diff --git a/build.gradle b/build.gradle index 05ad5479e8d..015db80d325 100644 --- a/build.gradle +++ b/build.gradle @@ -205,9 +205,9 @@ subprojects { "org.elasticsearch.gradle:build-tools:${version}": ':build-tools', "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':server', - "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', - "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', - "org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio', + "org.elasticsearch:elasticsearch-cli:${version}": ':libs:cli', + "org.elasticsearch:elasticsearch-core:${version}": ':libs:core', + "org.elasticsearch:elasticsearch-nio:${version}": ':libs:nio', "org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content', "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', @@ -226,6 +226,7 @@ subprojects { "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:packages:deb', "org.elasticsearch.distribution.deb:elasticsearch-oss:${version}": ':distribution:packages:oss-deb', "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage', + "org.elasticsearch.xpack.test:feature-aware:${version}": ':x-pack:test:feature-aware', // for transport client "org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4', "org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex', @@ -311,7 +312,15 @@ gradle.projectsEvaluated { // :test:framework:test cannot run before and after :server:test return } - configurations.all { + configurations.all { Configuration configuration -> + /* + * The featureAwarePlugin configuration has a dependency on x-pack:plugin:core and x-pack:plugin:core has a dependency on the + * featureAwarePlugin configuration. 
The below task ordering logic would force :x-pack:plugin:core:test + * :x-pack:test:feature-aware:test to depend on each other circularly. We break that cycle here. + */ + if (configuration.name == "featureAwarePlugin") { + return + } dependencies.all { Dependency dep -> Project upstreamProject = dependencyToProject(dep) if (upstreamProject != null) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy index cd6c7c36ee6..5c363ac043a 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy @@ -87,8 +87,9 @@ class ClusterConfiguration { * A closure to call which returns the unicast host to connect to for cluster formation. * * This allows multi node clusters, or a new cluster to connect to an existing cluster. - * The closure takes two arguments, the NodeInfo for the first node in the cluster, and - * an AntBuilder which may be used to wait on conditions before returning. + * The closure takes three arguments, the NodeInfo for the first node in the cluster, + * the NodeInfo for the node currently being configured, and an AntBuilder which may be used + * to wait on conditions before returning.
*/ @Input Closure unicastTransportUri = { NodeInfo seedNode, NodeInfo node, AntBuilder ant -> diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index f3c84db79d6..488579785e0 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -41,11 +41,28 @@ public final class ClusterClient { } /** - * Updates cluster wide specific settings using the Cluster Update Settings API + * Updates cluster wide specific settings using the Cluster Update Settings API. + * See Cluster Update Settings + * API on elastic.co + * @param clusterUpdateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, + options, ClusterUpdateSettingsResponse::fromXContent, emptySet()); + } + + /** + * Updates cluster wide specific settings using the Cluster Update Settings API. *

* See Cluster Update Settings * API on elastic.co + * @deprecated Prefer {@link #putSettings(ClusterUpdateSettingsRequest, RequestOptions)} */ + @Deprecated public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, @@ -53,11 +70,26 @@ public final class ClusterClient { } /** - * Asynchronously updates cluster wide specific settings using the Cluster Update Settings API + * Asynchronously updates cluster wide specific settings using the Cluster Update Settings API. + * See Cluster Update Settings + * API on elastic.co + * @param clusterUpdateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, + options, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet()); + } + /** + * Asynchronously updates cluster wide specific settings using the Cluster Update Settings API. *

* See Cluster Update Settings * API on elastic.co + * @deprecated Prefer {@link #putSettingsAsync(ClusterUpdateSettingsRequest, RequestOptions, ActionListener)} */ + @Deprecated public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index d51a92ea00f..fa7eb9ab9ec 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import org.apache.http.Header; -import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; @@ -47,10 +46,10 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; 
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; @@ -76,66 +75,159 @@ public final class IndicesClient { } /** - * Deletes an index using the Delete Index API + * Deletes an index using the Delete Index API. + * See + * Delete Index API on elastic.co + * @param deleteIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, options, + DeleteIndexResponse::fromXContent, emptySet()); + } + + /** + * Deletes an index using the Delete Index API. *

* See * Delete Index API on elastic.co + * @deprecated Prefer {@link #delete(DeleteIndexRequest, RequestOptions)} */ + @Deprecated public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, DeleteIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously deletes an index using the Delete Index API + * Asynchronously deletes an index using the Delete Index API. + * See + * Delete Index API on elastic.co + * @param deleteIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteAsync(DeleteIndexRequest deleteIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, options, + DeleteIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously deletes an index using the Delete Index API. *

* See * Delete Index API on elastic.co + * @deprecated Prefer {@link #deleteAsync(DeleteIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex, DeleteIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Creates an index using the Create Index API + * Creates an index using the Create Index API. + * See + * Create Index API on elastic.co + * @param createIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CreateIndexResponse create(CreateIndexRequest createIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex, options, + CreateIndexResponse::fromXContent, emptySet()); + } + + /** + * Creates an index using the Create Index API. *

* See * Create Index API on elastic.co + * @deprecated Prefer {@link #create(CreateIndexRequest, RequestOptions)} */ + @Deprecated public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex, CreateIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously creates an index using the Create Index API + * Asynchronously creates an index using the Create Index API. + * See + * Create Index API on elastic.co + * @param createIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void createAsync(CreateIndexRequest createIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex, options, + CreateIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously creates an index using the Create Index API. *

* See * Create Index API on elastic.co + * @deprecated Prefer {@link #createAsync(CreateIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void createAsync(CreateIndexRequest createIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex, CreateIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Updates the mappings on an index using the Put Mapping API + * Updates the mappings on an index using the Put Mapping API. + * See + * Put Mapping API on elastic.co + * @param putMappingRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping, options, + PutMappingResponse::fromXContent, emptySet()); + } + + /** + * Updates the mappings on an index using the Put Mapping API. *

* See * Put Mapping API on elastic.co + * @deprecated Prefer {@link #putMapping(PutMappingRequest, RequestOptions)} */ + @Deprecated public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping, PutMappingResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates the mappings on an index using the Put Mapping API + * Asynchronously updates the mappings on an index using the Put Mapping API. + * See + * Put Mapping API on elastic.co + * @param putMappingRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putMappingAsync(PutMappingRequest putMappingRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping, options, + PutMappingResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously updates the mappings on an index using the Put Mapping API. *

* See * Put Mapping API on elastic.co + * @deprecated Prefer {@link #putMappingAsync(PutMappingRequest, RequestOptions, ActionListener)} */ + @Deprecated public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping, @@ -143,242 +235,507 @@ public final class IndicesClient { } /** - * Retrieves the mappings on an index or indices using the Get Mapping API - *

+ * Retrieves the mappings on an index or indices using the Get Mapping API. * See * Get Mapping API on elastic.co + * @param getMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetMappingsResponse getMappings(GetMappingsRequest getMappingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, RequestConverters::getMappings, - GetMappingsResponse::fromXContent, emptySet(), headers); + public GetMappingsResponse getMappings(GetMappingsRequest getMappingsRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getMappingsRequest, RequestConverters::getMappings, options, + GetMappingsResponse::fromXContent, emptySet()); } /** - * Asynchronously retrieves the mappings on an index on indices using the Get Mapping API - *

+ * Asynchronously retrieves the mappings on an index or indices using the Get Mapping API. * See * Get Mapping API on elastic.co + * @param getMappingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getMappingsAsync(GetMappingsRequest getMappingsRequest, ActionListener listener, - Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, RequestConverters::getMappings, - GetMappingsResponse::fromXContent, listener, emptySet(), headers); + public void getMappingsAsync(GetMappingsRequest getMappingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getMappingsRequest, RequestConverters::getMappings, options, + GetMappingsResponse::fromXContent, listener, emptySet()); } /** - * Updates aliases using the Index Aliases API + * Updates aliases using the Index Aliases API. + * See + * Index Aliases API on elastic.co + * @param indicesAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, options, + IndicesAliasesResponse::fromXContent, emptySet()); + } + + /** + * Updates aliases using the Index Aliases API. *

* See * Index Aliases API on elastic.co + * @deprecated Prefer {@link #updateAliases(IndicesAliasesRequest, RequestOptions)} */ + @Deprecated public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, IndicesAliasesResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates aliases using the Index Aliases API + * Asynchronously updates aliases using the Index Aliases API. + * See + * Index Aliases API on elastic.co + * @param indicesAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, options, + IndicesAliasesResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously updates aliases using the Index Aliases API. *

* See * Index Aliases API on elastic.co + * @deprecated Prefer {@link #updateAliasesAsync(IndicesAliasesRequest, RequestOptions, ActionListener)} */ + @Deprecated public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener listener, - Header... headers) { + Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases, IndicesAliasesResponse::fromXContent, listener, emptySet(), headers); } /** - * Opens an index using the Open Index API + * Opens an index using the Open Index API. + * See + * Open Index API on elastic.co + * @param openIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public OpenIndexResponse open(OpenIndexRequest openIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex, options, + OpenIndexResponse::fromXContent, emptySet()); + } + + /** + * Opens an index using the Open Index API. *

* See * Open Index API on elastic.co + * @deprecated Prefer {@link #open(OpenIndexRequest, RequestOptions)} */ + @Deprecated public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex, OpenIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously opens an index using the Open Index API + * Asynchronously opens an index using the Open Index API. + * See + * Open Index API on elastic.co + * @param openIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void openAsync(OpenIndexRequest openIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex, options, + OpenIndexResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously opens an index using the Open Index API. *

* See * Open Index API on elastic.co + * @deprecated Prefer {@link #openAsync(OpenIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void openAsync(OpenIndexRequest openIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex, OpenIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Closes an index using the Close Index API + * Closes an index using the Close Index API. + * See + * Close Index API on elastic.co + * @param closeIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, options, + CloseIndexResponse::fromXContent, emptySet()); + } + + /** + * Closes an index using the Close Index API. *

* See * Close Index API on elastic.co + * @deprecated Prefer {@link #close(CloseIndexRequest, RequestOptions)} */ + @Deprecated public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, CloseIndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously closes an index using the Close Index API + * Asynchronously closes an index using the Close Index API. + * See + * Close Index API on elastic.co + * @param closeIndexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void closeAsync(CloseIndexRequest closeIndexRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, options, + CloseIndexResponse::fromXContent, listener, emptySet()); + } + + + /** + * Asynchronously closes an index using the Close Index API. *

* See * Close Index API on elastic.co + * @deprecated Prefer {@link #closeAsync(CloseIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex, CloseIndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Checks if one or more aliases exist using the Aliases Exist API + * Checks if one or more aliases exist using the Aliases Exist API. + * See + * Indices Aliases API on elastic.co + * @param getAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request + */ + public boolean existsAlias(GetAliasesRequest getAliasesRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias, options, + RestHighLevelClient::convertExistsResponse, emptySet()); + } + + /** + * Checks if one or more aliases exist using the Aliases Exist API. *

* See * Indices Aliases API on elastic.co + * @deprecated Prefer {@link #existsAlias(GetAliasesRequest, RequestOptions)} */ + @Deprecated public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException { return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias, RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** - * Asynchronously checks if one or more aliases exist using the Aliases Exist API + * Asynchronously checks if one or more aliases exist using the Aliases Exist API. + * See + * Indices Aliases API on elastic.co + * @param getAliasesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void existsAliasAsync(GetAliasesRequest getAliasesRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias, options, + RestHighLevelClient::convertExistsResponse, listener, emptySet()); + } + + /** + * Asynchronously checks if one or more aliases exist using the Aliases Exist API. *

* See * Indices Aliases API on elastic.co + * @deprecated Prefer {@link #existsAliasAsync(GetAliasesRequest, RequestOptions, ActionListener)} */ + @Deprecated public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); } /** - * Refresh one or more indices using the Refresh API + * Refresh one or more indices using the Refresh API. + * See Refresh API on elastic.co + * @param refreshRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RefreshResponse refresh(RefreshRequest refreshRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, options, + RefreshResponse::fromXContent, emptySet()); + } + + /** + * Refresh one or more indices using the Refresh API. *

* See Refresh API on elastic.co + * @deprecated Prefer {@link #refresh(RefreshRequest, RequestOptions)} */ + @Deprecated public RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously refresh one or more indices using the Refresh API + * Asynchronously refresh one or more indices using the Refresh API. + * See Refresh API on elastic.co + * @param refreshRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void refreshAsync(RefreshRequest refreshRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, options, + RefreshResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously refresh one or more indices using the Refresh API. *

* See Refresh API on elastic.co + * @deprecated Prefer {@link #refreshAsync(RefreshRequest, RequestOptions, ActionListener)} */ + @Deprecated public void refreshAsync(RefreshRequest refreshRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent, listener, emptySet(), headers); } /** - * Flush one or more indices using the Flush API + * Flush one or more indices using the Flush API. + * See Flush API on elastic.co + * @param flushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public FlushResponse flush(FlushRequest flushRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, options, + FlushResponse::fromXContent, emptySet()); + } + + /** + * Flush one or more indices using the Flush API. *

* See Flush API on elastic.co + * @deprecated Prefer {@link #flush(FlushRequest, RequestOptions)} */ + @Deprecated public FlushResponse flush(FlushRequest flushRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously flush one or more indices using the Flush API + * Asynchronously flush one or more indices using the Flush API. + * See Flush API on elastic.co + * @param flushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void flushAsync(FlushRequest flushRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, options, + FlushResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously flush one or more indices using the Flush API. *

* See Flush API on elastic.co + * @deprecated Prefer {@link #flushAsync(FlushRequest, RequestOptions, ActionListener)} */ + @Deprecated public void flushAsync(FlushRequest flushRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent, listener, emptySet(), headers); } - /** Initiate a synced flush manually using the synced flush API - *

- * See - * Synced flush API on elastic.co - */ - public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, - SyncedFlushResponse::fromXContent, emptySet(), headers); - } - /** - * Asynchronously initiate a synced flush manually using the synced flush API - *

+ * Initiate a synced flush manually using the synced flush API. * See * Synced flush API on elastic.co + * @param syncedFlushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, - SyncedFlushResponse::fromXContent, listener, emptySet(), headers); + public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, options, + SyncedFlushResponse::fromXContent, emptySet()); } + /** + * Asynchronously initiate a synced flush manually using the synced flush API. + * See + * Synced flush API on elastic.co + * @param syncedFlushRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced, options, + SyncedFlushResponse::fromXContent, listener, emptySet()); + } /** - * Retrieve the settings of one or more indices - *

+ * Retrieve the settings of one or more indices. * See * Indices Get Settings API on elastic.co + * @param getSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetSettingsResponse getSettings(GetSettingsRequest getSettingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getSettingsRequest, RequestConverters::getSettings, - GetSettingsResponse::fromXContent, emptySet(), headers); + public GetSettingsResponse getSettings(GetSettingsRequest getSettingsRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(getSettingsRequest, RequestConverters::getSettings, options, + GetSettingsResponse::fromXContent, emptySet()); } /** - * Asynchronously retrieve the settings of one or more indices - *

+ * Asynchronously retrieve the settings of one or more indices. * See * Indices Get Settings API on elastic.co + * @param getSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getSettingsAsync(GetSettingsRequest getSettingsRequest, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, RequestConverters::getSettings, - GetSettingsResponse::fromXContent, listener, emptySet(), headers); + public void getSettingsAsync(GetSettingsRequest getSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getSettingsRequest, RequestConverters::getSettings, options, + GetSettingsResponse::fromXContent, listener, emptySet()); } /** - * Force merge one or more indices using the Force Merge API + * Force merge one or more indices using the Force Merge API. + * See + * Force Merge API on elastic.co + * @param forceMergeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, options, + ForceMergeResponse::fromXContent, emptySet()); + } + + /** + * Force merge one or more indices using the Force Merge API. *

* See * Force Merge API on elastic.co + * @deprecated Prefer {@link #forceMerge(ForceMergeRequest, RequestOptions)} */ + @Deprecated public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, ForceMergeResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously force merge one or more indices using the Force Merge API + * Asynchronously force merge one or more indices using the Force Merge API. + * See + * Force Merge API on elastic.co + * @param forceMergeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void forceMergeAsync(ForceMergeRequest forceMergeRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, options, + ForceMergeResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously force merge one or more indices using the Force Merge API. *

* See * Force Merge API on elastic.co + * @deprecated Prefer {@link #forceMergeAsync(ForceMergeRequest, RequestOptions, ActionListener)} */ + @Deprecated public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge, ForceMergeResponse::fromXContent, listener, emptySet(), headers); } /** - * Clears the cache of one or more indices using the Clear Cache API + * Clears the cache of one or more indices using the Clear Cache API. + * See + * Clear Cache API on elastic.co + * @param clearIndicesCacheRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, options, + ClearIndicesCacheResponse::fromXContent, emptySet()); + } + + /** + * Clears the cache of one or more indices using the Clear Cache API. *

* See * Clear Cache API on elastic.co + * @deprecated Prefer {@link #clearCache(ClearIndicesCacheRequest, RequestOptions)} */ + @Deprecated public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, ClearIndicesCacheResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously clears the cache of one or more indices using the Clear Cache API + * Asynchronously clears the cache of one or more indices using the Clear Cache API. + * See + * Clear Cache API on elastic.co + * @param clearIndicesCacheRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, options, + ClearIndicesCacheResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously clears the cache of one or more indices using the Clear Cache API. *

* See * Clear Cache API on elastic.co + * @deprecated Prefer {@link #clearCacheAsync(ClearIndicesCacheRequest, RequestOptions, ActionListener)} */ + @Deprecated public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache, @@ -387,17 +744,57 @@ public final class IndicesClient { /** * Checks if the index (indices) exists or not. - *

* See * Indices Exists API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request */ - public boolean exists(GetIndexRequest request, Header... headers) throws IOException { + public boolean exists(GetIndexRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequest( request, RequestConverters::indicesExist, + options, RestHighLevelClient::convertExistsResponse, - Collections.emptySet(), - headers + Collections.emptySet() + ); + } + + /** + * Checks if the index (indices) exists or not. + *

+ * See + * Indices Exists API on elastic.co + * @deprecated Prefer {@link #exists(GetIndexRequest, RequestOptions)} + */ + @Deprecated + public boolean exists(GetIndexRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequest( + request, + RequestConverters::indicesExist, + RestHighLevelClient::convertExistsResponse, + Collections.emptySet(), + headers + ); + } + + /** + * Asynchronously checks if the index (indices) exists or not. + * See + * Indices Exists API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void existsAsync(GetIndexRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsync( + request, + RequestConverters::indicesExist, + options, + RestHighLevelClient::convertExistsResponse, + listener, + Collections.emptySet() ); } @@ -406,7 +803,9 @@ public final class IndicesClient { *

* See * Indices Exists API on elastic.co + * @deprecated Prefer {@link #existsAsync(GetIndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public void existsAsync(GetIndexRequest request, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsync( request, @@ -419,88 +818,213 @@ public final class IndicesClient { } /** - * Shrinks an index using the Shrink Index API + * Shrinks an index using the Shrink Index API. + * See + * Shrink Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ResizeResponse shrink(ResizeRequest resizeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, options, + ResizeResponse::fromXContent, emptySet()); + } + + /** + * Shrinks an index using the Shrink Index API. *

* See * Shrink Index API on elastic.co + * @deprecated Prefer {@link #shrink(ResizeRequest, RequestOptions)} */ + @Deprecated public ResizeResponse shrink(ResizeRequest resizeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously shrinks an index using the Shrink index API + * Asynchronously shrinks an index using the Shrink index API. + * See + * Shrink Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void shrinkAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, options, + ResizeResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously shrinks an index using the Shrink index API. *

* See * Shrink Index API on elastic.co + * @deprecated Prefer {@link #shrinkAsync(ResizeRequest, RequestOptions, ActionListener)} */ + @Deprecated public void shrinkAsync(ResizeRequest resizeRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent, listener, emptySet(), headers); } /** - * Splits an index using the Split Index API + * Splits an index using the Split Index API. + * See + * Split Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public ResizeResponse split(ResizeRequest resizeRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, options, + ResizeResponse::fromXContent, emptySet()); + } + + /** + * Splits an index using the Split Index API. *

 * See * Split Index API on elastic.co + * @deprecated Prefer {@link #split(ResizeRequest, RequestOptions)} */ + @Deprecated public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously splits an index using the Split Index API + * Asynchronously splits an index using the Split Index API. + * See + * Split Index API on elastic.co + * @param resizeRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void splitAsync(ResizeRequest resizeRequest, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, options, ResizeResponse::fromXContent, listener, emptySet()); } + + /** + * Asynchronously splits an index using the Split Index API. *

* See * Split Index API on elastic.co + * @deprecated Prefer {@link #splitAsync(ResizeRequest, RequestOptions, ActionListener)} */ + @Deprecated public void splitAsync(ResizeRequest resizeRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent, listener, emptySet(), headers); } /** - * Rolls over an index using the Rollover Index API + * Rolls over an index using the Rollover Index API. + * See + * Rollover Index API on elastic.co + * @param rolloverRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RolloverResponse rollover(RolloverRequest rolloverRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover, options, + RolloverResponse::fromXContent, emptySet()); + } + + /** + * Rolls over an index using the Rollover Index API. *

* See * Rollover Index API on elastic.co + * @deprecated Prefer {@link #rollover(RolloverRequest, RequestOptions)} */ + @Deprecated public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously rolls over an index using the Rollover Index API + * Asynchronously rolls over an index using the Rollover Index API. + * See + * Rollover Index API on elastic.co + * @param rolloverRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void rolloverAsync(RolloverRequest rolloverRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, options, + RolloverResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously rolls over an index using the Rollover Index API. *

* See * Rollover Index API on elastic.co + * @deprecated Prefer {@link #rolloverAsync(RolloverRequest, RequestOptions, ActionListener)} */ + @Deprecated public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent, listener, emptySet(), headers); } /** - * Updates specific index level settings using the Update Indices Settings API + * Updates specific index level settings using the Update Indices Settings API. + * See Update Indices Settings + * API on elastic.co + * @param updateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, options, + UpdateSettingsResponse::fromXContent, emptySet()); + } + + /** + * Updates specific index level settings using the Update Indices Settings API. *

* See Update Indices Settings * API on elastic.co + * @deprecated Prefer {@link #putSettings(UpdateSettingsRequest, RequestOptions)} */ + @Deprecated public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException { return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, UpdateSettingsResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates specific index level settings using the Update Indices Settings API + * Asynchronously updates specific index level settings using the Update Indices Settings API. + * See Update Indices Settings + * API on elastic.co + * @param updateSettingsRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, options, + UpdateSettingsResponse::fromXContent, listener, emptySet()); + } + + /** + * Asynchronously updates specific index level settings using the Update Indices Settings API. *

* See Update Indices Settings * API on elastic.co + * @deprecated Prefer {@link #putSettingsAsync(UpdateSettingsRequest, RequestOptions, ActionListener)} */ + @Deprecated public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener listener, Header... headers) { restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings, @@ -508,25 +1032,31 @@ public final class IndicesClient { } /** - * Puts an index template using the Index Templates API - *

+ * Puts an index template using the Index Templates API. * See Index Templates API * on elastic.co + * @param putIndexTemplateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public PutIndexTemplateResponse putTemplate(PutIndexTemplateRequest putIndexTemplateRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, - PutIndexTemplateResponse::fromXContent, emptySet(), headers); + public PutIndexTemplateResponse putTemplate(PutIndexTemplateRequest putIndexTemplateRequest, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options, + PutIndexTemplateResponse::fromXContent, emptySet()); } /** - * Asynchronously puts an index template using the Index Templates API - *

+ * Asynchronously puts an index template using the Index Templates API. * See Index Templates API * on elastic.co + * @param putIndexTemplateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, - PutIndexTemplateResponse::fromXContent, listener, emptySet(), headers); + public void putTemplateAsync(PutIndexTemplateRequest putIndexTemplateRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putIndexTemplateRequest, RequestConverters::putTemplate, options, + PutIndexTemplateResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java index 72b1813f939..5c5a82b52f4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.GetPipelineRequest; @@ -45,70 +44,85 @@ public final class IngestClient { } /** - * Add a pipeline or update an existing pipeline - *

+ * Add a pipeline or update an existing pipeline. * See * Put Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public WritePipelineResponse putPipeline(PutPipelineRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline, - WritePipelineResponse::fromXContent, emptySet(), headers); + public WritePipelineResponse putPipeline(PutPipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::putPipeline, options, + WritePipelineResponse::fromXContent, emptySet()); } /** - * Asynchronously add a pipeline or update an existing pipeline - *

+ * Asynchronously add a pipeline or update an existing pipeline. * See * Put Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void putPipelineAsync(PutPipelineRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, - WritePipelineResponse::fromXContent, listener, emptySet(), headers); + public void putPipelineAsync(PutPipelineRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::putPipeline, options, + WritePipelineResponse::fromXContent, listener, emptySet()); } /** - * Get an existing pipeline - *

+ * Get an existing pipeline. * See * Get Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetPipelineResponse getPipeline(GetPipelineRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline, - GetPipelineResponse::fromXContent, emptySet(), headers); + public GetPipelineResponse getPipeline(GetPipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::getPipeline, options, + GetPipelineResponse::fromXContent, emptySet()); } /** - * Asynchronously get an existing pipeline - *

+ * Asynchronously get an existing pipeline. * See * Get Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getPipelineAsync(GetPipelineRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline, - GetPipelineResponse::fromXContent, listener, emptySet(), headers); + public void getPipelineAsync(GetPipelineRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::getPipeline, options, + GetPipelineResponse::fromXContent, listener, emptySet()); } /** - * Delete an existing pipeline - *

+ * Delete an existing pipeline. * See * * Delete Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public WritePipelineResponse deletePipeline(DeletePipelineRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline, - WritePipelineResponse::fromXContent, emptySet(), headers); + public WritePipelineResponse deletePipeline(DeletePipelineRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::deletePipeline, options, + WritePipelineResponse::fromXContent, emptySet()); } /** - * Asynchronously delete an existing pipeline - *

+ * Asynchronously delete an existing pipeline. * See * * Delete Pipeline API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void deletePipelineAsync(DeletePipelineRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, - WritePipelineResponse::fromXContent, listener, emptySet(), headers); + public void deletePipelineAsync(DeletePipelineRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, options, + WritePipelineResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index e5a45e19fe0..96fb7e59de3 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -29,6 +29,7 @@ import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -108,6 +109,17 @@ final class RequestConverters { // Contains only status utility methods } + static Request cancelTasks(CancelTasksRequest 
cancelTasksRequest) { + Request request = new Request(HttpPost.METHOD_NAME, "/_tasks/_cancel"); + Params params = new Params(request); + params.withTimeout(cancelTasksRequest.getTimeout()) + .withTaskId(cancelTasksRequest.getTaskId()) + .withNodes(cancelTasksRequest.getNodes()) + .withParentTaskId(cancelTasksRequest.getParentTaskId()) + .withActions(cancelTasksRequest.getActions()); + return request; + } + static Request delete(DeleteRequest deleteRequest) { String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id()); Request request = new Request(HttpDelete.METHOD_NAME, endpoint); @@ -710,7 +722,7 @@ final class RequestConverters { return request; } - static Request getSettings(GetSettingsRequest getSettingsRequest) throws IOException { + static Request getSettings(GetSettingsRequest getSettingsRequest) { String[] indices = getSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : getSettingsRequest.indices(); String[] names = getSettingsRequest.names() == null ? 
Strings.EMPTY_ARRAY : getSettingsRequest.names(); @@ -1070,6 +1082,13 @@ final class RequestConverters { return this; } + Params withTaskId(TaskId taskId) { + if (taskId != null && taskId.isSet()) { + return putParam("task_id", taskId.toString()); + } + return this; + } + Params withParentTaskId(TaskId parentTaskId) { if (parentTaskId != null && parentTaskId.isSet()) { return putParam("parent_task_id", parentTaskId.toString()); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index a9587b73c19..8980508c487 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -285,16 +285,19 @@ public class RestHighLevelClient implements Closeable { } /** - * Executes a bulk request using the Bulk API - * + * Executes a bulk request using the Bulk API. * See Bulk API on elastic.co + * @param bulkRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ public final BulkResponse bulk(BulkRequest bulkRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, emptySet()); } /** - * Executes a bulk request using the Bulk API + * Executes a bulk request using the Bulk API. * * See Bulk API on elastic.co * @deprecated Prefer {@link #bulk(BulkRequest, RequestOptions)} @@ -305,16 +308,18 @@ public class RestHighLevelClient implements Closeable { } /** - * Asynchronously executes a bulk request using the Bulk API - * + * Asynchronously executes a bulk request using the Bulk API. 
* See Bulk API on elastic.co + * @param bulkRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, options, BulkResponse::fromXContent, listener, emptySet()); } /** - * Asynchronously executes a bulk request using the Bulk API + * Asynchronously executes a bulk request using the Bulk API. * * See Bulk API on elastic.co * @deprecated Prefer {@link #bulkAsync(BulkRequest, RequestOptions, ActionListener)} @@ -326,194 +331,482 @@ public class RestHighLevelClient implements Closeable { /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return true if the ping succeeded, false otherwise + * @throws IOException in case there is a problem sending the request */ + public final boolean ping(RequestOptions options) throws IOException { + return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), options, RestHighLevelClient::convertExistsResponse, + emptySet()); + } + + /** + * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise + * @deprecated Prefer {@link #ping(RequestOptions)} + */ + @Deprecated public final boolean ping(Header... 
headers) throws IOException { return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** - * Get the cluster info otherwise provided when sending an HTTP request to port 9200 + * Get the cluster info otherwise provided when sending an HTTP request to '/' + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ + public final MainResponse info(RequestOptions options) throws IOException { + return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), options, + MainResponse::fromXContent, emptySet()); + } + + /** + * Get the cluster info otherwise provided when sending an HTTP request to port 9200 + * @deprecated Prefer {@link #info(RequestOptions)} + */ + @Deprecated public final MainResponse info(Header... headers) throws IOException { return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(), MainResponse::fromXContent, emptySet(), headers); } /** - * Retrieves a document by id using the Get API + * Retrieves a document by id using the Get API. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final GetResponse get(GetRequest getRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, singleton(404)); + } + + /** + * Retrieves a document by id using the Get API. 
* * See Get API on elastic.co + * @deprecated Prefer {@link #get(GetRequest, RequestOptions)} */ + @Deprecated public final GetResponse get(GetRequest getRequest, Header... headers) throws IOException { return performRequestAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, singleton(404), headers); } /** - * Asynchronously retrieves a document by id using the Get API + * Asynchronously retrieves a document by id using the Get API. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void getAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, options, GetResponse::fromXContent, listener, + singleton(404)); + } + + /** + * Asynchronously retrieves a document by id using the Get API. * * See Get API on elastic.co + * @deprecated Prefer {@link #getAsync(GetRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void getAsync(GetRequest getRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, listener, singleton(404), headers); } /** - * Retrieves multiple documents by id using the Multi Get API + * Retrieves multiple documents by id using the Multi Get API. + * See Multi Get API on elastic.co + * @param multiGetRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent, + singleton(404)); + } + + /** + * Retrieves multiple documents by id using the Multi Get API. * * See Multi Get API on elastic.co + * @deprecated Prefer {@link #multiGet(MultiGetRequest, RequestOptions)} */ + @Deprecated public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException { return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, singleton(404), headers); } /** - * Asynchronously retrieves multiple documents by id using the Multi Get API + * Asynchronously retrieves multiple documents by id using the Multi Get API. + * See Multi Get API on elastic.co + * @param multiGetRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void multiGetAsync(MultiGetRequest multiGetRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, options, MultiGetResponse::fromXContent, listener, + singleton(404)); + } + + /** + * Asynchronously retrieves multiple documents by id using the Multi Get API. * * See Multi Get API on elastic.co + * @deprecated Prefer {@link #multiGetAsync(MultiGetRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener listener, Header... 
headers) { performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, listener, singleton(404), headers); } /** - * Checks for the existence of a document. Returns true if it exists, false otherwise + * Checks for the existence of a document. Returns true if it exists, false otherwise. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return true if the document exists, false otherwise + * @throws IOException in case there is a problem sending the request + */ + public final boolean exists(GetRequest getRequest, RequestOptions options) throws IOException { + return performRequest(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, emptySet()); + } + + /** + * Checks for the existence of a document. Returns true if it exists, false otherwise. * * See Get API on elastic.co + * @deprecated Prefer {@link #exists(GetRequest, RequestOptions)} */ + @Deprecated public final boolean exists(GetRequest getRequest, Header... headers) throws IOException { return performRequest(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers); } /** - * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise + * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise. + * See Get API on elastic.co + * @param getRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void existsAsync(GetRequest getRequest, RequestOptions options, ActionListener listener) { + performRequestAsync(getRequest, RequestConverters::exists, options, RestHighLevelClient::convertExistsResponse, listener, + emptySet()); + } + + /** + * Asynchronously checks for the existence of a document. Returns true if it exists, false otherwise. * * See Get API on elastic.co + * @deprecated Prefer {@link #existsAsync(GetRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void existsAsync(GetRequest getRequest, ActionListener listener, Header... headers) { performRequestAsync(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); } /** - * Index a document using the Index API + * Index a document using the Index API. + * See Index API on elastic.co + * @param indexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final IndexResponse index(IndexRequest indexRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, emptySet()); + } + + /** + * Index a document using the Index API. * * See Index API on elastic.co + * @deprecated Prefer {@link #index(IndexRequest, RequestOptions)} */ + @Deprecated public final IndexResponse index(IndexRequest indexRequest, Header... 
headers) throws IOException { return performRequestAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously index a document using the Index API + * Asynchronously index a document using the Index API. + * See Index API on elastic.co + * @param indexRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void indexAsync(IndexRequest indexRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, options, IndexResponse::fromXContent, listener, + emptySet()); + } + + /** + * Asynchronously index a document using the Index API. * * See Index API on elastic.co + * @deprecated Prefer {@link #indexAsync(IndexRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void indexAsync(IndexRequest indexRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, listener, emptySet(), headers); } /** - * Updates a document using the Update API + * Updates a document using the Update API. + * See Update API on elastic.co + * @param updateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final UpdateResponse update(UpdateRequest updateRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, emptySet()); + } + + /** + * Updates a document using the Update API. *

* See Update API on elastic.co + * @deprecated Prefer {@link #update(UpdateRequest, RequestOptions)} */ + @Deprecated public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { return performRequestAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously updates a document using the Update API + * Asynchronously updates a document using the Update API. + * See Update API on elastic.co + * @param updateRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void updateAsync(UpdateRequest updateRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, options, UpdateResponse::fromXContent, listener, + emptySet()); + } + + /** + * Asynchronously updates a document using the Update API. *

* See Update API on elastic.co + * @deprecated Prefer {@link #updateAsync(UpdateRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void updateAsync(UpdateRequest updateRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, listener, emptySet(), headers); } /** - * Deletes a document by id using the Delete API + * Deletes a document by id using the Delete API. + * See Delete API on elastic.co + * @param deleteRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final DeleteResponse delete(DeleteRequest deleteRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, + singleton(404)); + } + + /** + * Deletes a document by id using the Delete API. * * See Delete API on elastic.co + * @deprecated Prefer {@link #delete(DeleteRequest, RequestOptions)} */ + @Deprecated public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException { return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, singleton(404), headers); } /** - * Asynchronously deletes a document by id using the Delete API - * + * Asynchronously deletes a document by id using the Delete API. * See Delete API on elastic.co + * @param deleteRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public final void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... 
headers) { - performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener, - Collections.singleton(404), headers); + public final void deleteAsync(DeleteRequest deleteRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, options, DeleteResponse::fromXContent, listener, + Collections.singleton(404)); } /** - * Executes a search using the Search API + * Asynchronously deletes a document by id using the Delete API. + * + * See Delete API on elastic.co + * @deprecated Prefer {@link #deleteAsync(DeleteRequest, RequestOptions, ActionListener)} + */ + @Deprecated + public final void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... headers) { + performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener, + Collections.singleton(404), headers); + } + + /** + * Executes a search request using the Search API. + * See Search API on elastic.co + * @param searchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final SearchResponse search(SearchRequest searchRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, emptySet()); + } + + /** + * Executes a search using the Search API. * * See Search API on elastic.co + * @deprecated Prefer {@link #search(SearchRequest, RequestOptions)} */ + @Deprecated public final SearchResponse search(SearchRequest searchRequest, Header... 
headers) throws IOException { return performRequestAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously executes a search using the Search API + * Asynchronously executes a search using the Search API. + * See Search API on elastic.co + * @param searchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void searchAsync(SearchRequest searchRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, options, SearchResponse::fromXContent, listener, + emptySet()); + } + + /** + * Asynchronously executes a search using the Search API. * * See Search API on elastic.co + * @deprecated Prefer {@link #searchAsync(SearchRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void searchAsync(SearchRequest searchRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, listener, emptySet(), headers); } /** - * Executes a multi search using the msearch API + * Executes a multi search using the msearch API. + * See Multi search API on + * elastic.co + * @param multiSearchRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, + emptySet()); + } + + /** + * Executes a multi search using the msearch API. * * See Multi search API on * elastic.co + * @deprecated Prefer {@link #multiSearch(MultiSearchRequest, RequestOptions)} */ + @Deprecated public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException { return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, emptySet(), headers); } /** - * Asynchronously executes a multi search using the msearch API + * Asynchronously executes a multi search using the msearch API. + * See Multi search API on + * elastic.co + * @param searchRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void multiSearchAsync(MultiSearchRequest searchRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, options, MultiSearchResponse::fromXContext, + listener, emptySet()); + } + + /** + * Asynchronously executes a multi search using the msearch API. * * See Multi search API on * elastic.co + * @deprecated Prefer {@link #multiSearchAsync(MultiSearchRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener listener, Header... 
headers) { performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, listener, emptySet(), headers); } /** - * Executes a search using the Search Scroll API + * Executes a search using the Search Scroll API. + * See Search Scroll + * API on elastic.co + * @param searchScrollRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent, + emptySet()); + } + + /** + * Executes a search using the Search Scroll API. * * See Search Scroll * API on elastic.co + * @deprecated Prefer {@link #searchScroll(SearchScrollRequest, RequestOptions)} */ + @Deprecated public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException { return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously executes a search using the Search Scroll API + * Asynchronously executes a search using the Search Scroll API. + * See Search Scroll + * API on elastic.co + * @param searchScrollRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, options, SearchResponse::fromXContent, + listener, emptySet()); + } + + /** + * Asynchronously executes a search using the Search Scroll API. * * See Search Scroll * API on elastic.co + * @deprecated Prefer {@link #searchScrollAsync(SearchScrollRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent, @@ -521,22 +814,54 @@ public class RestHighLevelClient implements Closeable { } /** - * Clears one or more scroll ids using the Clear Scroll API + * Clears one or more scroll ids using the Clear Scroll API. + * See + * Clear Scroll API on elastic.co + * @param clearScrollRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent, + emptySet()); + } + + /** + * Clears one or more scroll ids using the Clear Scroll API. 
* * See * Clear Scroll API on elastic.co + * @deprecated Prefer {@link #clearScroll(ClearScrollRequest, RequestOptions)} */ + @Deprecated public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException { return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent, emptySet(), headers); } /** - * Asynchronously clears one or more scroll ids using the Clear Scroll API + * Asynchronously clears one or more scroll ids using the Clear Scroll API. + * See + * Clear Scroll API on elastic.co + * @param clearScrollRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, options, ClearScrollResponse::fromXContent, + listener, emptySet()); + } + + /** + * Asynchronously clears one or more scroll ids using the Clear Scroll API. * * See * Clear Scroll API on elastic.co + * @deprecated Prefer {@link #clearScrollAsync(ClearScrollRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent, @@ -545,47 +870,79 @@ public class RestHighLevelClient implements Closeable { /** * Executes a request using the Search Template API. - * * See Search Template API * on elastic.co. + * @param searchTemplateRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ public final SearchTemplateResponse searchTemplate(SearchTemplateRequest searchTemplateRequest, - Header... headers) throws IOException { - return performRequestAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, - SearchTemplateResponse::fromXContent, emptySet(), headers); + RequestOptions options) throws IOException { + return performRequestAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, options, + SearchTemplateResponse::fromXContent, emptySet()); } /** - * Asynchronously executes a request using the Search Template API + * Asynchronously executes a request using the Search Template API. * * See Search Template API * on elastic.co. */ - public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, - ActionListener listener, - Header... headers) { - performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, - SearchTemplateResponse::fromXContent, listener, emptySet(), headers); + public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, options, + SearchTemplateResponse::fromXContent, listener, emptySet()); } + /** + * Executes a request using the Ranking Evaluation API. + * See Ranking Evaluation API + * on elastic.co + * @param rankEvalRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent, + emptySet()); + } /** * Executes a request using the Ranking Evaluation API. * * See Ranking Evaluation API * on elastic.co + * @deprecated Prefer {@link #rankEval(RankEvalRequest, RequestOptions)} */ + @Deprecated public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... headers) throws IOException { return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, emptySet(), headers); } + /** + * Asynchronously executes a request using the Ranking Evaluation API. + * See Ranking Evaluation API + * on elastic.co + * @param rankEvalRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void rankEvalAsync(RankEvalRequest rankEvalRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, options, RankEvalResponse::fromXContent, listener, + emptySet()); + } + /** * Asynchronously executes a request using the Ranking Evaluation API. * * See Ranking Evaluation API * on elastic.co + * @deprecated Prefer {@link #rankEvalAsync(RankEvalRequest, RequestOptions, ActionListener)} */ + @Deprecated public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener listener, Header... 
headers) { performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, listener, emptySet(), headers); @@ -593,27 +950,31 @@ public class RestHighLevelClient implements Closeable { /** * Executes a request using the Field Capabilities API. - * * See Field Capabilities API * on elastic.co. + * @param fieldCapabilitiesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest, - Header... headers) throws IOException { - return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, - FieldCapabilitiesResponse::fromXContent, emptySet(), headers); + RequestOptions options) throws IOException { + return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, options, + FieldCapabilitiesResponse::fromXContent, emptySet()); } /** * Asynchronously executes a request using the Field Capabilities API. - * * See Field Capabilities API * on elastic.co. + * @param fieldCapabilitiesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, - ActionListener listener, - Header... 
headers) { - performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, - FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers); + public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps, options, + FieldCapabilitiesResponse::fromXContent, listener, emptySet()); } @Deprecated diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index 104bc912711..b7cd2d52732 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -19,7 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse; @@ -49,97 +48,117 @@ public final class SnapshotClient { /** * Gets a list of snapshot repositories. If the list of repositories is empty or it contains a single element "_all", all * registered repositories are returned. - *

* See Snapshot and Restore * API on elastic.co + * @param getRepositoriesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public GetRepositoriesResponse getRepositories(GetRepositoriesRequest getRepositoriesRequest, Header... headers) + public GetRepositoriesResponse getRepositories(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, - GetRepositoriesResponse::fromXContent, emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, options, + GetRepositoriesResponse::fromXContent, emptySet()); } /** * Asynchronously gets a list of snapshot repositories. If the list of repositories is empty or it contains a single element "_all", all * registered repositories are returned. - *

* See Snapshot and Restore * API on elastic.co + * @param getRepositoriesRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, - GetRepositoriesResponse::fromXContent, listener, emptySet(), headers); + public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, options, + GetRepositoriesResponse::fromXContent, listener, emptySet()); } /** * Creates a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param putRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, - PutRepositoryResponse::fromXContent, emptySet(), headers); + public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, options, + PutRepositoryResponse::fromXContent, emptySet()); } /** * Asynchronously creates a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param putRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, - PutRepositoryResponse::fromXContent, listener, emptySet(), headers); + public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, options, + PutRepositoryResponse::fromXContent, listener, emptySet()); } /** * Deletes a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param deleteRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, Header... headers) + public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, - DeleteRepositoryResponse::fromXContent, emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, options, + DeleteRepositoryResponse::fromXContent, emptySet()); } /** * Asynchronously deletes a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param deleteRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, - DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers); + public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository, options, + DeleteRepositoryResponse::fromXContent, listener, emptySet()); } /** * Verifies a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param verifyRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, Header... headers) + public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, - VerifyRepositoryResponse::fromXContent, emptySet(), headers); + return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, + VerifyRepositoryResponse::fromXContent, emptySet()); } /** * Asynchronously verifies a snapshot repository. - *

* See Snapshot and Restore * API on elastic.co + * @param verifyRepositoryRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, - ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, - VerifyRepositoryResponse::fromXContent, listener, emptySet(), headers); + public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository, options, + VerifyRepositoryResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java index 214f1e7884a..f8f03d7f7d2 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TasksClient.java @@ -19,8 +19,9 @@ package org.elasticsearch.client; -import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -33,7 +34,7 @@ import static java.util.Collections.emptySet; *

* See Task Management API on elastic.co */ -public class TasksClient { +public final class TasksClient { private final RestHighLevelClient restHighLevelClient; TasksClient(RestHighLevelClient restHighLevelClient) { @@ -41,24 +42,70 @@ public class TasksClient { } /** - * Get current tasks using the Task Management API - *

+ * Get current tasks using the Task Management API. * See * Task Management API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response */ - public ListTasksResponse list(ListTasksRequest request, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, - emptySet(), headers); + public ListTasksResponse list(ListTasksRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, options, + ListTasksResponse::fromXContent, emptySet()); } /** - * Asynchronously get current tasks using the Task Management API - *

+ * Asynchronously get current tasks using the Task Management API. * See * Task Management API on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion */ - public void listAsync(ListTasksRequest request, ActionListener listener, Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, - listener, emptySet(), headers); + public void listAsync(ListTasksRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, options, + ListTasksResponse::fromXContent, listener, emptySet()); + } + + /** + * Cancel one or more cluster tasks using the Task Management API. + * + * See + * Task Management API on elastic.co + * @param cancelTasksRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + * + */ + public CancelTasksResponse cancel(CancelTasksRequest cancelTasksRequest, RequestOptions options ) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + cancelTasksRequest, + RequestConverters::cancelTasks, + options, + parser -> CancelTasksResponse.fromXContent(parser), + emptySet() + ); + } + + /** + * Asynchronously cancel one or more cluster tasks using the Task Management API. + * + * See + * Task Management API on elastic.co + * @param cancelTasksRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void cancelAsync(CancelTasksRequest cancelTasksRequest, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + cancelTasksRequest, + RequestConverters::cancelTasks, + options, + parser -> CancelTasksResponse.fromXContent(parser), + listener, + emptySet() + ); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 9782b1016b4..d41c47177f9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -20,8 +20,6 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.action.bulk.BulkRequest; @@ -39,7 +37,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import java.util.Arrays; -import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -81,7 +78,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { assertThat(listener.afterCounts.get(), equalTo(1)); assertThat(listener.bulkFailures.size(), equalTo(0)); assertResponseItems(listener.bulkItems, numDocs); - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } } @@ -107,7 +104,7 @@ public class BulkProcessorIT extends 
ESRestHighLevelClientTestCase { assertThat(listener.afterCounts.get(), equalTo(1)); assertThat(listener.bulkFailures.size(), equalTo(0)); assertResponseItems(listener.bulkItems, numDocs); - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } } @@ -159,7 +156,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); } - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } public void testBulkProcessorWaitOnClose() throws Exception { @@ -190,7 +187,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { } assertThat(listener.bulkFailures.size(), equalTo(0)); assertResponseItems(listener.bulkItems, numDocs); - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), numDocs); } public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { @@ -267,7 +264,7 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase { } } - assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), testDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT), testDocs); } private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java index 597d35a9996..fe6aa6b1017 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java @@ -127,8 +127,8 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { } } - highLevelClient().indices().refresh(new RefreshRequest()); - int multiGetResponsesCount = highLevelClient().multiGet(multiGetRequest).getResponses().length; + highLevelClient().indices().refresh(new RefreshRequest(), RequestOptions.DEFAULT); + int multiGetResponsesCount = highLevelClient().multiGet(multiGetRequest, RequestOptions.DEFAULT).getResponses().length; if (rejectedExecutionExpected) { assertThat(multiGetResponsesCount, lessThanOrEqualTo(numberOfAsyncOps)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 9314bb2e36c..f1110163b25 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -57,6 +57,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase { setRequest.persistentSettings(map); ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings, + highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync); assertAcked(setResponse); @@ -79,6 +80,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase { resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON); ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings, + highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync); assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null)); @@ -100,6 +102,7 @@ 
public class ClusterClientIT extends ESRestHighLevelClientTestCase { clusterUpdateSettingsRequest.transientSettings(Settings.builder().put(setting, value).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(clusterUpdateSettingsRequest, + highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync)); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exception.getMessage(), equalTo( diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index f384e5706b0..81c894f242f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -68,12 +68,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { // Testing deletion String docId = "id"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")), RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); if (randomBoolean()) { deleteRequest.version(1L); } - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -83,7 +85,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { // Testing non existing document 
String docId = "does_not_exist"; DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId); - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -92,10 +95,12 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { // Testing version conflict String docId = "version_conflict"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar"))); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")), RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync)); + () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + docId + "]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); @@ -104,10 +109,12 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { // Testing version type String docId = "version_type"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) - .versionType(VersionType.EXTERNAL).version(12)); + highLevelClient().index( + new IndexRequest("index", "type", 
docId).source(Collections.singletonMap("foo", "bar")) + .versionType(VersionType.EXTERNAL).version(12), RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(13); - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -116,11 +123,13 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { // Testing version type with a wrong version String docId = "wrong_version"; - highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) - .versionType(VersionType.EXTERNAL).version(12)); + highLevelClient().index( + new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")) + .versionType(VersionType.EXTERNAL).version(12), RequestOptions.DEFAULT); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).versionType(VersionType.EXTERNAL).version(10); - execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][" + @@ -130,9 +139,11 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { // Testing routing String docId = "routing"; - highLevelClient().index(new IndexRequest("index", "type", 
docId).source(Collections.singletonMap("foo", "bar")).routing("foo")); + highLevelClient().index(new IndexRequest("index", "type", docId).source(Collections.singletonMap("foo", "bar")).routing("foo"), + RequestOptions.DEFAULT); DeleteRequest deleteRequest = new DeleteRequest("index", "type", docId).routing("foo"); - DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); + DeleteResponse deleteResponse = execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync, + highLevelClient()::delete, highLevelClient()::deleteAsync); assertEquals("index", deleteResponse.getIndex()); assertEquals("type", deleteResponse.getType()); assertEquals(docId, deleteResponse.getId()); @@ -143,23 +154,27 @@ public class CrudIT extends ESRestHighLevelClientTestCase { public void testExists() throws IOException { { GetRequest getRequest = new GetRequest("index", "type", "id"); - assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } IndexRequest index = new IndexRequest("index", "type", "id"); index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - highLevelClient().index(index); + highLevelClient().index(index, RequestOptions.DEFAULT); { GetRequest getRequest = new GetRequest("index", "type", "id"); - assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist"); - assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + 
assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist").version(1); - assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); + assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync, + highLevelClient()::exists, highLevelClient()::existsAsync)); } } @@ -167,7 +182,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { GetRequest getRequest = new GetRequest("index", "type", "id"); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); + () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -176,11 +192,12 @@ public class CrudIT extends ESRestHighLevelClientTestCase { String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; index.source(document, XContentType.JSON); index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); - highLevelClient().index(index); + highLevelClient().index(index, RequestOptions.DEFAULT); { GetRequest getRequest = new GetRequest("index", "type", "id").version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); + () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception 
[type=version_conflict_engine_exception, " + "reason=[type][id]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); @@ -191,7 +208,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { if (randomBoolean()) { getRequest.version(1L); } - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("id", getResponse.getId()); @@ -202,7 +220,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } { GetRequest getRequest = new GetRequest("index", "type", "does_not_exist"); - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("does_not_exist", getResponse.getId()); @@ -214,7 +233,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { GetRequest getRequest = new GetRequest("index", "type", "id"); getRequest.fetchSourceContext(new FetchSourceContext(false, Strings.EMPTY_ARRAY, Strings.EMPTY_ARRAY)); - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("id", getResponse.getId()); @@ -230,7 +250,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } else { 
getRequest.fetchSourceContext(new FetchSourceContext(true, Strings.EMPTY_ARRAY, new String[]{"field2"})); } - GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync); + GetResponse getResponse = execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync, + highLevelClient()::get, highLevelClient()::getAsync); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals("id", getResponse.getId()); @@ -248,7 +269,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); multiGetRequest.add("index", "type", "id2"); - MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync, + highLevelClient()::multiGet, highLevelClient()::multiGetAsync); assertEquals(2, response.getResponses().length); assertTrue(response.getResponses()[0].isFailed()); @@ -275,12 +297,13 @@ public class CrudIT extends ESRestHighLevelClientTestCase { index = new IndexRequest("index", "type", "id2"); index.source("{\"field\":\"value2\"}", XContentType.JSON); bulk.add(index); - highLevelClient().bulk(bulk); + highLevelClient().bulk(bulk, RequestOptions.DEFAULT); { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); multiGetRequest.add("index", "type", "id2"); - MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync); + MultiGetResponse response = execute(multiGetRequest, highLevelClient()::multiGet, highLevelClient()::multiGetAsync, + highLevelClient()::multiGet, highLevelClient()::multiGetAsync); assertEquals(2, response.getResponses().length); assertFalse(response.getResponses()[0].isFailed()); @@ -305,7 +328,8 @@ 
public class CrudIT extends ESRestHighLevelClientTestCase { IndexRequest indexRequest = new IndexRequest("index", "type"); indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("test", "test").endObject()); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); assertEquals("index", indexResponse.getIndex()); @@ -326,7 +350,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 1).endObject()); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); @@ -336,7 +361,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("version", 2).endObject()); - indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.OK, indexResponse.status()); assertEquals("index", 
indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); @@ -348,7 +374,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { wrongRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject()); wrongRequest.version(5L); - execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync); + execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: " + @@ -361,7 +388,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject()); indexRequest.setPipeline("missing"); - execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.BAD_REQUEST, exception.status()); @@ -374,7 +402,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { indexRequest.version(12L); indexRequest.versionType(VersionType.EXTERNAL); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); @@ -386,14 +415,16 @@ public class CrudIT extends ESRestHighLevelClientTestCase { indexRequest.source(XContentBuilder.builder(xContentType.xContent()).startObject().field("field", "test").endObject()); 
indexRequest.opType(DocWriteRequest.OpType.CREATE); - IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + IndexResponse indexResponse = execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals("with_create_op_type", indexResponse.getId()); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> { - execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync); + execute(indexRequest, highLevelClient()::index, highLevelClient()::indexAsync, + highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); @@ -408,7 +439,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> - execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync)); + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertEquals("Elasticsearch exception [type=document_missing_exception, reason=[type][does_not_exist]: document missing]", exception.getMessage()); @@ -416,7 +448,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source(singletonMap("field", "value")); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); 
assertEquals(RestStatus.CREATED, indexResponse.status()); UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); @@ -431,7 +463,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequestConflict.version(indexResponse.getVersion()); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> - execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync)); + execute(updateRequestConflict, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[type][id]: version conflict, " + "current version [2] is different than the one provided [1]]", exception.getMessage()); @@ -439,7 +472,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { IndexRequest indexRequest = new IndexRequest("index", "type", "with_script"); indexRequest.source(singletonMap("counter", 12)); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); UpdateRequest updateRequest = new UpdateRequest("index", "type", "with_script"); @@ -447,7 +480,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequest.script(script); updateRequest.fetchSource(true); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.OK, updateResponse.status()); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertEquals(2L, 
updateResponse.getVersion()); @@ -459,7 +493,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { indexRequest.source("field_1", "one", "field_3", "three"); indexRequest.version(12L); indexRequest.versionType(VersionType.EXTERNAL); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(12L, indexResponse.getVersion()); @@ -467,7 +501,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequest.doc(singletonMap("field_2", "two"), randomFrom(XContentType.values())); updateRequest.fetchSource("field_*", "field_3"); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.OK, updateResponse.status()); assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult()); assertEquals(13L, updateResponse.getVersion()); @@ -481,14 +516,15 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { IndexRequest indexRequest = new IndexRequest("index", "type", "noop"); indexRequest.source("field", "value"); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(RestStatus.CREATED, indexResponse.status()); assertEquals(1L, indexResponse.getVersion()); UpdateRequest updateRequest = new UpdateRequest("index", "type", "noop"); updateRequest.doc(singletonMap("field", "value"), randomFrom(XContentType.values())); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, 
highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.OK, updateResponse.status()); assertEquals(DocWriteResponse.Result.NOOP, updateResponse.getResult()); assertEquals(1L, updateResponse.getVersion()); @@ -506,7 +542,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequest.doc(singletonMap("doc_status", "updated")); updateRequest.fetchSource(true); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); assertEquals("type", updateResponse.getType()); @@ -521,7 +558,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequest.fetchSource(true); updateRequest.docAsUpsert(true); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); assertEquals("type", updateResponse.getType()); @@ -537,7 +575,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { updateRequest.scriptedUpsert(true); updateRequest.upsert(singletonMap("level", "A")); - UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + UpdateResponse updateResponse = execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); 
assertEquals(RestStatus.CREATED, updateResponse.status()); assertEquals("index", updateResponse.getIndex()); assertEquals("type", updateResponse.getType()); @@ -552,7 +591,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { UpdateRequest updateRequest = new UpdateRequest("index", "type", "id"); updateRequest.doc(new IndexRequest().source(Collections.singletonMap("field", "doc"), XContentType.JSON)); updateRequest.upsert(new IndexRequest().source(Collections.singletonMap("field", "upsert"), XContentType.YAML)); - execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync); + execute(updateRequest, highLevelClient()::update, highLevelClient()::updateAsync, + highLevelClient()::update, highLevelClient()::updateAsync); }); assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents", exception.getMessage()); @@ -575,7 +615,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { if (opType == DocWriteRequest.OpType.DELETE) { if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } DeleteRequest deleteRequest = new DeleteRequest("index", "test", id); bulkRequest.add(deleteRequest); @@ -593,7 +634,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } else if (opType == DocWriteRequest.OpType.CREATE) { IndexRequest createRequest = new IndexRequest("index", "test", id).source(source, xContentType).create(true); if (erroneous) { - assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status()); + assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest, RequestOptions.DEFAULT).status()); } bulkRequest.add(createRequest); @@ -602,14 +643,16 @@ public class CrudIT extends ESRestHighLevelClientTestCase { .doc(new 
IndexRequest().source(source, xContentType)); if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } bulkRequest.add(updateRequest); } } } - BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync); + BulkResponse bulkResponse = execute(bulkRequest, highLevelClient()::bulk, highLevelClient()::bulkAsync, + highLevelClient()::bulk, highLevelClient()::bulkAsync); assertEquals(RestStatus.OK, bulkResponse.status()); assertTrue(bulkResponse.getTook().getMillis() > 0); assertEquals(nbItems, bulkResponse.getItems().length); @@ -662,7 +705,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { if (opType == DocWriteRequest.OpType.DELETE) { if (erroneous == false) { assertEquals(RestStatus.CREATED, - highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } DeleteRequest deleteRequest = new DeleteRequest("index", "test", id); processor.add(deleteRequest); @@ -678,7 +722,7 @@ public class CrudIT extends ESRestHighLevelClientTestCase { } else if (opType == DocWriteRequest.OpType.CREATE) { IndexRequest createRequest = new IndexRequest("index", "test", id).source(xContentType, "id", i).create(true); if (erroneous) { - assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest).status()); + assertEquals(RestStatus.CREATED, highLevelClient().index(createRequest, RequestOptions.DEFAULT).status()); } processor.add(createRequest); @@ -687,7 +731,8 @@ public class CrudIT extends ESRestHighLevelClientTestCase { .doc(new IndexRequest().source(xContentType, "id", i)); if (erroneous == false) { assertEquals(RestStatus.CREATED, - 
highLevelClient().index(new IndexRequest("index", "test", id).source("field", -1)).status()); + highLevelClient().index( + new IndexRequest("index", "test", id).source("field", -1), RequestOptions.DEFAULT).status()); } processor.add(updateRequest); } @@ -739,14 +784,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { IndexRequest indexRequest = new IndexRequest(indexPattern, "type", "id#1"); indexRequest.source("field", "value"); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals(expectedIndex, indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals("id#1", indexResponse.getId()); } { GetRequest getRequest = new GetRequest(indexPattern, "type", "id#1"); - GetResponse getResponse = highLevelClient().get(getRequest); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals(expectedIndex, getResponse.getIndex()); assertEquals("type", getResponse.getType()); @@ -757,21 +802,21 @@ public class CrudIT extends ESRestHighLevelClientTestCase { { IndexRequest indexRequest = new IndexRequest("index", "type", docId); indexRequest.source("field", "value"); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals(docId, indexResponse.getId()); } { GetRequest getRequest = new GetRequest("index", "type", docId); - GetResponse getResponse = highLevelClient().get(getRequest); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); assertEquals(docId, 
getResponse.getId()); } - assertTrue(highLevelClient().indices().exists(new GetIndexRequest().indices(indexPattern, "index"))); + assertTrue(highLevelClient().indices().exists(new GetIndexRequest().indices(indexPattern, "index"), RequestOptions.DEFAULT)); } public void testParamsEncode() throws IOException { @@ -781,14 +826,14 @@ public class CrudIT extends ESRestHighLevelClientTestCase { IndexRequest indexRequest = new IndexRequest("index", "type", "id"); indexRequest.source("field", "value"); indexRequest.routing(routing); - IndexResponse indexResponse = highLevelClient().index(indexRequest); + IndexResponse indexResponse = highLevelClient().index(indexRequest, RequestOptions.DEFAULT); assertEquals("index", indexResponse.getIndex()); assertEquals("type", indexResponse.getType()); assertEquals("id", indexResponse.getId()); } { GetRequest getRequest = new GetRequest("index", "type", "id").routing(routing); - GetResponse getResponse = highLevelClient().get(getRequest); + GetResponse getResponse = highLevelClient().get(getRequest, RequestOptions.DEFAULT); assertTrue(getResponse.isExists()); assertEquals("index", getResponse.getIndex()); assertEquals("type", getResponse.getType()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index f7a934405c2..14fe0e01d31 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -60,23 +60,60 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { * Executes the provided request using either the sync method or its async variant, both provided as functions */ protected static Resp execute(Req request, SyncMethod syncMethod, - AsyncMethod asyncMethod, Header... 
headers) throws IOException { + AsyncMethod asyncMethod) throws IOException { if (randomBoolean()) { - return syncMethod.execute(request, headers); + return syncMethod.execute(request, RequestOptions.DEFAULT); } else { PlainActionFuture future = PlainActionFuture.newFuture(); - asyncMethod.execute(request, future, headers); + asyncMethod.execute(request, RequestOptions.DEFAULT, future); return future.actionGet(); } } @FunctionalInterface protected interface SyncMethod { - Response execute(Request request, Header... headers) throws IOException; + Response execute(Request request, RequestOptions options) throws IOException; } @FunctionalInterface protected interface AsyncMethod { + void execute(Request request, RequestOptions options, ActionListener listener); + } + + /** + * Executes the provided request using either the sync method or its async variant, both provided as functions + */ + @Deprecated + protected static Resp execute(Req request, SyncMethod syncMethod, AsyncMethod asyncMethod, + SyncMethodWithHeaders syncMethodWithHeaders, + AsyncMethodWithHeaders asyncMethodWithHeaders) throws IOException { + switch(randomIntBetween(0, 3)) { + case 0: + return syncMethod.execute(request, RequestOptions.DEFAULT); + case 1: + PlainActionFuture future = PlainActionFuture.newFuture(); + asyncMethod.execute(request, RequestOptions.DEFAULT, future); + return future.actionGet(); + case 2: + return syncMethodWithHeaders.execute(request); + case 3: + PlainActionFuture futureWithHeaders = PlainActionFuture.newFuture(); + asyncMethodWithHeaders.execute(request, futureWithHeaders); + return futureWithHeaders.actionGet(); + default: + throw new UnsupportedOperationException(); + } + } + + @Deprecated + @FunctionalInterface + protected interface SyncMethodWithHeaders { + Response execute(Request request, Header... 
headers) throws IOException; + } + + @Deprecated + @FunctionalInterface + protected interface AsyncMethodWithHeaders { void execute(Request request, ActionListener listener, Header... headers); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 55357e06ab2..986c3380ff3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -110,6 +110,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { boolean response = execute( request, highLevelClient().indices()::exists, + highLevelClient().indices()::existsAsync, + highLevelClient().indices()::exists, highLevelClient().indices()::existsAsync ); assertTrue(response); @@ -125,6 +127,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { boolean response = execute( request, highLevelClient().indices()::exists, + highLevelClient().indices()::existsAsync, + highLevelClient().indices()::exists, highLevelClient().indices()::existsAsync ); assertFalse(response); @@ -143,6 +147,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { boolean response = execute( request, highLevelClient().indices()::exists, + highLevelClient().indices()::existsAsync, + highLevelClient().indices()::exists, highLevelClient().indices()::existsAsync ); assertFalse(response); @@ -160,7 +166,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync, + highLevelClient().indices()::create, 
highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); assertTrue(indexExists(indexName)); @@ -188,7 +195,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { createIndexRequest.mapping("type_name", mappingBuilder); CreateIndexResponse createIndexResponse = - execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync); + execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync, + highLevelClient().indices()::create, highLevelClient().indices()::createAsync); assertTrue(createIndexResponse.isAcknowledged()); Map getIndexResponse = getAsMap(indexName); @@ -323,7 +331,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { putMappingRequest.source(mappingBuilder); PutMappingResponse putMappingResponse = - execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); + execute(putMappingRequest, highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync, + highLevelClient().indices()::putMapping, highLevelClient().indices()::putMappingAsync); assertTrue(putMappingResponse.isAcknowledged()); Map getIndexResponse = getAsMap(indexName); @@ -375,7 +384,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName); DeleteIndexResponse deleteIndexResponse = - execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync); + execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync, + highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync); assertTrue(deleteIndexResponse.isAcknowledged()); assertFalse(indexExists(indexName)); @@ -388,7 +398,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { 
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); + () -> execute(deleteIndexRequest, highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync, + highLevelClient().indices()::delete, highLevelClient().indices()::deleteAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -407,6 +418,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}"); aliasesAddRequest.addAliasAction(addAction); IndicesAliasesResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync); assertTrue(aliasesAddResponse.isAcknowledged()); assertThat(aliasExists(alias), equalTo(true)); @@ -425,6 +437,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { AliasActions removeAction = new AliasActions(AliasActions.Type.REMOVE).index(index).alias(alias); aliasesAddRemoveRequest.addAliasAction(removeAction); IndicesAliasesResponse aliasesAddRemoveResponse = execute(aliasesAddRemoveRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync); assertTrue(aliasesAddRemoveResponse.isAcknowledged()); assertThat(aliasExists(alias), equalTo(false)); @@ -436,6 +449,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { AliasActions removeIndexAction = new AliasActions(AliasActions.Type.REMOVE_INDEX).index(index); aliasesRemoveIndexRequest.addAliasAction(removeIndexAction); 
IndicesAliasesResponse aliasesRemoveIndexResponse = execute(aliasesRemoveIndexRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync); assertTrue(aliasesRemoveIndexResponse.isAcknowledged()); assertThat(aliasExists(alias), equalTo(false)); @@ -453,7 +467,9 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { IndicesAliasesRequest nonExistentIndexRequest = new IndicesAliasesRequest(); nonExistentIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias)); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(nonExistentIndexRequest, - highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); + highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync, + highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); @@ -463,7 +479,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).indices(index).aliases(alias)); mixedRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE).indices(nonExistentIndex).alias(alias)); exception = expectThrows(ElasticsearchStatusException.class, - () -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); + () -> execute(mixedRequest, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync, + highLevelClient().indices()::updateAliases, 
highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); assertThat(exception.getMetadata("es.index"), hasItem(nonExistentIndex)); @@ -475,6 +492,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index(nonExistentIndex).alias(alias)); removeIndexRequest.addAliasAction(new AliasActions(AliasActions.Type.REMOVE_INDEX).indices(nonExistentIndex)); exception = expectThrows(ElasticsearchException.class, () -> execute(removeIndexRequest, highLevelClient().indices()::updateAliases, + highLevelClient().indices()::updateAliasesAsync, highLevelClient().indices()::updateAliases, highLevelClient().indices()::updateAliasesAsync)); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); @@ -495,6 +513,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { OpenIndexRequest openIndexRequest = new OpenIndexRequest(index); OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync, highLevelClient().indices()::open, highLevelClient().indices()::openAsync); assertTrue(openIndexResponse.isAcknowledged()); @@ -508,19 +527,22 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync, + 
highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::open, + highLevelClient().indices()::openAsync, highLevelClient().indices()::open, highLevelClient().indices()::openAsync); assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true)); OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndex); strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen()); ElasticsearchException strictException = expectThrows(ElasticsearchException.class, - () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); + () -> execute(openIndexRequest, highLevelClient().indices()::open, highLevelClient().indices()::openAsync, + highLevelClient().indices()::open, highLevelClient().indices()::openAsync)); assertEquals(RestStatus.NOT_FOUND, strictException.status()); } @@ -532,6 +554,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(index); CloseIndexResponse closeIndexResponse = execute(closeIndexRequest, highLevelClient().indices()::close, + highLevelClient().indices()::closeAsync, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync); assertTrue(closeIndexResponse.isAcknowledged()); @@ -547,7 +570,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { CloseIndexRequest closeIndexRequest = new CloseIndexRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); + () -> 
execute(closeIndexRequest, highLevelClient().indices()::close, highLevelClient().indices()::closeAsync, + highLevelClient().indices()::close, highLevelClient().indices()::closeAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } @@ -561,7 +585,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { createIndex(index, settings); RefreshRequest refreshRequest = new RefreshRequest(index); RefreshResponse refreshResponse = - execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync); + execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync, + highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync); assertThat(refreshResponse.getTotalShards(), equalTo(1)); assertThat(refreshResponse.getSuccessfulShards(), equalTo(1)); assertThat(refreshResponse.getFailedShards(), equalTo(0)); @@ -572,7 +597,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { assertFalse(indexExists(nonExistentIndex)); RefreshRequest refreshRequest = new RefreshRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync)); + () -> execute(refreshRequest, highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync, + highLevelClient().indices()::refresh, highLevelClient().indices()::refreshAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -587,7 +613,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { createIndex(index, settings); FlushRequest flushRequest = new FlushRequest(index); FlushResponse flushResponse = - execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync); + execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync, + 
highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync); assertThat(flushResponse.getTotalShards(), equalTo(1)); assertThat(flushResponse.getSuccessfulShards(), equalTo(1)); assertThat(flushResponse.getFailedShards(), equalTo(0)); @@ -598,7 +625,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { assertFalse(indexExists(nonExistentIndex)); FlushRequest flushRequest = new FlushRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync)); + () -> execute(flushRequest, highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync, + highLevelClient().indices()::flush, highLevelClient().indices()::flushAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -646,7 +674,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { createIndex(index, settings); ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest(index); ClearIndicesCacheResponse clearCacheResponse = - execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync); + execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync, + highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync); assertThat(clearCacheResponse.getTotalShards(), equalTo(1)); assertThat(clearCacheResponse.getSuccessfulShards(), equalTo(1)); assertThat(clearCacheResponse.getFailedShards(), equalTo(0)); @@ -657,8 +686,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { assertFalse(indexExists(nonExistentIndex)); ClearIndicesCacheRequest clearCacheRequest = new ClearIndicesCacheRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(clearCacheRequest, 
highLevelClient().indices()::clearCache, - highLevelClient().indices()::clearCacheAsync)); + () -> execute(clearCacheRequest, highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync, + highLevelClient().indices()::clearCache, highLevelClient().indices()::clearCacheAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } @@ -673,7 +702,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { createIndex(index, settings); ForceMergeRequest forceMergeRequest = new ForceMergeRequest(index); ForceMergeResponse forceMergeResponse = - execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync); + execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync, + highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync); assertThat(forceMergeResponse.getTotalShards(), equalTo(1)); assertThat(forceMergeResponse.getSuccessfulShards(), equalTo(1)); assertThat(forceMergeResponse.getFailedShards(), equalTo(0)); @@ -684,25 +714,30 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { assertFalse(indexExists(nonExistentIndex)); ForceMergeRequest forceMergeRequest = new ForceMergeRequest(nonExistentIndex); ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); + () -> execute(forceMergeRequest, highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync, + highLevelClient().indices()::forceMerge, highLevelClient().indices()::forceMergeAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } } public void testExistsAlias() throws IOException { GetAliasesRequest getAliasesRequest = new GetAliasesRequest("alias"); - assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, 
highLevelClient().indices()::existsAliasAsync)); + assertFalse(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); createIndex("index", Settings.EMPTY); client().performRequest(HttpPut.METHOD_NAME, "/index/_alias/alias"); - assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertTrue(execute(getAliasesRequest, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); GetAliasesRequest getAliasesRequest2 = new GetAliasesRequest(); getAliasesRequest2.aliases("alias"); getAliasesRequest2.indices("index"); - assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertTrue(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); getAliasesRequest2.indices("does_not_exist"); - assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); + assertFalse(execute(getAliasesRequest2, highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync, + highLevelClient().indices()::existsAlias, highLevelClient().indices()::existsAliasAsync)); } @SuppressWarnings("unchecked") @@ -722,7 +757,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { .putNull("index.routing.allocation.require._name") .build(); resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); - ResizeResponse resizeResponse = 
highLevelClient().indices().shrink(resizeRequest); + ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::shrink, + highLevelClient().indices()::shrinkAsync, highLevelClient().indices()::shrink, highLevelClient().indices()::shrinkAsync); assertTrue(resizeResponse.isAcknowledged()); assertTrue(resizeResponse.isShardsAcknowledged()); Map getIndexResponse = getAsMap("target"); @@ -744,7 +780,8 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { resizeRequest.setResizeType(ResizeType.SPLIT); Settings targetSettings = Settings.builder().put("index.number_of_shards", 4).put("index.number_of_replicas", 0).build(); resizeRequest.setTargetIndex(new CreateIndexRequest("target").settings(targetSettings).alias(new Alias("alias"))); - ResizeResponse resizeResponse = highLevelClient().indices().split(resizeRequest); + ResizeResponse resizeResponse = execute(resizeRequest, highLevelClient().indices()::split, highLevelClient().indices()::splitAsync, + highLevelClient().indices()::split, highLevelClient().indices()::splitAsync); assertTrue(resizeResponse.isAcknowledged()); assertTrue(resizeResponse.isShardsAcknowledged()); Map getIndexResponse = getAsMap("target"); @@ -757,12 +794,13 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { } public void testRollover() throws IOException { - highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias"))); + highLevelClient().indices().create(new CreateIndexRequest("test").alias(new Alias("alias")), RequestOptions.DEFAULT); RolloverRequest rolloverRequest = new RolloverRequest("alias", "test_new"); rolloverRequest.addMaxIndexDocsCondition(1); { RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover, highLevelClient().indices()::rolloverAsync); assertFalse(rolloverResponse.isRolledOver()); 
assertFalse(rolloverResponse.isDryRun()); @@ -773,15 +811,16 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { assertEquals("test_new", rolloverResponse.getNewIndex()); } - highLevelClient().index(new IndexRequest("test", "type", "1").source("field", "value")); + highLevelClient().index(new IndexRequest("test", "type", "1").source("field", "value"), RequestOptions.DEFAULT); highLevelClient().index(new IndexRequest("test", "type", "2").source("field", "value") - .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL)); + .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL), RequestOptions.DEFAULT); //without the refresh the rollover may not happen as the number of docs seen may be off { rolloverRequest.addMaxIndexAgeCondition(new TimeValue(1)); rolloverRequest.dryRun(true); RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover, highLevelClient().indices()::rolloverAsync); assertFalse(rolloverResponse.isRolledOver()); assertTrue(rolloverResponse.isDryRun()); @@ -796,6 +835,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { rolloverRequest.dryRun(false); rolloverRequest.addMaxIndexSizeCondition(new ByteSizeValue(1, ByteSizeUnit.MB)); RolloverResponse rolloverResponse = execute(rolloverRequest, highLevelClient().indices()::rollover, + highLevelClient().indices()::rolloverAsync, highLevelClient().indices()::rollover, highLevelClient().indices()::rolloverAsync); assertTrue(rolloverResponse.isRolledOver()); assertFalse(rolloverResponse.isDryRun()); @@ -830,6 +870,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); UpdateSettingsResponse response = execute(dynamicSettingRequest, 
highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync); assertTrue(response.isAcknowledged()); @@ -840,6 +881,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.getMessage(), startsWith("Elasticsearch exception [type=illegal_argument_exception, " @@ -850,6 +892,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { closeIndex(index); response = execute(staticSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync); assertTrue(response.isAcknowledged()); openIndex(index); @@ -860,6 +903,7 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase { UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.getMessage(), startsWith( "Elasticsearch exception [type=illegal_argument_exception, " @@ -887,12 +931,14 @@ public class IndicesClientIT 
extends ESRestHighLevelClientTestCase { indexUpdateSettingsRequest.settings(Settings.builder().put(setting, value).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); createIndex(index, Settings.EMPTY); exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exception.getMessage(), equalTo( diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java index b4d8828eb7e..057ea49f9a9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/PingAndInfoIT.java @@ -28,12 +28,12 @@ import java.util.Map; public class PingAndInfoIT extends ESRestHighLevelClientTestCase { public void testPing() throws IOException { - assertTrue(highLevelClient().ping()); + assertTrue(highLevelClient().ping(RequestOptions.DEFAULT)); } @SuppressWarnings("unchecked") public void testInfo() throws IOException { - MainResponse info = highLevelClient().info(); + MainResponse info = highLevelClient().info(RequestOptions.DEFAULT); // compare with what the low level client outputs Map infoAsMap = 
entityAsMap(adminClient().performRequest(HttpGet.METHOD_NAME, "/")); assertEquals(infoAsMap.get("cluster_name"), info.getClusterName().value()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java index 9497bdded05..1e12f3f5e62 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java @@ -82,8 +82,8 @@ public class RankEvalIT extends ESRestHighLevelClientTestCase { RankEvalSpec spec = new RankEvalSpec(specifications, metric); RankEvalRequest rankEvalRequest = new RankEvalRequest(spec, new String[] { "index", "index2" }); - RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval, - highLevelClient()::rankEvalAsync); + RankEvalResponse response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync, + highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); // the expected Prec@ for the first query is 5/7 and the expected Prec@ for the second is 1/7, divided by 2 to get the average double expectedPrecision = (1.0 / 7.0 + 5.0 / 7.0) / 2.0; assertEquals(expectedPrecision, response.getEvaluationResult(), Double.MIN_VALUE); @@ -117,7 +117,8 @@ public class RankEvalIT extends ESRestHighLevelClientTestCase { // now try this when test2 is closed client().performRequest("POST", "index2/_close", Collections.emptyMap()); rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); - response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); + response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync, + highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); } private static List createRelevant(String indexName, String... 
docs) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index ee372e255e7..a0312118a8b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -29,6 +29,8 @@ import org.apache.http.entity.ByteArrayEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -1587,6 +1589,23 @@ public class RequestConvertersTests extends ESTestCase { assertEquals(expectedParams, request.getParameters()); } + public void testCancelTasks() { + CancelTasksRequest request = new CancelTasksRequest(); + Map expectedParams = new HashMap<>(); + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + TaskId parentTaskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setTaskId(taskId); + request.setParentTaskId(parentTaskId); + expectedParams.put("task_id", taskId.toString()); + expectedParams.put("parent_task_id", parentTaskId.toString()); + Request httpRequest = RequestConverters.cancelTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpPost.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + assertThat(httpRequest.getEndpoint(), equalTo("/_tasks/_cancel")); 
+ assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + public void testListTasks() { { ListTasksRequest request = new ListTasksRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index 5ca9b05f73a..9084a547c16 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import com.fasterxml.jackson.core.JsonParseException; - import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -28,10 +27,7 @@ import org.apache.http.HttpResponse; import org.apache.http.ProtocolVersion; import org.apache.http.RequestLine; import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; @@ -77,9 +73,6 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalAggregationTestCase; import org.junit.Before; -import org.mockito.ArgumentMatcher; -import org.mockito.internal.matchers.ArrayEquals; -import org.mockito.internal.matchers.VarargMatcher; import java.io.IOException; import java.net.SocketTimeoutException; @@ -124,25 +117,22 @@ public class RestHighLevelClientTests extends ESTestCase { } public void testPingSuccessful() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); Response response = mock(Response.class); 
when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK)); when(restClient.performRequest(any(Request.class))).thenReturn(response); - assertTrue(restHighLevelClient.ping(headers)); + assertTrue(restHighLevelClient.ping(RequestOptions.DEFAULT)); } public void testPing404NotFound() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); Response response = mock(Response.class); when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND)); when(restClient.performRequest(any(Request.class))).thenReturn(response); - assertFalse(restHighLevelClient.ping(headers)); + assertFalse(restHighLevelClient.ping(RequestOptions.DEFAULT)); } public void testPingSocketTimeout() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); when(restClient.performRequest(any(Request.class))).thenThrow(new SocketTimeoutException()); - expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers)); + expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(RequestOptions.DEFAULT)); } public void testInfo() throws IOException { @@ -150,18 +140,17 @@ public class RestHighLevelClientTests extends ESTestCase { MainResponse testInfo = new MainResponse("nodeName", Version.CURRENT, new ClusterName("clusterName"), "clusterUuid", Build.CURRENT); mockResponse(testInfo); - MainResponse receivedInfo = restHighLevelClient.info(headers); + MainResponse receivedInfo = restHighLevelClient.info(RequestOptions.DEFAULT); assertEquals(testInfo, receivedInfo); } public void testSearchScroll() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); SearchResponse mockSearchResponse = new SearchResponse(new SearchResponseSections(SearchHits.empty(), InternalAggregations.EMPTY, null, false, false, null, 1), randomAlphaOfLengthBetween(5, 10), 5, 5, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); mockResponse(mockSearchResponse); - SearchResponse searchResponse = 
restHighLevelClient.searchScroll(new SearchScrollRequest(randomAlphaOfLengthBetween(5, 10)), - headers); + SearchResponse searchResponse = restHighLevelClient.searchScroll( + new SearchScrollRequest(randomAlphaOfLengthBetween(5, 10)), RequestOptions.DEFAULT); assertEquals(mockSearchResponse.getScrollId(), searchResponse.getScrollId()); assertEquals(0, searchResponse.getHits().totalHits); assertEquals(5, searchResponse.getTotalShards()); @@ -170,12 +159,11 @@ public class RestHighLevelClientTests extends ESTestCase { } public void testClearScroll() throws IOException { - Header[] headers = randomHeaders(random(), "Header"); ClearScrollResponse mockClearScrollResponse = new ClearScrollResponse(randomBoolean(), randomIntBetween(0, Integer.MAX_VALUE)); mockResponse(mockClearScrollResponse); ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10)); - ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers); + ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, RequestOptions.DEFAULT); assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded()); assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index e147642fc73..80d09acf281 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -164,7 +164,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { public void testSearchMatchQuery() throws IOException { SearchRequest searchRequest = new SearchRequest("index"); searchRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); - 
SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getAggregations()); assertNull(searchResponse.getSuggest()); @@ -190,7 +191,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.aggregation(new TermsAggregationBuilder("agg1", ValueType.STRING).field("type.keyword")); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -216,7 +218,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { searchRequest.source(searchSourceBuilder); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, - () -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync)); + () -> execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync)); assertEquals(RestStatus.BAD_REQUEST, exception.status()); } @@ -226,7 +229,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { .addRange("first", 0, 30).addRange("second", 31, 200)); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse 
searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -257,7 +261,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.aggregation(agg); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -308,7 +313,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.aggregation(new MatrixStatsAggregationBuilder("agg1").fields(Arrays.asList("num", "num2"))); searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -397,7 +403,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { SearchRequest searchRequest = new SearchRequest(indexName); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = 
execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getSuggest()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -437,7 +444,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.size(0); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); assertSearchHeader(searchResponse); assertNull(searchResponse.getAggregations()); assertEquals(Collections.emptyMap(), searchResponse.getProfileResults()); @@ -469,7 +477,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { { SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource() .scriptField("result", new Script("null"))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); SearchHit searchHit = searchResponse.getHits().getAt(0); List values = searchHit.getFields().get("result").getValues(); assertNotNull(values); @@ -479,7 +488,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { { SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource() .scriptField("result", new Script("new HashMap()"))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, 
highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); SearchHit searchHit = searchResponse.getHits().getAt(0); List values = searchHit.getFields().get("result").getValues(); assertNotNull(values); @@ -491,7 +501,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { { SearchRequest searchRequest = new SearchRequest("test").source(SearchSourceBuilder.searchSource() .scriptField("result", new Script("new String[]{}"))); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); SearchHit searchHit = searchResponse.getHits().getAt(0); List values = searchHit.getFields().get("result").getValues(); assertNotNull(values); @@ -513,7 +524,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35).sort("field", SortOrder.ASC); SearchRequest searchRequest = new SearchRequest("test").scroll(TimeValue.timeValueMinutes(2)).source(searchSourceBuilder); - SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync, + highLevelClient()::search, highLevelClient()::searchAsync); try { long counter = 0; @@ -525,6 +537,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { } searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)), + highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync, highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync); assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L)); @@ -534,6 
+547,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase { } searchResponse = execute(new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)), + highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync, highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync); assertThat(searchResponse.getHits().getTotalHits(), equalTo(100L)); @@ -545,14 +559,14 @@ public class SearchIT extends ESRestHighLevelClientTestCase { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(searchResponse.getScrollId()); ClearScrollResponse clearScrollResponse = execute(clearScrollRequest, - // Not using a method reference to work around https://bugs.eclipse.org/bugs/show_bug.cgi?id=517951 - (request, headers) -> highLevelClient().clearScroll(request, headers), - (request, listener, headers) -> highLevelClient().clearScrollAsync(request, listener, headers)); + highLevelClient()::clearScroll, highLevelClient()::clearScrollAsync, + highLevelClient()::clearScroll, highLevelClient()::clearScrollAsync); assertThat(clearScrollResponse.getNumFreed(), greaterThan(0)); assertTrue(clearScrollResponse.isSucceeded()); SearchScrollRequest scrollRequest = new SearchScrollRequest(searchResponse.getScrollId()).scroll(TimeValue.timeValueMinutes(2)); ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, () -> execute(scrollRequest, + highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync, highLevelClient()::searchScroll, highLevelClient()::searchScrollAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); assertThat(exception.getRootCause(), instanceOf(ElasticsearchException.class)); @@ -574,7 +588,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { multiSearchRequest.add(searchRequest3); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, 
highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L)); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3)); @@ -616,7 +631,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { multiSearchRequest.add(searchRequest3); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L)); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3)); @@ -664,7 +680,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { multiSearchRequest.add(searchRequest3); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L)); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(3)); @@ -727,7 +744,8 @@ public class SearchIT extends ESRestHighLevelClientTestCase { multiSearchRequest.add(searchRequest2); MultiSearchResponse multiSearchResponse = - execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync); + execute(multiSearchRequest, highLevelClient()::multiSearch, highLevelClient()::multiSearchAsync, + highLevelClient()::multiSearch, 
highLevelClient()::multiSearchAsync); assertThat(multiSearchResponse.getTook().millis(), Matchers.greaterThanOrEqualTo(0L)); assertThat(multiSearchResponse.getResponses().length, Matchers.equalTo(2)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java index fc7d70a36e1..baa97cfa5b4 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TasksIT.java @@ -19,9 +19,12 @@ package org.elasticsearch.client; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; @@ -58,4 +61,26 @@ public class TasksIT extends ESRestHighLevelClientTestCase { assertTrue("List tasks were not found", listTasksFound); } + public void testCancelTasks() throws IOException { + ListTasksRequest listRequest = new ListTasksRequest(); + ListTasksResponse listResponse = execute( + listRequest, + highLevelClient().tasks()::list, + highLevelClient().tasks()::listAsync + ); + // in this case, probably no task will actually be cancelled. 
+ // this is ok, that case is covered in TasksIT.testTasksCancellation + TaskInfo firstTask = listResponse.getTasks().get(0); + String node = listResponse.getPerNodeTasks().keySet().iterator().next(); + + CancelTasksRequest cancelTasksRequest = new CancelTasksRequest(); + cancelTasksRequest.setTaskId(new TaskId(node, firstTask.getId())); + cancelTasksRequest.setReason("testreason"); + CancelTasksResponse response = execute(cancelTasksRequest, + highLevelClient().tasks()::cancel, + highLevelClient().tasks()::cancelAsync); + // Since the task may or may not have been cancelled, assert that we received a response only + // The actual testing of task cancellation is covered by TasksIT.testTasksCancellation + assertThat(response, notNullValue()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 6641aa2fc7d..ef92e28a072 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -48,6 +48,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; @@ -72,13 +73,12 @@ import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.arrayWithSize; -import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.containsString; +import static 
org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.not; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; /** * This class is used to generate the Java CRUD API documentation. @@ -112,7 +112,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1") .source(jsonMap); // <1> //end::index-request-map - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED); } { @@ -128,7 +128,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1") .source(builder); // <1> //end::index-request-xcontent - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -138,7 +138,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { "postDate", new Date(), "message", "trying out Elasticsearch"); // <1> //end::index-request-shortcut - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -156,7 +156,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { //end::index-request-string // tag::index-execute - IndexResponse indexResponse = client.index(request); + IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); // end::index-execute assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED); @@ -214,7 +214,7 @@ public class CRUDDocumentationIT 
extends ESRestHighLevelClientTestCase { .source("field", "value") .version(1); try { - IndexResponse response = client.index(request); + IndexResponse response = client.index(request, RequestOptions.DEFAULT); } catch(ElasticsearchException e) { if (e.status() == RestStatus.CONFLICT) { // <1> @@ -228,7 +228,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { .source("field", "value") .opType(DocWriteRequest.OpType.CREATE); try { - IndexResponse response = client.index(request); + IndexResponse response = client.index(request, RequestOptions.DEFAULT); } catch(ElasticsearchException e) { if (e.status() == RestStatus.CONFLICT) { // <1> @@ -257,7 +257,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::index-execute-async - client.indexAsync(request, listener); // <1> + client.indexAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -268,7 +268,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { RestHighLevelClient client = highLevelClient(); { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", 0); - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); Request request = new Request("POST", "/_scripts/increment-field"); @@ -297,7 +297,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { "ctx._source.field += params.count", parameters); // <2> request.script(inline); // <3> //end::update-request-with-inline-script - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); assertEquals(4, 
updateResponse.getGetResult().getSource().get("field")); @@ -307,7 +307,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { new Script(ScriptType.STORED, null, "increment-field", parameters); // <1> request.script(stored); // <2> //end::update-request-with-stored-script - updateResponse = client.update(request); + updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); assertEquals(8, updateResponse.getGetResult().getSource().get("field")); } @@ -319,7 +319,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { UpdateRequest request = new UpdateRequest("posts", "doc", "1") .doc(jsonMap); // <1> //end::update-request-with-doc-as-map - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -334,7 +334,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { UpdateRequest request = new UpdateRequest("posts", "doc", "1") .doc(builder); // <1> //end::update-request-with-doc-as-xcontent - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -343,7 +343,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { .doc("updated", new Date(), "reason", "daily update"); // <1> //end::update-request-shortcut - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); } { @@ -357,7 +357,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { //end::update-request-with-doc-as-string request.fetchSource(true); 
// tag::update-execute - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); // end::update-execute assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); @@ -406,7 +406,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { UpdateRequest request = new UpdateRequest("posts", "type", "does_not_exist") .doc("field", "value"); try { - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); } catch (ElasticsearchException e) { if (e.status() == RestStatus.NOT_FOUND) { // <1> @@ -420,7 +420,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { .doc("field", "value") .version(1); try { - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); } catch(ElasticsearchException e) { if (e.status() == RestStatus.CONFLICT) { // <1> @@ -433,7 +433,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { //tag::update-request-no-source request.fetchSource(true); // <1> //end::update-request-no-source - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); assertNotNull(updateResponse.getGetResult()); assertEquals(3, updateResponse.getGetResult().sourceAsMap().size()); @@ -445,7 +445,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { String[] excludes = Strings.EMPTY_ARRAY; request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1> //end::update-request-source-include - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), 
DocWriteResponse.Result.UPDATED); Map sourceAsMap = updateResponse.getGetResult().sourceAsMap(); assertEquals(2, sourceAsMap.size()); @@ -459,7 +459,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { String[] excludes = new String[]{"updated"}; request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1> //end::update-request-source-exclude - UpdateResponse updateResponse = client.update(request); + UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT); assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED); Map sourceAsMap = updateResponse.getGetResult().sourceAsMap(); assertEquals(2, sourceAsMap.size()); @@ -525,7 +525,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::update-execute-async - client.updateAsync(request, listener); // <1> + client.updateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::update-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -537,7 +537,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { { IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", "value"); - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); } @@ -550,7 +550,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { // end::delete-request // tag::delete-execute - DeleteResponse deleteResponse = client.delete(request); + DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); // end::delete-execute assertSame(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); @@ -595,7 +595,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { { // tag::delete-notfound DeleteRequest request = new 
DeleteRequest("posts", "doc", "does_not_exist"); - DeleteResponse deleteResponse = client.delete(request); + DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { // <1> } @@ -603,13 +603,14 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { } { - IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value")); + IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value") + , RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); // tag::delete-conflict try { DeleteRequest request = new DeleteRequest("posts", "doc", "1").version(2); - DeleteResponse deleteResponse = client.delete(request); + DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.CONFLICT) { // <1> @@ -618,7 +619,8 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { // end::delete-conflict } { - IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value")); + IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value"), + RequestOptions.DEFAULT); assertSame(indexResponse.status(), RestStatus.CREATED); DeleteRequest request = new DeleteRequest("posts", "doc", "async"); @@ -642,7 +644,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::delete-execute-async - client.deleteAsync(request, listener); // <1> + client.deleteAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -662,7 +664,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { 
.source(XContentType.JSON,"field", "baz")); // end::bulk-request // tag::bulk-execute - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); // end::bulk-execute assertSame(bulkResponse.status(), RestStatus.OK); assertFalse(bulkResponse.hasFailures()); @@ -676,7 +678,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new IndexRequest("posts", "doc", "4") // <3> .source(XContentType.JSON,"field", "baz")); // end::bulk-request-with-mixed-operations - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(bulkResponse.status(), RestStatus.OK); assertFalse(bulkResponse.hasFailures()); @@ -775,7 +777,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { .source("user", "kimchy", "postDate", new Date(), "message", "trying out Elasticsearch"); - IndexResponse indexResponse = client.index(indexRequest); + IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT); assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED); } { @@ -787,7 +789,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { //end::get-request //tag::get-execute - GetResponse getResponse = client.get(getRequest); + GetResponse getResponse = client.get(getRequest, RequestOptions.DEFAULT); //end::get-execute assertTrue(getResponse.isExists()); assertEquals(3, getResponse.getSourceAsMap().size()); @@ -810,7 +812,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { //tag::get-request-no-source request.fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); // <1> //end::get-request-no-source - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); assertNull(getResponse.getSourceInternal()); } { @@ -822,7 +824,7 @@ public class 
CRUDDocumentationIT extends ESRestHighLevelClientTestCase { new FetchSourceContext(true, includes, excludes); request.fetchSourceContext(fetchSourceContext); // <1> //end::get-request-source-include - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); Map sourceAsMap = getResponse.getSourceAsMap(); assertEquals(2, sourceAsMap.size()); assertEquals("trying out Elasticsearch", sourceAsMap.get("message")); @@ -837,7 +839,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { new FetchSourceContext(true, includes, excludes); request.fetchSourceContext(fetchSourceContext); // <1> //end::get-request-source-exclude - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); Map sourceAsMap = getResponse.getSourceAsMap(); assertEquals(2, sourceAsMap.size()); assertEquals("kimchy", sourceAsMap.get("user")); @@ -847,7 +849,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { GetRequest request = new GetRequest("posts", "doc", "1"); //tag::get-request-stored request.storedFields("message"); // <1> - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); String message = getResponse.getField("message").getValue(); // <2> //end::get-request-stored assertEquals("trying out Elasticsearch", message); @@ -897,7 +899,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); //tag::get-execute-async - client.getAsync(request, listener); // <1> + client.getAsync(request, RequestOptions.DEFAULT, listener); // <1> //end::get-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -906,7 +908,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { //tag::get-indexnotfound GetRequest request = new GetRequest("does_not_exist", "doc", "1"); try 
{ - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); } catch (ElasticsearchException e) { if (e.status() == RestStatus.NOT_FOUND) { // <1> @@ -918,7 +920,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { // tag::get-conflict try { GetRequest request = new GetRequest("posts", "doc", "1").version(2); - GetResponse getResponse = client.get(request); + GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.CONFLICT) { // <1> @@ -940,7 +942,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { // end::exists-request { // tag::exists-execute - boolean exists = client.exists(getRequest); + boolean exists = client.exists(getRequest, RequestOptions.DEFAULT); // end::exists-execute assertFalse(exists); } @@ -964,7 +966,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::exists-execute-async - client.existsAsync(getRequest, listener); // <1> + client.existsAsync(getRequest, RequestOptions.DEFAULT, listener); // <1> // end::exists-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1091,7 +1093,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { source.put("baz", "val3"); client.index(new IndexRequest("index", "type", "example_id") .source(source) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE)); + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), RequestOptions.DEFAULT); { // tag::multi-get-request @@ -1120,7 +1122,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { // end::multi-get-request-top-level-extras // tag::multi-get-execute - MultiGetResponse response = client.multiGet(request); + MultiGetResponse response = client.multiGet(request, RequestOptions.DEFAULT); // end::multi-get-execute // 
tag::multi-get-response @@ -1184,7 +1186,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new MultiGetRequest.Item("index", "type", "example_id") .fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)); // <1> // end::multi-get-request-no-source - MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request)); + MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request, RequestOptions.DEFAULT)); assertNull(item.getResponse().getSource()); } { @@ -1197,7 +1199,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new MultiGetRequest.Item("index", "type", "example_id") .fetchSourceContext(fetchSourceContext)); // <1> // end::multi-get-request-source-include - MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request)); + MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request, RequestOptions.DEFAULT)); assertThat(item.getResponse().getSource(), hasEntry("foo", "val1")); assertThat(item.getResponse().getSource(), hasEntry("bar", "val2")); assertThat(item.getResponse().getSource(), not(hasKey("baz"))); @@ -1212,7 +1214,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new MultiGetRequest.Item("index", "type", "example_id") .fetchSourceContext(fetchSourceContext)); // <1> // end::multi-get-request-source-exclude - MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request)); + MultiGetItemResponse item = unwrapAndAssertExample(client.multiGet(request, RequestOptions.DEFAULT)); assertThat(item.getResponse().getSource(), not(hasKey("foo"))); assertThat(item.getResponse().getSource(), not(hasKey("bar"))); assertThat(item.getResponse().getSource(), hasEntry("baz", "val3")); @@ -1222,7 +1224,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { // tag::multi-get-request-stored request.add(new MultiGetRequest.Item("index", "type", "example_id") 
.storedFields("foo")); // <1> - MultiGetResponse response = client.multiGet(request); + MultiGetResponse response = client.multiGet(request, RequestOptions.DEFAULT); MultiGetItemResponse item = response.getResponses()[0]; String value = item.getResponse().getField("foo").getValue(); // <2> // end::multi-get-request-stored @@ -1234,7 +1236,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase { MultiGetRequest request = new MultiGetRequest(); request.add(new MultiGetRequest.Item("index", "type", "example_id") .version(1000L)); - MultiGetResponse response = client.multiGet(request); + MultiGetResponse response = client.multiGet(request, RequestOptions.DEFAULT); MultiGetItemResponse item = response.getResponses()[0]; assertNull(item.getResponse()); // <1> Exception e = item.getFailure().getFailure(); // <2> diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 304c5010a47..75902cf02ba 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -23,27 +23,19 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.ingest.GetPipelineRequest; -import org.elasticsearch.action.ingest.GetPipelineResponse; -import org.elasticsearch.action.ingest.PutPipelineRequest; -import org.elasticsearch.action.ingest.DeletePipelineRequest; -import org.elasticsearch.action.ingest.WritePipelineResponse; import 
org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; -import org.elasticsearch.ingest.PipelineConfiguration; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; -import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -134,7 +126,7 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase // end::put-settings-request-masterTimeout // tag::put-settings-execute - ClusterUpdateSettingsResponse response = client.cluster().putSettings(request); + ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, RequestOptions.DEFAULT); // end::put-settings-execute // tag::put-settings-response @@ -150,7 +142,7 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase request.transientSettings(Settings.builder().putNull(transientSettingKey).build()); // <1> // tag::put-settings-request-reset-transient request.persistentSettings(Settings.builder().putNull(persistentSettingKey)); - ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request); + ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, RequestOptions.DEFAULT); assertTrue(resetResponse.isAcknowledged()); } @@ -180,10 +172,11 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // 
tag::put-settings-execute-async - client.cluster().putSettingsAsync(request, listener); // <1> + client.cluster().putSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-settings-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index c3decd93a17..2b81e4a4adc 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -64,6 +64,7 @@ import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.SyncedFlushResponse; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -111,7 +112,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -130,7 +131,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::indices-exists-request-optionals // tag::indices-exists-response - boolean exists = client.indices().exists(request); + boolean exists = client.indices().exists(request, 
RequestOptions.DEFAULT); // end::indices-exists-response assertTrue(exists); } @@ -140,7 +141,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -167,7 +168,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::indices-exists-async - client.indices().existsAsync(request, listener); // <1> + client.indices().existsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::indices-exists-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -177,7 +178,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -199,7 +200,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::delete-index-request-indicesOptions // tag::delete-index-execute - DeleteIndexResponse deleteIndexResponse = client.indices().delete(request); + DeleteIndexResponse deleteIndexResponse = client.indices().delete(request, RequestOptions.DEFAULT); // end::delete-index-execute // tag::delete-index-response @@ -212,7 +213,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::delete-index-notfound try { DeleteIndexRequest request = new DeleteIndexRequest("does_not_exist"); - 
client.indices().delete(request); + client.indices().delete(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -226,7 +227,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("posts"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -253,7 +254,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::delete-index-execute-async - client.indices().deleteAsync(request, listener); // <1> + client.indices().deleteAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -289,7 +290,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase "}", // <2> XContentType.JSON); // end::create-index-request-mappings - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -306,7 +307,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase jsonMap.put("tweet", tweet); request.mapping("tweet", jsonMap); // <1> //end::create-index-mappings-map - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } { @@ -332,7 +333,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase 
builder.endObject(); request.mapping("tweet", builder); // <1> //end::create-index-mappings-xcontent - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } { @@ -340,7 +341,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase //tag::create-index-mappings-shortcut request.mapping("tweet", "message", "type=text"); // <1> //end::create-index-mappings-shortcut - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -362,7 +363,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase request.waitForActiveShards(ActiveShardCount.DEFAULT); // <2> // end::create-index-request-waitForActiveShards { - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -387,7 +388,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::create-index-whole-source // tag::create-index-execute - CreateIndexResponse createIndexResponse = client.indices().create(request); + CreateIndexResponse createIndexResponse = client.indices().create(request, RequestOptions.DEFAULT); // end::create-index-execute // tag::create-index-response @@ -426,7 +427,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::create-index-execute-async - client.indices().createAsync(request, listener); // <1> + client.indices().createAsync(request, RequestOptions.DEFAULT, listener); // <1> // 
end::create-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -437,7 +438,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -459,7 +460,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase "}", // <1> XContentType.JSON); // end::put-mapping-request-source - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } @@ -473,7 +474,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase jsonMap.put("properties", properties); request.source(jsonMap); // <1> //end::put-mapping-map - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } { @@ -494,14 +495,14 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase builder.endObject(); request.source(builder); // <1> //end::put-mapping-xcontent - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } { //tag::put-mapping-shortcut request.source("message", "type=text"); // <1> //end::put-mapping-shortcut - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = 
client.indices().putMapping(request, RequestOptions.DEFAULT); assertTrue(putMappingResponse.isAcknowledged()); } @@ -515,7 +516,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::put-mapping-request-masterTimeout // tag::put-mapping-execute - PutMappingResponse putMappingResponse = client.indices().putMapping(request); + PutMappingResponse putMappingResponse = client.indices().putMapping(request, RequestOptions.DEFAULT); // end::put-mapping-execute // tag::put-mapping-response @@ -529,7 +530,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase final RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("twitter"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -556,7 +557,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::put-mapping-execute-async - client.indices().putMappingAsync(request, listener); // <1> + client.indices().putMappingAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-mapping-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -601,7 +602,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::get-mapping-request-indicesOptions // tag::get-mapping-execute - GetMappingsResponse getMappingResponse = client.indices().getMappings(request); + GetMappingsResponse getMappingResponse = client.indices().getMappings(request, RequestOptions.DEFAULT); // end::get-mapping-execute // tag::get-mapping-response @@ -683,7 +684,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase }); // tag::get-mapping-execute-async - client.indices().getMappingsAsync(request, listener); // 
<1> + client.indices().getMappingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-mapping-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -694,7 +695,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -721,7 +722,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::open-index-request-indicesOptions // tag::open-index-execute - OpenIndexResponse openIndexResponse = client.indices().open(request); + OpenIndexResponse openIndexResponse = client.indices().open(request, RequestOptions.DEFAULT); // end::open-index-execute // tag::open-index-response @@ -751,7 +752,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::open-index-execute-async - client.indices().openAsync(request, listener); // <1> + client.indices().openAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::open-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -761,7 +762,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::open-index-notfound try { OpenIndexRequest request = new OpenIndexRequest("does_not_exist"); - client.indices().open(request); + client.indices().open(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.BAD_REQUEST) { // <1> @@ -790,7 +791,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::refresh-request-indicesOptions // tag::refresh-execute - RefreshResponse refreshResponse = 
client.indices().refresh(request); + RefreshResponse refreshResponse = client.indices().refresh(request, RequestOptions.DEFAULT); // end::refresh-execute // tag::refresh-response @@ -819,7 +820,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::refresh-execute-async - client.indices().refreshAsync(request, listener); // <1> + client.indices().refreshAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::refresh-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -829,7 +830,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::refresh-notfound try { RefreshRequest request = new RefreshRequest("does_not_exist"); - client.indices().refresh(request); + client.indices().refresh(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -866,7 +867,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::flush-request-force // tag::flush-execute - FlushResponse flushResponse = client.indices().flush(request); + FlushResponse flushResponse = client.indices().flush(request, RequestOptions.DEFAULT); // end::flush-execute // tag::flush-response @@ -895,7 +896,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::flush-execute-async - client.indices().flushAsync(request, listener); // <1> + client.indices().flushAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::flush-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -905,7 +906,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::flush-notfound try { FlushRequest request = new FlushRequest("does_not_exist"); - client.indices().flush(request); + client.indices().flush(request, 
RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -934,7 +935,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::flush-synced-request-indicesOptions // tag::flush-synced-execute - SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request); + SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, RequestOptions.DEFAULT); // end::flush-synced-execute // tag::flush-synced-response @@ -978,7 +979,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::flush-synced-execute-async - client.indices().flushSyncedAsync(request, listener); // <1> + client.indices().flushSyncedAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::flush-synced-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -988,7 +989,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::flush-synced-notfound try { SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist"); - client.indices().flushSynced(request); + client.indices().flushSynced(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -1003,7 +1004,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase { Settings settings = Settings.builder().put("number_of_shards", 3).build(); - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index", settings)); + CreateIndexResponse createIndexResponse = client.indices().create( + new CreateIndexRequest("index", settings), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1020,7 +1022,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // 
end::get-settings-request-indicesOptions // tag::get-settings-execute - GetSettingsResponse getSettingsResponse = client.indices().getSettings(request); + GetSettingsResponse getSettingsResponse = client.indices().getSettings(request, RequestOptions.DEFAULT); // end::get-settings-execute // tag::get-settings-response @@ -1055,7 +1057,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::get-settings-execute-async - client.indices().getSettingsAsync(request, listener); // <1> + client.indices().getSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-settings-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1066,7 +1068,8 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase { Settings settings = Settings.builder().put("number_of_shards", 3).build(); - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index", settings)); + CreateIndexResponse createIndexResponse = client.indices().create( + new CreateIndexRequest("index", settings), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1077,7 +1080,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase request.includeDefaults(true); // <1> // end::get-settings-request-include-defaults - GetSettingsResponse getSettingsResponse = client.indices().getSettings(request); + GetSettingsResponse getSettingsResponse = client.indices().getSettings(request, RequestOptions.DEFAULT); String numberOfShardsString = getSettingsResponse.getSetting("index", "index.number_of_shards"); Settings indexSettings = getSettingsResponse.getIndexToSettings().get("index"); Integer numberOfShards = indexSettings.getAsInt("index.number_of_shards", null); @@ -1107,7 +1110,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase final CountDownLatch latch = new 
CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - client.indices().getSettingsAsync(request, listener); + client.indices().getSettingsAsync(request, RequestOptions.DEFAULT, listener); assertTrue(latch.await(30L, TimeUnit.SECONDS)); } @@ -1142,7 +1145,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::force-merge-request-flush // tag::force-merge-execute - ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request); + ForceMergeResponse forceMergeResponse = client.indices().forceMerge(request, RequestOptions.DEFAULT); // end::force-merge-execute // tag::force-merge-response @@ -1167,14 +1170,14 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::force-merge-execute-listener // tag::force-merge-execute-async - client.indices().forceMergeAsync(request, listener); // <1> + client.indices().forceMergeAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::force-merge-execute-async } { // tag::force-merge-notfound try { ForceMergeRequest request = new ForceMergeRequest("does_not_exist"); - client.indices().forceMerge(request); + client.indices().forceMerge(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -1219,7 +1222,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::clear-cache-request-fields // tag::clear-cache-execute - ClearIndicesCacheResponse clearCacheResponse = client.indices().clearCache(request); + ClearIndicesCacheResponse clearCacheResponse = client.indices().clearCache(request, RequestOptions.DEFAULT); // end::clear-cache-execute // tag::clear-cache-response @@ -1248,7 +1251,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::clear-cache-execute-async - client.indices().clearCacheAsync(request, 
listener); // <1> + client.indices().clearCacheAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::clear-cache-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1258,7 +1261,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::clear-cache-notfound try { ClearIndicesCacheRequest request = new ClearIndicesCacheRequest("does_not_exist"); - client.indices().clearCache(request); + client.indices().clearCache(request, RequestOptions.DEFAULT); } catch (ElasticsearchException exception) { if (exception.status() == RestStatus.NOT_FOUND) { // <1> @@ -1272,7 +1275,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1295,7 +1298,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::close-index-request-indicesOptions // tag::close-index-execute - CloseIndexResponse closeIndexResponse = client.indices().close(request); + CloseIndexResponse closeIndexResponse = client.indices().close(request, RequestOptions.DEFAULT); // end::close-index-execute // tag::close-index-response @@ -1323,7 +1326,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::close-index-execute-async - client.indices().closeAsync(request, listener); // <1> + client.indices().closeAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::close-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1335,7 +1338,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase { CreateIndexResponse 
createIndexResponse = client.indices().create(new CreateIndexRequest("index") - .alias(new Alias("alias"))); + .alias(new Alias("alias")), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1363,7 +1366,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::exists-alias-request-local // tag::exists-alias-execute - boolean exists = client.indices().existsAlias(request); + boolean exists = client.indices().existsAlias(request, RequestOptions.DEFAULT); // end::exists-alias-execute assertTrue(exists); @@ -1386,7 +1389,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::exists-alias-execute-async - client.indices().existsAliasAsync(request, listener); // <1> + client.indices().existsAliasAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::exists-alias-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1397,13 +1400,13 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index1")); + CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index1"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); - createIndexResponse = client.indices().create(new CreateIndexRequest("index2")); + createIndexResponse = client.indices().create(new CreateIndexRequest("index2"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); - createIndexResponse = client.indices().create(new CreateIndexRequest("index3")); + createIndexResponse = client.indices().create(new CreateIndexRequest("index3"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); - createIndexResponse = client.indices().create(new CreateIndexRequest("index4")); + 
createIndexResponse = client.indices().create(new CreateIndexRequest("index4"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1448,7 +1451,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::update-aliases-execute IndicesAliasesResponse indicesAliasesResponse = - client.indices().updateAliases(request); + client.indices().updateAliases(request, RequestOptions.DEFAULT); // end::update-aliases-execute // tag::update-aliases-response @@ -1482,7 +1485,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::update-aliases-execute-async - client.indices().updateAliasesAsync(request, listener); // <1> + client.indices().updateAliasesAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::update-aliases-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1527,7 +1530,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::shrink-index-request-aliases // tag::shrink-index-execute - ResizeResponse resizeResponse = client.indices().shrink(request); + ResizeResponse resizeResponse = client.indices().shrink(request, RequestOptions.DEFAULT); // end::shrink-index-execute // tag::shrink-index-response @@ -1556,7 +1559,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::shrink-index-execute-async - client.indices().shrinkAsync(request, listener); // <1> + client.indices().shrinkAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::shrink-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1597,7 +1600,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::split-index-request-aliases // tag::split-index-execute - ResizeResponse resizeResponse = client.indices().split(request); + ResizeResponse 
resizeResponse = client.indices().split(request, RequestOptions.DEFAULT); // end::split-index-execute // tag::split-index-response @@ -1626,7 +1629,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::split-index-execute-async - client.indices().splitAsync(request,listener); // <1> + client.indices().splitAsync(request, RequestOptions.DEFAULT,listener); // <1> // end::split-index-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1636,7 +1639,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - client.indices().create(new CreateIndexRequest("index-1").alias(new Alias("alias"))); + client.indices().create(new CreateIndexRequest("index-1").alias(new Alias("alias")), RequestOptions.DEFAULT); } // tag::rollover-request @@ -1673,7 +1676,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // end::rollover-request-alias // tag::rollover-execute - RolloverResponse rolloverResponse = client.indices().rollover(request); + RolloverResponse rolloverResponse = client.indices().rollover(request, RequestOptions.DEFAULT); // end::rollover-execute // tag::rollover-response @@ -1712,7 +1715,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::rollover-execute-async - client.indices().rolloverAsync(request,listener); // <1> + client.indices().rolloverAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::rollover-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1722,7 +1725,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { - CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + CreateIndexResponse 
createIndexResponse = client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT); assertTrue(createIndexResponse.isAcknowledged()); } @@ -1785,7 +1788,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase // tag::put-settings-execute UpdateSettingsResponse updateSettingsResponse = - client.indices().putSettings(request); + client.indices().putSettings(request, RequestOptions.DEFAULT); // end::put-settings-execute // tag::put-settings-response @@ -1814,7 +1817,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::put-settings-execute-async - client.indices().putSettingsAsync(request,listener); // <1> + client.indices().putSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-settings-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1849,7 +1852,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase "}", // <2> XContentType.JSON); // end::put-template-request-mappings-json - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } { //tag::put-template-request-mappings-map @@ -1863,7 +1866,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase jsonMap.put("tweet", tweet); request.mapping("tweet", jsonMap); // <1> //end::put-template-request-mappings-map - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } { //tag::put-template-request-mappings-xcontent @@ -1887,13 +1890,13 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase builder.endObject(); request.mapping("tweet", builder); // <1> //end::put-template-request-mappings-xcontent - 
assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } { //tag::put-template-request-mappings-shortcut request.mapping("tweet", "message", "type=text"); // <1> //end::put-template-request-mappings-shortcut - assertTrue(client.indices().putTemplate(request).isAcknowledged()); + assertTrue(client.indices().putTemplate(request, RequestOptions.DEFAULT).isAcknowledged()); } // tag::put-template-request-aliases @@ -1947,7 +1950,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase request.create(false); // make test happy // tag::put-template-execute - PutIndexTemplateResponse putTemplateResponse = client.indices().putTemplate(request); + PutIndexTemplateResponse putTemplateResponse = client.indices().putTemplate(request, RequestOptions.DEFAULT); // end::put-template-execute // tag::put-template-response @@ -1975,7 +1978,7 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::put-template-execute-async - client.indices().putTemplateAsync(request, listener); // <1> + client.indices().putTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-template-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java index 7971e49da44..f5bdc9f2f3e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.ingest.GetPipelineResponse; import 
org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.WritePipelineResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.TimeValue; @@ -86,7 +87,7 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase { // end::put-pipeline-request-masterTimeout // tag::put-pipeline-execute - WritePipelineResponse response = client.ingest().putPipeline(request); // <1> + WritePipelineResponse response = client.ingest().putPipeline(request, RequestOptions.DEFAULT); // <1> // end::put-pipeline-execute // tag::put-pipeline-response @@ -129,7 +130,7 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::put-pipeline-execute-async - client.ingest().putPipelineAsync(request, listener); // <1> + client.ingest().putPipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-pipeline-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -154,7 +155,7 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase { // end::get-pipeline-request-masterTimeout // tag::get-pipeline-execute - GetPipelineResponse response = client.ingest().getPipeline(request); // <1> + GetPipelineResponse response = client.ingest().getPipeline(request, RequestOptions.DEFAULT); // <1> // end::get-pipeline-execute // tag::get-pipeline-response @@ -199,7 +200,7 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::get-pipeline-execute-async - client.ingest().getPipelineAsync(request, listener); // <1> + client.ingest().getPipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> // 
end::get-pipeline-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -229,7 +230,7 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase { // end::delete-pipeline-request-masterTimeout // tag::delete-pipeline-execute - WritePipelineResponse response = client.ingest().deletePipeline(request); // <1> + WritePipelineResponse response = client.ingest().deletePipeline(request, RequestOptions.DEFAULT); // <1> // end::delete-pipeline-execute // tag::delete-pipeline-response @@ -269,7 +270,7 @@ public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::delete-pipeline-execute-async - client.ingest().deletePipelineAsync(request, listener); // <1> + client.ingest().deletePipelineAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-pipeline-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java index 489d4d9b1ed..b56fb3359ff 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java @@ -19,10 +19,6 @@ package org.elasticsearch.client.documentation; -import org.apache.http.HttpEntity; -import org.apache.http.HttpStatus; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; @@ -31,12 +27,10 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import 
org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; @@ -45,11 +39,6 @@ import java.io.IOException; import java.io.InputStream; import java.util.Map; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; - /** * This class is used to generate the documentation for the * docs/java-rest/high-level/migration.asciidoc page. 
@@ -98,14 +87,14 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase { //end::migration-request-ctor //tag::migration-request-ctor-execution - IndexResponse response = client.index(request); + IndexResponse response = client.index(request, RequestOptions.DEFAULT); //end::migration-request-ctor-execution assertEquals(RestStatus.CREATED, response.status()); } { //tag::migration-request-async-execution DeleteRequest request = new DeleteRequest("index", "doc", "id"); // <1> - client.deleteAsync(request, new ActionListener() { // <2> + client.deleteAsync(request, RequestOptions.DEFAULT, new ActionListener() { // <2> @Override public void onResponse(DeleteResponse deleteResponse) { // <3> @@ -117,12 +106,12 @@ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase { } }); //end::migration-request-async-execution - assertBusy(() -> assertFalse(client.exists(new GetRequest("index", "doc", "id")))); + assertBusy(() -> assertFalse(client.exists(new GetRequest("index", "doc", "id"), RequestOptions.DEFAULT))); } { //tag::migration-request-sync-execution DeleteRequest request = new DeleteRequest("index", "doc", "id"); - DeleteResponse response = client.delete(request); // <1> + DeleteResponse response = client.delete(request, RequestOptions.DEFAULT); // <1> //end::migration-request-sync-execution assertEquals(RestStatus.NOT_FOUND, response.status()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java index 504ea797c35..2186bd8ebfd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.Build; import org.elasticsearch.Version; 
import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.ClusterName; @@ -40,7 +41,7 @@ public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase RestHighLevelClient client = highLevelClient(); { //tag::main-execute - MainResponse response = client.info(); + MainResponse response = client.info(RequestOptions.DEFAULT); //end::main-execute //tag::main-response ClusterName clusterName = response.getClusterName(); // <1> @@ -60,7 +61,7 @@ public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase public void testPing() throws IOException { RestHighLevelClient client = highLevelClient(); //tag::ping-execute - boolean response = client.ping(); + boolean response = client.ping(RequestOptions.DEFAULT); //end::ping-execute assertTrue(response); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 463c5f7d12f..cf6409bab64 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -42,6 +42,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; @@ -143,7 +144,7 @@ public class SearchDocumentationIT extends 
ESRestHighLevelClientTestCase { // tag::search-request-preference searchRequest.preference("_local"); // <1> // end::search-request-preference - assertNotNull(client.search(searchRequest)); + assertNotNull(client.search(searchRequest, RequestOptions.DEFAULT)); } { // tag::search-source-basics @@ -176,7 +177,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::search-source-setter // tag::search-execute - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); // end::search-execute // tag::search-execute-listener @@ -198,7 +199,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::search-execute-async - client.searchAsync(searchRequest, listener); // <1> + client.searchAsync(searchRequest, RequestOptions.DEFAULT, listener); // <1> // end::search-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -296,7 +297,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new IndexRequest("posts", "doc", "3") .source(XContentType.JSON, "company", "Elastic", "age", 40)); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -312,7 +313,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::search-request-aggregations searchSourceBuilder.query(QueryBuilders.matchAllQuery()); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); { // tag::search-request-aggregations-get Aggregations aggregations = 
searchResponse.getAggregations(); @@ -369,7 +370,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new IndexRequest("posts", "doc", "3").source(XContentType.JSON, "user", "tlrx")); request.add(new IndexRequest("posts", "doc", "4").source(XContentType.JSON, "user", "cbuescher")); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -384,7 +385,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.suggest(suggestBuilder); // end::search-request-suggestion searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); { // tag::search-request-suggestion-get Suggest suggest = searchResponse.getSuggest(); // <1> @@ -416,7 +417,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user", Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value"))); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -437,7 +438,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { .should(matchQuery("title", "Elasticsearch")) .should(matchQuery("user", "kimchy"))); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, 
RequestOptions.DEFAULT); { // tag::search-request-highlighting-get SearchHits hits = searchResponse.getHits(); @@ -472,7 +473,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { IndexRequest request = new IndexRequest("posts", "doc", "1") .source(XContentType.JSON, "tags", "elasticsearch", "comments", 123); request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - IndexResponse indexResponse = client.index(request); + IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); assertSame(RestStatus.CREATED, indexResponse.status()); } { @@ -485,7 +486,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.aggregation(AggregationBuilders.histogram("by_comments").field("comments").interval(100)); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); // tag::search-request-profiling-get Map profilingResults = searchResponse.getProfileResults(); // <1> @@ -548,7 +549,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new IndexRequest("posts", "doc", "3") .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch")); request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = client.bulk(request); + BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } @@ -561,7 +562,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.size(size); // <1> searchRequest.source(searchSourceBuilder); searchRequest.scroll(TimeValue.timeValueMinutes(1L)); // <2> - SearchResponse searchResponse = client.search(searchRequest); + SearchResponse searchResponse = client.search(searchRequest, 
RequestOptions.DEFAULT); String scrollId = searchResponse.getScrollId(); // <3> SearchHits hits = searchResponse.getHits(); // <4> // end::search-scroll-init @@ -572,7 +573,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // tag::search-scroll2 SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId); // <1> scrollRequest.scroll(TimeValue.timeValueSeconds(30)); - SearchResponse searchScrollResponse = client.searchScroll(scrollRequest); + SearchResponse searchScrollResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT); scrollId = searchScrollResponse.getScrollId(); // <2> hits = searchScrollResponse.getHits(); // <3> assertEquals(3, hits.getTotalHits()); @@ -582,14 +583,14 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); clearScrollRequest.addScrollId(scrollId); - ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest); + ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT); assertTrue(clearScrollResponse.isSucceeded()); } { SearchRequest searchRequest = new SearchRequest(); searchRequest.scroll("60s"); - SearchResponse initialSearchResponse = client.search(searchRequest); + SearchResponse initialSearchResponse = client.search(searchRequest, RequestOptions.DEFAULT); String scrollId = initialSearchResponse.getScrollId(); SearchScrollRequest scrollRequest = new SearchScrollRequest(); @@ -601,7 +602,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::scroll-request-arguments // tag::search-scroll-execute-sync - SearchResponse searchResponse = client.searchScroll(scrollRequest); + SearchResponse searchResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT); // end::search-scroll-execute-sync assertEquals(0, searchResponse.getFailedShards()); @@ -648,7 +649,7 @@ public class 
SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::clear-scroll-add-scroll-ids // tag::clear-scroll-execute - ClearScrollResponse response = client.clearScroll(request); + ClearScrollResponse response = client.clearScroll(request, RequestOptions.DEFAULT); // end::clear-scroll-execute // tag::clear-scroll-response @@ -678,7 +679,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, clearScrollLatch); // tag::clear-scroll-execute-async - client.clearScrollAsync(request, listener); // <1> + client.clearScrollAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::clear-scroll-execute-async assertTrue(clearScrollLatch.await(30L, TimeUnit.SECONDS)); @@ -692,14 +693,14 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { searchSourceBuilder.query(matchQuery("title", "Elasticsearch")); searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest); // <1> + SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT); // <1> String scrollId = searchResponse.getScrollId(); SearchHit[] searchHits = searchResponse.getHits().getHits(); while (searchHits != null && searchHits.length > 0) { // <2> SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId); // <3> scrollRequest.scroll(scroll); - searchResponse = client.searchScroll(scrollRequest); + searchResponse = client.searchScroll(scrollRequest, RequestOptions.DEFAULT); scrollId = searchResponse.getScrollId(); searchHits = searchResponse.getHits().getHits(); // <4> @@ -707,7 +708,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { ClearScrollRequest clearScrollRequest = new ClearScrollRequest(); // <5> clearScrollRequest.addScrollId(scrollId); - ClearScrollResponse clearScrollResponse = client.clearScroll(clearScrollRequest); + ClearScrollResponse clearScrollResponse = 
client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT); boolean succeeded = clearScrollResponse.isSucceeded(); // end::search-scroll-example assertTrue(succeeded); @@ -737,7 +738,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::search-template-request-inline // tag::search-template-response - SearchTemplateResponse response = client.searchTemplate(request); + SearchTemplateResponse response = client.searchTemplate(request, RequestOptions.DEFAULT); SearchResponse searchResponse = response.getResponse(); // end::search-template-response @@ -749,7 +750,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::render-search-template-request // tag::render-search-template-response - SearchTemplateResponse renderResponse = client.searchTemplate(request); + SearchTemplateResponse renderResponse = client.searchTemplate(request, RequestOptions.DEFAULT); BytesReference source = renderResponse.getSource(); // <1> // end::render-search-template-response @@ -802,7 +803,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::search-template-request-options // tag::search-template-execute - SearchTemplateResponse response = client.searchTemplate(request); + SearchTemplateResponse response = client.searchTemplate(request, RequestOptions.DEFAULT); // end::search-template-execute SearchResponse searchResponse = response.getResponse(); @@ -828,7 +829,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::search-template-execute-async - client.searchTemplateAsync(request, listener); // <1> + client.searchTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::search-template-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -849,7 +850,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::field-caps-request-indicesOptions // 
tag::field-caps-execute - FieldCapabilitiesResponse response = client.fieldCaps(request); + FieldCapabilitiesResponse response = client.fieldCaps(request, RequestOptions.DEFAULT); // end::field-caps-execute // tag::field-caps-response @@ -892,7 +893,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::field-caps-execute-async - client.fieldCapsAsync(request, listener); // <1> + client.fieldCapsAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::field-caps-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -918,7 +919,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { // end::rank-eval-request-basic // tag::rank-eval-execute - RankEvalResponse response = client.rankEval(request); + RankEvalResponse response = client.rankEval(request, RequestOptions.DEFAULT); // end::rank-eval-execute // tag::rank-eval-response @@ -962,7 +963,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::rank-eval-execute-async - client.rankEvalAsync(request, listener); // <1> + client.rankEvalAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::rank-eval-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -987,7 +988,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { request.add(secondSearchRequest); // end::multi-search-request-basic // tag::multi-search-execute - MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse response = client.multiSearch(request, RequestOptions.DEFAULT); // end::multi-search-execute // tag::multi-search-response MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // <1> @@ -1019,7 +1020,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // 
tag::multi-search-execute-async - client.multiSearchAsync(request, listener); // <1> + client.multiSearchAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::multi-search-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -1030,7 +1031,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { request.add(new SearchRequest("posts") // <1> .types("doc")); // <2> // end::multi-search-request-index - MultiSearchResponse response = client.multiSearch(request); + MultiSearchResponse response = client.multiSearch(request, RequestOptions.DEFAULT); MultiSearchResponse.Item firstResponse = response.getResponses()[0]; assertNull(firstResponse.getFailure()); SearchResponse searchResponse = firstResponse.getResponse(); @@ -1041,12 +1042,12 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { private void indexSearchTestData() throws IOException { CreateIndexRequest authorsRequest = new CreateIndexRequest("authors") .mapping("doc", "user", "type=keyword,doc_values=false"); - CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest); + CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest, RequestOptions.DEFAULT); assertTrue(authorsResponse.isAcknowledged()); CreateIndexRequest reviewersRequest = new CreateIndexRequest("contributors") .mapping("doc", "user", "type=keyword"); - CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest); + CreateIndexResponse reviewersResponse = highLevelClient().indices().create(reviewersRequest, RequestOptions.DEFAULT); assertTrue(reviewersResponse.isAcknowledged()); BulkRequest bulkRequest = new BulkRequest(); @@ -1067,7 +1068,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase { bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest); + BulkResponse bulkResponse = 
highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT); assertSame(RestStatus.OK, bulkResponse.status()); assertFalse(bulkResponse.hasFailures()); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 2890ad50c26..8c158a91a51 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResp import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; @@ -134,7 +135,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase // end::create-repository-request-verify // tag::create-repository-execute - PutRepositoryResponse response = client.snapshot().createRepository(request); + PutRepositoryResponse response = client.snapshot().createRepository(request, RequestOptions.DEFAULT); // end::create-repository-execute // tag::create-repository-response @@ -168,7 +169,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::create-repository-execute-async - client.snapshot().createRepositoryAsync(request, listener); // <1> + client.snapshot().createRepositoryAsync(request, RequestOptions.DEFAULT, 
listener); // <1> // end::create-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -197,7 +198,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase // end::get-repository-request-masterTimeout // tag::get-repository-execute - GetRepositoriesResponse response = client.snapshot().getRepositories(request); + GetRepositoriesResponse response = client.snapshot().getRepositories(request, RequestOptions.DEFAULT); // end::get-repository-execute // tag::get-repository-response @@ -232,7 +233,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::get-repository-execute-async - client.snapshot().getRepositoriesAsync(request, listener); // <1> + client.snapshot().getRepositoriesAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::get-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -258,7 +259,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase // end::delete-repository-request-timeout // tag::delete-repository-execute - DeleteRepositoryResponse response = client.snapshot().deleteRepository(request); + DeleteRepositoryResponse response = client.snapshot().deleteRepository(request, RequestOptions.DEFAULT); // end::delete-repository-execute // tag::delete-repository-response @@ -292,7 +293,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::delete-repository-execute-async - client.snapshot().deleteRepositoryAsync(request, listener); // <1> + client.snapshot().deleteRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::delete-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -317,7 +318,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase // end::verify-repository-request-timeout 
// tag::verify-repository-execute - VerifyRepositoryResponse response = client.snapshot().verifyRepository(request); + VerifyRepositoryResponse response = client.snapshot().verifyRepository(request, RequestOptions.DEFAULT); // end::verify-repository-execute // tag::verify-repository-response @@ -352,7 +353,7 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase listener = new LatchedActionListener<>(listener, latch); // tag::verify-repository-execute-async - client.snapshot().verifyRepositoryAsync(request, listener); // <1> + client.snapshot().verifyRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::verify-repository-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); @@ -363,6 +364,6 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); request.type(FsRepository.TYPE); request.settings("{\"location\": \".\"}", XContentType.JSON); - assertTrue(highLevelClient().snapshot().createRepository(request).isAcknowledged()); + assertTrue(highLevelClient().snapshot().createRepository(request, RequestOptions.DEFAULT).isAcknowledged()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java index faf447a4143..8a45195757c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/TasksClientDocumentationIT.java @@ -23,10 +23,13 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.TaskOperationFailure; +import 
org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.tasks.TaskId; @@ -90,7 +93,7 @@ public class TasksClientDocumentationIT extends ESRestHighLevelClientTestCase { ListTasksRequest request = new ListTasksRequest(); // tag::list-tasks-execute - ListTasksResponse response = client.tasks().list(request); + ListTasksResponse response = client.tasks().list(request, RequestOptions.DEFAULT); // end::list-tasks-execute assertThat(response, notNullValue()); @@ -139,10 +142,80 @@ public class TasksClientDocumentationIT extends ESRestHighLevelClientTestCase { listener = new LatchedActionListener<>(listener, latch); // tag::list-tasks-execute-async - client.tasks().listAsync(request, listener); // <1> + client.tasks().listAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::list-tasks-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testCancelTasks() throws IOException { + RestHighLevelClient client = highLevelClient(); + { + // tag::cancel-tasks-request + CancelTasksRequest request = new CancelTasksRequest(); + // end::cancel-tasks-request + + // tag::cancel-tasks-request-filter + request.setTaskId(new TaskId("nodeId1", 42)); //<1> + request.setActions("cluster:*"); // <2> + request.setNodes("nodeId1", "nodeId2"); // <3> + // end::cancel-tasks-request-filter + + } + + CancelTasksRequest request = new CancelTasksRequest(); + 
request.setTaskId(TaskId.EMPTY_TASK_ID); + + // tag::cancel-tasks-execute + CancelTasksResponse response = client.tasks().cancel(request, RequestOptions.DEFAULT); + // end::cancel-tasks-execute + + assertThat(response, notNullValue()); + + // tag::cancel-tasks-response-tasks + List<TaskInfo> tasks = response.getTasks(); // <1> + // end::cancel-tasks-response-tasks + + + // tag::cancel-tasks-response-failures + List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1> + List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2> + // end::cancel-tasks-response-failures + + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + } + + public void testAsyncCancelTasks() throws InterruptedException { + + RestHighLevelClient client = highLevelClient(); + { + CancelTasksRequest request = new CancelTasksRequest(); + + // tag::cancel-tasks-execute-listener + ActionListener<CancelTasksResponse> listener = + new ActionListener<CancelTasksResponse>() { + @Override + public void onResponse(CancelTasksResponse response) { + // <1> + } + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::cancel-tasks-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::cancel-tasks-execute-async + client.tasks().cancelAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::cancel-tasks-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest/build.gradle b/client/rest/build.gradle index bcb928495c5..b1ed05a8342 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -69,7 +69,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in elasticsearch-core +// TODO: Not anymore. 
Now in :libs:core jarHell.enabled=false namingConventions { diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index e226656dbd2..41146e0b7ec 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -72,7 +72,7 @@ dependencyLicenses { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in elasticsearch-core +// TODO: Not anymore. Now in :libs:core jarHell.enabled=false namingConventions { diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java index f13d1751104..ed2744df31c 100644 --- a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java +++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/ElasticsearchHostsSnifferTests.java @@ -30,7 +30,6 @@ import com.sun.net.httpserver.HttpServer; import org.apache.http.Consts; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; -import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -148,8 +147,6 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase { return httpServer; } - //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes - @IgnoreJRERequirement private static class ResponseHandler implements HttpHandler { private final int sniffTimeoutMillis; private final SniffResponse sniffResponse; diff --git a/client/test/build.gradle b/client/test/build.gradle index fd5777cc8df..59c45186fe7 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -21,7 +21,6 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.JavaVersion apply plugin: 'elasticsearch.build' -apply plugin: 
'ru.vyarus.animalsniffer' targetCompatibility = JavaVersion.VERSION_1_7 sourceCompatibility = JavaVersion.VERSION_1_7 @@ -31,8 +30,6 @@ dependencies { compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" compile "junit:junit:${versions.junit}" compile "org.hamcrest:hamcrest-all:${versions.hamcrest}" - compile "org.codehaus.mojo:animal-sniffer-annotations:1.15" - signature "org.codehaus.mojo.signature:java17:1.0@signature" } forbiddenApisMain { @@ -49,7 +46,7 @@ forbiddenApisTest { } // JarHell is part of es server, which we don't want to pull in -// TODO: Not anymore. Now in elasticsearch-core +// TODO: Not anymore. Now in :libs:core jarHell.enabled=false // TODO: should we have licenses for our test deps? diff --git a/distribution/src/bin/elasticsearch-cli b/distribution/src/bin/elasticsearch-cli index c49c1a51619..5699b3feb58 100644 --- a/distribution/src/bin/elasticsearch-cli +++ b/distribution/src/bin/elasticsearch-cli @@ -24,5 +24,5 @@ exec \ -Des.distribution.flavor="$ES_DISTRIBUTION_FLAVOR" \ -Des.distribution.type="$ES_DISTRIBUTION_TYPE" \ -cp "$ES_CLASSPATH" \ - $1 \ - "${@:2}" + "$ES_MAIN_CLASS" \ + "$@" diff --git a/distribution/src/bin/elasticsearch-cli.bat b/distribution/src/bin/elasticsearch-cli.bat index e85abdee448..b668a7c06c2 100644 --- a/distribution/src/bin/elasticsearch-cli.bat +++ b/distribution/src/bin/elasticsearch-cli.bat @@ -6,11 +6,6 @@ if defined ES_ADDITIONAL_SOURCES ( ) ) -for /f "tokens=1*" %%a in ("%*") do ( - set main_class=%%a - set arguments=%%b -) - if defined ES_ADDITIONAL_CLASSPATH_DIRECTORIES ( for %%a in ("%ES_ADDITIONAL_CLASSPATH_DIRECTORIES:;=","%") do ( set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/%%a/* @@ -24,5 +19,5 @@ if defined ES_ADDITIONAL_CLASSPATH_DIRECTORIES ( -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^ -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^ -cp "%ES_CLASSPATH%" ^ - %main_class% ^ - %arguments% + "%ES_MAIN_CLASS%" ^ + %* diff --git 
a/distribution/src/bin/elasticsearch-keystore b/distribution/src/bin/elasticsearch-keystore index ebe24179a0e..49e1aa7437a 100755 --- a/distribution/src/bin/elasticsearch-keystore +++ b/distribution/src/bin/elasticsearch-keystore @@ -1,5 +1,5 @@ #!/bin/bash -"`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.common.settings.KeyStoreCli \ +ES_MAIN_CLASS=org.elasticsearch.common.settings.KeyStoreCli \ + "`dirname "$0"`"/elasticsearch-cli \ "$@" diff --git a/distribution/src/bin/elasticsearch-keystore.bat b/distribution/src/bin/elasticsearch-keystore.bat index 380a3e501d5..b43182a273f 100644 --- a/distribution/src/bin/elasticsearch-keystore.bat +++ b/distribution/src/bin/elasticsearch-keystore.bat @@ -3,8 +3,8 @@ setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.common.settings.KeyStoreCli call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.common.settings.KeyStoreCli ^ %%* ^ || exit /b 1 diff --git a/distribution/src/bin/elasticsearch-plugin b/distribution/src/bin/elasticsearch-plugin index adfb4a88ad2..1c11cfb35f2 100755 --- a/distribution/src/bin/elasticsearch-plugin +++ b/distribution/src/bin/elasticsearch-plugin @@ -1,6 +1,6 @@ #!/bin/bash -ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli \ +ES_MAIN_CLASS=org.elasticsearch.plugins.PluginCli \ + ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.plugins.PluginCli \ "$@" diff --git a/distribution/src/bin/elasticsearch-plugin.bat b/distribution/src/bin/elasticsearch-plugin.bat index 5d7b1d7a828..7e71de790f0 100644 --- a/distribution/src/bin/elasticsearch-plugin.bat +++ b/distribution/src/bin/elasticsearch-plugin.bat @@ -3,9 +3,9 @@ setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.plugins.PluginCli set ES_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.plugins.PluginCli ^ %%* ^ || exit 
/b 1 diff --git a/distribution/src/bin/elasticsearch-translog b/distribution/src/bin/elasticsearch-translog index 33350aaf0b6..aa5bfb32df1 100755 --- a/distribution/src/bin/elasticsearch-translog +++ b/distribution/src/bin/elasticsearch-translog @@ -1,5 +1,5 @@ #!/bin/bash -"`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.index.translog.TranslogToolCli \ +ES_MAIN_CLASS=org.elasticsearch.index.translog.TranslogToolCli \ + "`dirname "$0"`"/elasticsearch-cli \ "$@" diff --git a/distribution/src/bin/elasticsearch-translog.bat b/distribution/src/bin/elasticsearch-translog.bat index 9c4cefcf2fe..6a2e3046205 100644 --- a/distribution/src/bin/elasticsearch-translog.bat +++ b/distribution/src/bin/elasticsearch-translog.bat @@ -3,8 +3,8 @@ setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.index.translog.TranslogToolCli call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.index.translog.TranslogToolCli ^ %%* ^ || exit /b 1 diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index 27e8712ffcb..ff0f4c473a4 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -21,14 +21,11 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.JavaVersion apply plugin: 'elasticsearch.build' -apply plugin: 'ru.vyarus.animalsniffer' sourceCompatibility = JavaVersion.VERSION_1_7 targetCompatibility = JavaVersion.VERSION_1_7 dependencies { - signature "org.codehaus.mojo.signature:java17:1.0@signature" - testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 34149bee528..783cc773e96 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ 
b/docs/java-rest/high-level/supported-apis.asciidoc @@ -140,5 +140,7 @@ include::snapshot/verify_repository.asciidoc[] The Java High Level REST Client supports the following Tasks APIs: * <> +* <> include::tasks/list_tasks.asciidoc[] +include::tasks/cancel_tasks.asciidoc[] diff --git a/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc b/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc new file mode 100644 index 00000000000..089f87c00a2 --- /dev/null +++ b/docs/java-rest/high-level/tasks/cancel_tasks.asciidoc @@ -0,0 +1,82 @@ +[[java-rest-high-cluster-cancel-tasks]] +=== Cancel Tasks API + +The Cancel Tasks API allows cancellation of a currently running task. + +==== Cancel Tasks Request + +A `CancelTasksRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-request] +-------------------------------------------------- +There are no required parameters. The task cancellation command supports the same +task selection parameters as the list tasks command. 
+ +==== Parameters + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-request-filter] +-------------------------------------------------- +<1> Cancel a task +<2> Cancel only cluster-related tasks +<3> Cancel all tasks running on nodes nodeId1 and nodeId2 + +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute] +-------------------------------------------------- + +==== Asynchronous Execution + +The asynchronous execution requires `CancelTasksRequest` instance and an +`ActionListener` instance to be passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute-async] +-------------------------------------------------- +<1> The `CancelTasksRequest` to execute and the `ActionListener` to use +when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `CancelTasksResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +==== Cancel Tasks Response + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[cancel-tasks-response-tasks] +-------------------------------------------------- +<1> List of cancelled tasks + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-calc] +-------------------------------------------------- +<1> List of cancelled tasks grouped by a node +<2> List of cancelled tasks grouped by a parent task + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/TasksClientDocumentationIT.java[list-tasks-response-failures] +-------------------------------------------------- +<1> List of node failures +<2> List of task cancellation failures + diff --git a/docs/painless/painless-casting.asciidoc b/docs/painless/painless-casting.asciidoc index a3624f90831..4bcd14cbfc6 100644 --- a/docs/painless/painless-casting.asciidoc +++ b/docs/painless/painless-casting.asciidoc @@ -4,8 +4,11 @@ A cast converts the value of an original type to the equivalent value of a target type. An implicit cast infers the target type and automatically occurs during certain <>. An explicit cast specifies -the target type and forcefully occurs as its own operation. Use the *cast -operator* to specify an explicit cast. +the target type and forcefully occurs as its own operation. Use the `cast +operator '()'` to specify an explicit cast. + +Refer to the <> for a quick reference on all +allowed casts. *Errors* @@ -13,6 +16,7 @@ operator* to specify an explicit cast. * If an implicit cast is given, but an explicit cast is required. 
*Grammar* + [source,ANTLR4] ---- cast: '(' TYPE ')' expression @@ -31,15 +35,15 @@ cast: '(' TYPE ')' expression + <1> declare `int i`; explicit cast `long 5` to `int 5` -> `int 5`; - assign `int 5` to `i` + store `int 5` to `i` <2> declare `Map m`; allocate `HashMap` instance -> `HashMap reference`; implicit cast `HashMap reference` to `Map reference` -> `Map reference`; - assign `Map reference` to `m` + store `Map reference` to `m` <3> declare `HashMap hm`; - access `m` -> `Map reference`; + load from `m` -> `Map reference`; explicit cast `Map reference` to `HashMap reference` -> `HashMap reference`; - assign `HashMap reference` to `hm` + store `HashMap reference` to `hm` [[numeric-type-casting]] ==== Numeric Type Casting @@ -78,19 +82,19 @@ following table: ---- + <1> declare `int a`; - assign `int 1` to `a` + store `int 1` to `a` <2> declare `long b`; - access `a` -> `int 1`; + load from `a` -> `int 1`; implicit cast `int 1` to `long 1` -> `long 1`; - assign `long 1` to `b` + store `long 1` to `b` <3> declare `short c`; - access `b` -> `long 1`; + load from `b` -> `long 1`; explicit cast `long 1` to `short 1` -> `short 1`; - assign `short 1` value to `c` + store `short 1` value to `c` <4> declare `double e`; - access `a` -> `int 1`; + load from `a` -> `int 1`; explicit cast `int 1` to `double 1.0`; - assign `double 1.0` to `e`; + store `double 1.0` to `e`; (note the explicit cast is extraneous since an implicit cast is valid) + * Invalid numeric type casts resulting in errors. @@ -106,9 +110,9 @@ following table: *error* -> cannot implicit cast `double 1.0` to `int 1`; (note an explicit cast is valid) <2> declare `int b`; - assign `int 2` to `b` + store `int 2` to `b` <3> declare byte `c`; - access `b` -> `int 2`; + load from `b` -> `int 2`; *error* -> cannot implicit cast `int 2` to `byte 2`; (note an explicit cast is valid) @@ -136,21 +140,21 @@ or the target type is a descendant of the original type. 
---- + <1> declare `List x`; - assign default value `null` to `x` + store default value `null` to `x` <2> declare `ArrayList y`; allocate `ArrayList` instance -> `ArrayList reference`; - assign `ArrayList reference` to `y`; -<3> access `y` -> `ArrayList reference`; + store `ArrayList reference` to `y`; +<3> load from `y` -> `ArrayList reference`; implicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `x`; + store `List reference` to `x`; (note `ArrayList` is a descendant of `List`) -<4> access `x` -> `List reference`; +<4> load from `x` -> `List reference`; explicit cast `List reference` to `ArrayList reference` -> `ArrayList reference`; - assign `ArrayList reference` to `y`; -<5> access `y` -> `ArrayList reference`; + store `ArrayList reference` to `y`; +<5> load from `y` -> `ArrayList reference`; explicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `x`; + store `List reference` to `x`; (note the explicit cast is extraneous, and an implicit cast is valid) + * Invalid reference type casts resulting in errors. @@ -165,16 +169,16 @@ or the target type is a descendant of the original type. 
<1> declare `List x`; allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `x` + store `List reference` to `x` <2> declare `ArrayList y`; - access `x` -> `List reference`; + load from `x` -> `List reference`; *error* -> cannot implicit cast `List reference` to `ArrayList reference`; (note an explicit cast is valid since `ArrayList` is a descendant of `List`) <3> declare `ArrayList y`; - access `x` -> `List reference`; + load from `x` -> `List reference`; *error* -> cannot explicit cast `List reference` to `Map reference`; - (note no cast would be valid since neither `List` nor `Map` is a descendant - of the other) + (note no cast is valid since neither `List` nor `Map` is a descendant of the + other) [[dynamic-type-casting]] ==== Dynamic Type Casting @@ -206,24 +210,24 @@ based on the current type value the `def` type value represents. + <1> declare `def d0`; implicit cast `int 3` to `def`; - assign `int 3` to `d0` + store `int 3` to `d0` <2> allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `def` -> `def`; - assign `def` to `d0` + store `def` to `d0` <3> declare `Object o`; allocate `HashMap` instance -> `HashMap reference`; implicit cast `HashMap reference` to `Object reference` -> `Object reference`; - assign `Object reference` to `o` + store `Object reference` to `o` <4> declare `def d1`; - access `o` -> `Object reference`; + load from `o` -> `Object reference`; implicit cast `Object reference` to `def` -> `def`; - assign `def` to `d1` + store `def` to `d1` <5> declare `int i`; - access `d1` -> `def`; + load from `d1` -> `def`; implicit cast `def` to `HashMap reference` -> HashMap reference`; call `size` on `HashMap reference` -> `int 0`; - assign `int 0` to `i`; + store `int 0` to `i`; (note `def` was implicit cast to `HashMap reference` since `HashMap` is the child-most descendant type value that the `def` 
type value represents) @@ -242,29 +246,29 @@ based on the current type value the `def` type value represents. + <1> declare `def d`; implicit cast `double 1.0` to `def` -> `def`; - assign `def` to `d` + store `def` to `d` <2> declare `int i`; - access `d` -> `def`; + load from `d` -> `def`; implicit cast `def` to `double 1.0` -> `double 1.0`; explicit cast `double 1.0` to `int 1` -> `int 1`; - assign `int 1` to `i`; - (note the explicit cast is necessary since a `double` value cannot be - converted to an `int` value implicitly) -<3> assign `int 1` to `d`; + store `int 1` to `i`; + (note the explicit cast is necessary since a `double` type value is not + converted to an `int` type value implicitly) +<3> store `int 1` to `d`; (note the switch in the type `d` represents from `double` to `int`) <4> declare `float i`; - access `d` -> `def`; + load from `d` -> `def`; implicit cast `def` to `int 1` -> `int 1`; implicit cast `int 1` to `float 1.0` -> `float 1.0`; - assign `float 1.0` to `f` + store `float 1.0` to `f` <5> allocate `ArrayList` instance -> `ArrayList reference`; - assign `ArrayList reference` to `d`; + store `ArrayList reference` to `d`; (note the switch in the type `d` represents from `int` to `ArrayList`) <6> declare `List l`; - access `d` -> `def`; + load from `d` -> `def`; implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; implicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `l` + store `List reference` to `l` + * Invalid dynamic type casts resulting in errors. + @@ -277,26 +281,26 @@ based on the current type value the `def` type value represents. 
---- <1> declare `def d`; implicit cast `int 1` to `def` -> `def`; - assign `def` to `d` + store `def` to `d` <2> declare `short s`; - access `d` -> `def`; + load from `d` -> `def`; implicit cast `def` to `int 1` -> `int 1`; *error* -> cannot implicit cast `int 1` to `short 1`; (note an explicit cast is valid) <3> allocate `HashMap` instance -> `HashMap reference`; implicit cast `HashMap reference` to `def` -> `def`; - assign `def` to `d` + store `def` to `d` <4> declare `List l`; - access `d` -> `def`; + load from `d` -> `def`; implicit cast `def` to `HashMap reference`; *error* -> cannot implicit cast `HashMap reference` to `List reference`; - (note no cast would be valid since neither `HashMap` nor `List` is a - descendant of the other) + (note no cast is valid since neither `HashMap` nor `List` is a descendant of + the other) [[string-character-casting]] ==== String to Character Casting -Use the *cast operator* to convert a <> value into a +Use the cast operator to convert a <> value into a <> value. *Errors* @@ -310,17 +314,17 @@ Use the *cast operator* to convert a <> value into a + [source,Painless] ---- -<1> char c = (char)"C" -<2> c = (char)'c' +<1> char c = (char)"C"; +<2> c = (char)'c'; ---- + <1> declare `char c`; explicit cast `String "C"` to `char C` -> `char C`; - assign `char C` to `c` + store `char C` to `c` <2> explicit cast `String 'c'` to `char c` -> `char c`; - assign `char c` to `c` + store `char c` to `c` + -* Casting a `String` reference into a `char` value. +* Casting a `String` reference into a `char` type value. 
+ [source,Painless] ---- @@ -328,11 +332,11 @@ Use the *cast operator* to convert a <> value into a <2> char c = (char)s; ---- <1> declare `String s`; - assign `String "s"` to `s`; + store `String "s"` to `s`; <2> declare `char c` - access `s` -> `String "s"`; + load from `s` -> `String "s"`; explicit cast `String "s"` to `char s` -> `char s`; - assign `char s` to `c` + store `char s` to `c` [[boxing-unboxing]] ==== Boxing and Unboxing @@ -343,12 +347,12 @@ reference type to its corresponding primitive type. Implicit boxing/unboxing occurs during the following operations: -* Conversions between a `def` type and a primitive type will be implicitly +* Conversions between a `def` type and a primitive type are implicitly boxed/unboxed as necessary, though this is referred to as an implicit cast throughout the documentation. -* Method/function call arguments will be implicitly boxed/unboxed as necessary. -* A primitive type value will be implicitly boxed when a reference type method - call is invoked on it. +* Method/function call arguments are implicitly boxed/unboxed as necessary. +* A primitive type value is implicitly boxed when a reference type method + is called on it. Explicit boxing/unboxing is not allowed. Use the reference type API to explicitly convert a primitive type value to its respective reference type @@ -372,22 +376,22 @@ value and vice versa. 
+ <1> declare `List l`; allocate `ArrayList` instance -> `ArrayList reference`; - assign `ArrayList reference` to `l`; -<2> access `l` -> `List reference`; + store `ArrayList reference` to `l`; +<2> load from `l` -> `List reference`; implicit cast `int 1` to `def` -> `def`; call `add` on `List reference` with arguments (`def`); (note internally `int 1` is boxed to `Integer 1` to store as a `def` type value) <3> declare `Integer I`; call `valueOf` on `Integer` with arguments of (`int 0`) -> `Integer 0`; - assign `Integer 0` to `I`; + store `Integer 0` to `I`; <4> declare `int i`; - access `I` -> `Integer 0`; + load from `I` -> `Integer 0`; unbox `Integer 0` -> `int 0`; - access `l` -> `List reference`; + load from `l` -> `List reference`; call `get` on `List reference` with arguments (`int 0`) -> `def`; implicit cast `def` to `int 1` -> `int 1`; - assign `int 1` to `i`; + store `int 1` to `i`; (note internally `int 1` is unboxed from `Integer 1` when loaded from a `def` type value) + @@ -419,8 +423,8 @@ Promotion is when a single value is implicitly cast to a certain type or multiple values are implicitly cast to the same type as required for evaluation by certain operations. Each operation that requires promotion has a promotion table that shows all required implicit casts based on the type(s) of value(s). A -value can be promoted to a `def` type at compile-time; however, the promoted -type value is derived from what the `def` type value represents at run-time. +value promoted to a `def` type at compile-time is promoted again at run-time +based on the type the `def` value represents. *Errors* @@ -438,19 +442,83 @@ type value is derived from what the `def` type value represents at run-time. 
<3> float f = x + 2.0F; ---- <1> declare `double d`; - promote `int 2` and `double 2.0 @0` -> `double 2.0 @0`; + promote `int 2` and `double 2.0 @0`: result `double`; implicit cast `int 2` to `double 2.0 @1` -> `double 2.0 @1`; add `double 2.0 @1` and `double 2.0 @0` -> `double 4.0`; - assign `double 4.0` to `d` + store `double 4.0` to `d` <2> declare `def x`; implicit cast `int 1` to `def` -> `def`; - assign `def` to `x`; + store `def` to `x`; <3> declare `float f`; - access `x` -> `def`; + load from `x` -> `def`; implicit cast `def` to `int 1` -> `int 1`; - promote `int 1` and `float 2.0` -> `float 2.0`; + promote `int 1` and `float 2.0`: result `float`; implicit cast `int 1` to `float 1.0` -> `float `1.0`; add `float 1.0` and `float 2.0` -> `float 3.0`; - assign `float 3.0` to `f`; + store `float 3.0` to `f`; (note this example illustrates promotion done at run-time as promotion done at compile-time would have resolved to a `def` type value) + +[[allowed-casts]] +==== Allowed Casts + +The following tables show all allowed casts. Read the tables row by row, where +the original type is shown in the first column, and each subsequent column +indicates whether a cast to the specified target type is implicit (I), explicit +(E), or is not allowed (-). 
+ +*Primitive/Reference Types* + +[cols="<3,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | o | b | s | c | i | j | f | d | O | B | S | C | I | L | F | D | T | R | def +| boolean ( o ) | | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | I +| byte ( b ) | - | | I | I | I | I | I | I | - | - | - | - | - | - | - | - | - | - | I +| short ( s ) | - | E | | E | I | I | I | I | - | - | - | - | - | - | - | - | - | - | I +| char ( c ) | - | E | E | | I | I | I | I | - | - | - | - | - | - | - | - | E | - | I +| int ( i ) | - | E | E | E | | I | I | I | - | - | - | - | - | - | - | - | - | - | I +| long ( j ) | - | E | E | E | E | | I | I | - | - | - | - | - | - | - | - | - | - | I +| float ( f ) | - | E | E | E | E | E | | I | - | - | - | - | - | - | - | - | - | - | I +| double ( d ) | - | E | E | E | E | E | E | | - | - | - | - | - | - | - | - | - | - | I +| Boolean ( O ) | - | - | - | - | - | - | - | - | - | - | - | | - | - | - | - | - | - | I +| Byte ( B ) | - | - | - | - | - | - | - | - | - | | - | - | - | - | - | - | - | - | I +| Short ( S ) | - | - | - | - | - | - | - | - | - | - | | - | - | - | - | - | - | - | I +| Character ( C ) | - | - | - | - | - | - | - | - | - | - | - | | - | - | - | - | - | - | I +| Integer ( I ) | - | - | - | - | - | - | - | - | - | - | - | - | | - | - | - | - | - | I +| Long ( L ) | - | - | - | - | - | - | - | - | - | - | - | - | - | | - | - | - | - | I +| Float ( F ) | - | - | - | - | - | - | - | - | - | - | - | - | - | - | | - | - | - | I +| Double ( D ) | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | | - | - | I +| String ( T ) | - | - | - | E | - | - | - | - | - | - | - | - | - | - | - | - | | - | I +| Reference ( R ) | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | @ | I +|==== + +@ See <> for allowed reference + type casts. 
+ +*`def` Type* + +[cols="<3,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | o | b | s | c | i | j | f | d | O | B | S | C | I | L | F | D | T | R | def +| def as boolean | I | - | - | - | - | - | - | - | I | - | - | - | - | - | - | - | - | - | +| def as byte | - | I | I | I | I | I | I | I | - | I | I | I | I | I | I | I | - | - | +| def as short | - | E | I | E | I | I | I | I | - | E | I | E | I | I | I | I | - | - | +| def as char | - | E | E | I | I | I | I | I | - | E | E | I | I | I | I | I | E | - | +| def as int | - | E | E | E | I | I | I | I | - | E | E | E | I | I | I | I | - | - | +| def as long | - | E | E | E | E | I | I | I | - | E | E | E | E | I | I | I | - | - | +| def as float | - | E | E | E | E | E | I | I | - | E | E | E | E | E | I | I | - | - | +| def as double | - | E | E | E | E | E | E | I | - | E | E | E | E | E | E | I | - | - | +| def as Boolean | I | - | - | - | - | - | - | - | I | - | - | - | | - | - | - | - | - | +| def as Byte | - | I | I | I | I | I | I | I | - | I | I | I | I | I | I | I | - | - | +| def as Short | - | E | I | E | I | I | I | I | - | E | I | E | I | I | I | I | - | - | +| def as Character | - | E | E | I | I | I | I | I | - | E | E | I | I | I | I | I | - | - | +| def as Integer | - | E | E | E | I | I | I | I | - | E | E | E | I | I | I | I | - | - | +| def as Long | - | E | E | E | E | I | I | I | - | E | E | E | E | I | I | I | - | - | +| def as Float | - | E | E | E | E | E | I | I | - | E | E | E | E | E | I | I | - | - | +| def as Double | - | E | E | E | E | E | E | I | - | E | E | E | E | E | E | I | - | - | +| def as String | - | - | - | E | - | - | - | - | - | - | - | - | - | - | - | - | I | - | +| def as Reference | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | - | @ | +|==== + +@ See <> for allowed reference + type casts. 
diff --git a/docs/painless/painless-comments.asciidoc b/docs/painless/painless-comments.asciidoc index bde30e37e04..bfd3594431e 100644 --- a/docs/painless/painless-comments.asciidoc +++ b/docs/painless/painless-comments.asciidoc @@ -6,9 +6,10 @@ anywhere on a line to specify a single-line comment. All characters from the `//` token to the end of the line are ignored. Use an opening `/*` token and a closing `*/` token to specify a multi-line comment. Multi-line comments can start anywhere on a line, and all characters in between the `/*` token and `*/` -token are ignored. Comments can be included anywhere within a script. +token are ignored. A comment is included anywhere within a script. *Grammar* + [source,ANTLR4] ---- SINGLE_LINE_COMMENT: '//' .*? [\n\r]; diff --git a/docs/painless/painless-functions.asciidoc b/docs/painless/painless-functions.asciidoc new file mode 100644 index 00000000000..20f3e821f1e --- /dev/null +++ b/docs/painless/painless-functions.asciidoc @@ -0,0 +1,24 @@ +[[painless-functions]] +=== Functions + +A function is a named piece of code comprised of one-to-many statements to +perform a specific task. A function is called multiple times in a single script +to repeat its specific task. A parameter is a named type value available as a +<> within the statement(s) of a function. A +function specifies zero-to-many parameters, and when a function is called a +value is specified per parameter. An argument is a value passed into a function +at the point of call. A function specifies a return type value, though if the +type is <> then no value is returned. Any non-void type return +value is available for use within an <> or is +discarded otherwise. + +You can declare functions at the beginning of a Painless script, for example: + +[source,painless] +--------------------------------------------------------- +boolean isNegative(def x) { x < 0 } +... +if (isNegative(someVar)) { + ... 
+} +--------------------------------------------------------- \ No newline at end of file diff --git a/docs/painless/painless-general-syntax.asciidoc b/docs/painless/painless-general-syntax.asciidoc deleted file mode 100644 index 114bff80bfa..00000000000 --- a/docs/painless/painless-general-syntax.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -[[painless-general-syntax]] -=== General Syntax - -[[control-flow]] -==== Control flow - -Painless supports all of Java's https://docs.oracle.com/javase/tutorial/java/nutsandbolts/flow.html[ -control flow statements] except the `switch` statement. - -Painless also supports the `for in` syntax from Groovy: - -[source,painless] ---------------------------------------------------------- -for (item : list) { - ... -} ---------------------------------------------------------- - -[[functions]] -==== Functions - -You can declare functions at the beginning of a Painless script, for example: - -[source,painless] ---------------------------------------------------------- -boolean isNegative(def x) { x < 0 } -... -if (isNegative(someVar)) { - ... -} ---------------------------------------------------------- - -[[lambda-expressions]] -==== Lambda expressions -Lambda expressions and method references work the same as in https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html[Java]. - -[source,painless] ---------------------------------------------------------- -list.removeIf(item -> item == 2); -list.removeIf((int item) -> item == 2); -list.removeIf((int item) -> { item == 2 }); -list.sort((x, y) -> x - y); -list.sort(Integer::compare); ---------------------------------------------------------- - -You can make method references to functions within the script with `this`, -for example `list.sort(this::mycompare)`. - -[[patterns]] -==== Patterns - -Regular expression constants are directly supported. To ensure fast performance, -this is the only mechanism for creating patterns. 
Regular expressions -are always constants and compiled efficiently a single time. - -[source,painless] ---------------------------------------------------------- -Pattern p = /[aeiou]/ ---------------------------------------------------------- - -[[pattern-flags]] -===== Pattern flags - -You can define flags on patterns in Painless by adding characters after the -trailing `/` like `/foo/i` or `/foo \w #comment/iUx`. Painless exposes all of -the flags from Java's -https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[ -Pattern class] using these characters: - -[cols="<,<,<",options="header",] -|======================================================================= -| Character | Java Constant | Example -|`c` | CANON_EQ | `'å' ==~ /å/c` (open in hex editor to see) -|`i` | CASE_INSENSITIVE | `'A' ==~ /a/i` -|`l` | LITERAL | `'[a]' ==~ /[a]/l` -|`m` | MULTILINE | `'a\nb\nc' =~ /^b$/m` -|`s` | DOTALL (aka single line) | `'a\nb\nc' =~ /.b./s` -|`U` | UNICODE_CHARACTER_CLASS | `'Ɛ' ==~ /\\w/U` -|`u` | UNICODE_CASE | `'Ɛ' ==~ /ɛ/iu` -|`x` | COMMENTS (aka extended) | `'a' ==~ /a #comment/x` -|======================================================================= diff --git a/docs/painless/painless-identifiers.asciidoc b/docs/painless/painless-identifiers.asciidoc index 7762f56cb7b..d2678b528ea 100644 --- a/docs/painless/painless-identifiers.asciidoc +++ b/docs/painless/painless-identifiers.asciidoc @@ -3,8 +3,12 @@ Use an identifier as a named token to specify a <>, <>, -<>, <>, or function. -<> cannot be used as identifiers. +<>, <>, or +<>. + +*Errors* + +If a <<painless-keywords, keyword>> is used as an identifier. 
*Grammar* [source,ANTLR4] ---- diff --git a/docs/painless/painless-keywords.asciidoc b/docs/painless/painless-keywords.asciidoc index 39a2201fd2b..9463902c8d3 100644 --- a/docs/painless/painless-keywords.asciidoc +++ b/docs/painless/painless-keywords.asciidoc @@ -1,9 +1,13 @@ [[painless-keywords]] === Keywords -Keywords are reserved tokens for built-in language features and cannot be used -as <> within a script. The following are -keywords: +Keywords are reserved tokens for built-in language features. + +*Errors* + +If a keyword is used as an <<painless-identifiers, identifier>>. + +*Keywords* [cols="^1,^1,^1,^1,^1"] |==== diff --git a/docs/painless/painless-lambdas.asciidoc b/docs/painless/painless-lambdas.asciidoc new file mode 100644 index 00000000000..e6694229a0c --- /dev/null +++ b/docs/painless/painless-lambdas.asciidoc @@ -0,0 +1,15 @@ +[[painless-lambdas]] +=== Lambdas +Lambda expressions and method references work the same as in https://docs.oracle.com/javase/tutorial/java/javaOO/lambdaexpressions.html[Java]. + +[source,painless] +--------------------------------------------------------- +list.removeIf(item -> item == 2); +list.removeIf((int item) -> item == 2); +list.removeIf((int item) -> { item == 2 }); +list.sort((x, y) -> x - y); +list.sort(Integer::compare); +--------------------------------------------------------- + +You can make method references to functions within the script with `this`, +for example `list.sort(this::mycompare)`. 
\ No newline at end of file diff --git a/docs/painless/painless-lang-spec.asciidoc b/docs/painless/painless-lang-spec.asciidoc index 5e6b84d8c57..d50f3db2dc0 100644 --- a/docs/painless/painless-lang-spec.asciidoc +++ b/docs/painless/painless-lang-spec.asciidoc @@ -33,4 +33,22 @@ include::painless-casting.asciidoc[] include::painless-operators.asciidoc[] -include::painless-general-syntax.asciidoc[] +include::painless-operators-general.asciidoc[] + +include::painless-operators-numeric.asciidoc[] + +include::painless-operators-boolean.asciidoc[] + +include::painless-operators-reference.asciidoc[] + +include::painless-operators-array.asciidoc[] + +include::painless-statements.asciidoc[] + +include::painless-scripts.asciidoc[] + +include::painless-functions.asciidoc[] + +include::painless-lambdas.asciidoc[] + +include::painless-regexes.asciidoc[] diff --git a/docs/painless/painless-literals.asciidoc b/docs/painless/painless-literals.asciidoc index ebf7eaa07b6..621fc152be9 100644 --- a/docs/painless/painless-literals.asciidoc +++ b/docs/painless/painless-literals.asciidoc @@ -4,7 +4,7 @@ Use a literal to specify a value directly in an <>. -[[integers]] +[[integer-literals]] ==== Integers Use an integer literal to specify an integer type value in decimal, octal, or @@ -16,6 +16,7 @@ to specify an integer literal as octal, and use `0x` or `0X` as a prefix to specify an integer literal as hex. *Grammar* + [source,ANTLR4] ---- INTEGER: '-'? ( '0' | [1-9] [0-9]* ) [lLfFdD]?; @@ -44,7 +45,7 @@ HEX: '-'? '0' [xX] [0-9a-fA-F]+ [lL]?; <5> `int -18` in octal <6> `int 3882` in hex -[[floats]] +[[float-literals]] ==== Floats Use a floating point literal to specify a floating point type value of a @@ -53,6 +54,7 @@ single letter designations to specify the primitive type: `f` or `F` for `float` and `d` or `D` for `double`. If not specified, the type defaults to `double`. *Grammar* + [source,ANTLR4] ---- DECIMAL: '-'? ( '0' | [1-9] [0-9]* ) (DOT [0-9]+)? EXPONENT? 
[fFdD]?; @@ -78,7 +80,7 @@ EXPONENT: ( [eE] [+\-]? [0-9]+ ); <4> `double -126.34` <5> `float 89.9` -[[strings]] +[[string-literals]] ==== Strings Use a string literal to specify a <> value with @@ -88,6 +90,7 @@ include a single-quote as part of a single-quoted string literal. Use a `\\` token to include a backslash as part of any string literal. *Grammar* + [source,ANTLR4] ---- STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) @@ -114,9 +117,9 @@ STRING: ( '"' ( '\\"' | '\\\\' | ~[\\"] )*? '"' ) "double-quoted with non-escaped 'single-quotes'" ---- -[[characters]] +[[character-literals]] ==== Characters -A character literal cannot be specified directly. Instead, use the +Character literals are not specified directly. Instead, use the <> to convert a `String` type value into a `char` type value. diff --git a/docs/painless/painless-operators-array.asciidoc b/docs/painless/painless-operators-array.asciidoc new file mode 100644 index 00000000000..e91c07acef5 --- /dev/null +++ b/docs/painless/painless-operators-array.asciidoc @@ -0,0 +1,294 @@ +[[painless-operators-array]] +=== Operators: Array + +[[array-initialization-operator]] +==== Array Initialization + +Use the `array initialization operator '[] {}'` to allocate a single-dimensional +<> instance to the heap with a set of pre-defined +elements. Each value used to initialize an element in the array type instance is +cast to the specified element type value upon insertion. The order of specified +values is maintained. + +*Errors* + +* If a value is not castable to the specified type value. + +*Grammar* + +[source,ANTLR4] +---- +array_initialization: 'new' TYPE '[' ']' '{' expression_list '}' + | 'new' TYPE '[' ']' '{' '}'; +expression_list: expression (',' expression); +---- + +*Example:* + +* Array initialization with static values. 
++ +[source,Painless] +---- +<1> int[] x = new int[] {1, 2, 3}; +---- ++ +<1> declare `int[] x`; + allocate `1-d int array` instance with `length [3]` + -> `1-d int array reference`; + store `int 1` to `index [0]` of `1-d int array reference`; + store `int 2` to `index [1]` of `1-d int array reference`; + store `int 3` to `index [2]` of `1-d int array reference`; + store `1-d int array reference` to `x`; ++ +* Array initialization with non-static values. ++ +[source,Painless] +---- +<1> int i = 1; +<2> long l = 2L; +<3> float f = 3.0F; +<4> double d = 4.0; +<5> String s = "5"; +<6> def array = new def[] {i, l, f*d, s}; +---- ++ +<1> declare `int i`; + store `int 1` to `i` +<2> declare `long l`; + store `long 2` to `l` +<3> declare `float f`; + store `float 3.0` to `f` +<4> declare `double d`; + store `double 4.0` to `d` +<5> declare `String s`; + store `String "5"` to `s` +<6> declare `def array`; + allocate `1-d def array` instance with `length [4]` + -> `1-d def array reference`; + load from `i` -> `int 1`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `index [0]` of `1-d def array reference`; + load from `l` -> `long 2`; + implicit cast `long 2` to `def` -> `def`; + store `def` to `index [1]` of `1-d def array reference`; + load from `f` -> `float 3.0`; + load from `d` -> `double 4.0`; + promote `float 3.0` and `double 4.0`: result `double`; + implicit cast `float 3.0` to `double 3.0` -> `double 3.0`; + multiply `double 3.0` and `double 4.0` -> `double 12.0`; + implicit cast `double 12.0` to `def` -> `def`; + store `def` to `index [2]` of `1-d def array reference`; + load from `s` -> `String "5"`; + implicit cast `String "5"` to `def` -> `def`; + store `def` to `index [3]` of `1-d def array reference`; + implicit cast `1-d int array reference` to `def` -> `def`; + store `def` to `array` + +[[array-access-operator]] +==== Array Access + +Use the `array access operator '[]'` to store a value to or load a value from +an <> value. 
Each element of an array type value is +accessed with an `int` type value to specify the index to store/load. The range +of elements within an array that are accessible is `[0, size)` where size is the +number of elements specified at the time of allocation. Use a negative `int` +type value as an index to access an element in reverse from the end of an array +type value within a range of `[-size, -1]`. + +*Errors* + +* If a value other than an `int` type value or a value that is castable to an + `int` type value is provided as an index. +* If an element is accessed outside of the valid ranges. + +*Grammar* + +[source,ANTLR4] +---- +brace_access: '[' expression ']' +---- + +*Examples* + +* Array access with a single-dimensional array. ++ +[source,Painless] +---- +<1> int[] x = new int[2]; +<2> x[0] = 2; +<3> x[1] = 5; +<4> int y = x[0] + x[1]; +<5> int z = 1; +<6> int i = x[z]; +---- ++ +<1> declare `int[] x`; + allocate `1-d int array` instance with `length [2]` + -> `1-d int array reference`; + store `1-d int array reference` to `x` +<2> load from `x` -> `1-d int array reference`; + store `int 2` to `index [0]` of `1-d int array reference`; +<3> load from `x` -> `1-d int array reference`; + store `int 5` to `index [1]` of `1-d int array reference`; +<4> declare `int y`; + load from `x` -> `1-d int array reference`; + load from `index [0]` of `1-d int array reference` -> `int 2`; + load from `x` -> `1-d int array reference`; + load from `index [1]` of `1-d int array reference` -> `int 5`; + add `int 2` and `int 5` -> `int 7`; + store `int 7` to `y` +<5> declare `int z`; + store `int 1` to `z`; +<6> declare `int i`; + load from `x` -> `1-d int array reference`; + load from `z` -> `int 1`; + load from `index [1]` of `1-d int array reference` -> `int 5`; + store `int 5` to `i`; ++ +* Array access with the `def` type. 
++ +[source,Painless] +---- +<1> def d = new int[2]; +<2> d[0] = 2; +<3> d[1] = 5; +<4> def x = d[0] + d[1]; +<5> def y = 1; +<6> def z = d[y]; +---- ++ +<1> declare `def d`; + allocate `1-d int array` instance with `length [2]` + -> `1-d int array reference`; + implicit cast `1-d int array reference` to `def` -> `def`; + store `def` to `d` +<2> load from `d` -> `def` + implicit cast `def` to `1-d int array reference` + -> `1-d int array reference`; + store `int 2` to `index [0]` of `1-d int array reference`; +<3> load from `d` -> `def` + implicit cast `def` to `1-d int array reference` + -> `1-d int array reference`; + store `int 5` to `index [1]` of `1-d int array reference`; +<4> declare `def x`; + load from `d` -> `def` + implicit cast `def` to `1-d int array reference` + -> `1-d int array reference`; + load from `index [0]` of `1-d int array reference` -> `int 2`; + load from `d` -> `def` + implicit cast `def` to `1-d int array reference` + -> `1-d int array reference`; + load from `index [1]` of `1-d int array reference` -> `int 5`; + add `int 2` and `int 5` -> `int 7`; + implicit cast `int 7` to `def` -> `def`; + store `def` to `x` +<5> declare `def y`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `y`; +<6> declare `def z`; + load from `d` -> `def` + implicit cast `def` to `1-d int array reference` + -> `1-d int array reference`; + load from `y` -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + load from `index [1]` of `1-d int array reference` -> `int 5`; + implicit cast `int 5` to `def`; + store `def` to `z`; ++ +* Array access with a multi-dimensional array. 
++ +[source,Painless] +---- +<1> int[][][] ia3 = new int[2][3][4]; +<2> ia3[1][2][3] = 99; +<3> int i = ia3[1][2][3]; +---- ++ +<1> declare `int[][][] ia3`; + allocate `3-d int array` instance with length `[2, 3, 4]` + -> `3-d int array reference`; + store `3-d int array reference` to `ia3` +<2> load from `ia3` -> `3-d int array reference`; + store `int 99` to `index [1, 2, 3]` of `3-d int array reference` +<3> declare `int i`; + load from `ia3` -> `3-d int array reference`; + load from `index [1, 2, 3]` of `3-d int array reference` -> `int 99`; + store `int 99` to `i` + +[[array-length-operator]] +==== Array Length + +An array type value contains a read-only member field named `length`. The +`length` field stores the size of the array as an `int` type value where size is +the number of elements specified at the time of allocation. Use the +<> to load the field `length` +from an array type value. + +*Examples* + +* Access the `length` field. ++ +[source,Painless] +---- +<1> int[] x = new int[10]; +<2> int l = x.length; +---- +<1> declare `int[] x`; + allocate `1-d int array` instance with `length [10]` + -> `1-d int array reference`; + store `1-d int array reference` to `x` +<2> declare `int l`; + load `x` -> `1-d int array reference`; + load `length` from `1-d int array reference` -> `int 10`; + store `int 10` to `l`; + +[[new-array-operator]] +==== New Array + +Use the `new array operator 'new []'` to allocate an array type instance to +the heap. Specify the element type following the `new` token. Specify each +dimension with the `[` and `]` tokens following the element type name. The size +of each dimension is specified by an `int` type value in between each set of `[` +and `]` tokens. + +*Errors* + +* If a value other than an `int` type value or a value that is castable to an + `int` type value is specified for a dimension's size. 
+ +*Grammar* + +[source,ANTLR4] +---- +new_array: 'new' TYPE ('[' expression ']')+; +---- + +*Examples* + +* Allocation of different array types. ++ +[source,Painless] +---- +<1> int[] x = new int[5]; +<2> x = new int[10]; +<3> int y = 2; +<4> def z = new def[y][y*2]; +---- ++ +<1> declare `int[] x`; + allocate `1-d int array` instance with `length [5]` + -> `1-d int array reference`; + store `1-d int array reference` to `x` +<2> allocate `1-d int array` instance with `length [10]` + -> `1-d int array reference`; + store `1-d int array reference` to `x` +<3> declare `int y`; + store `int 2` to `y`; +<4> declare `def z`; + load from `y` -> `int 2 @0`; + load from `y` -> `int 2 @1`; + multiply `int 2 @1` by `int 2 @2` -> `int 4`; + allocate `2-d int array` instance with length `[2, 4]` + -> `2-d int array reference`; + implicit cast `2-d int array reference` to `def` -> `def`; + store `def` to `z`; diff --git a/docs/painless/painless-operators-boolean.asciidoc b/docs/painless/painless-operators-boolean.asciidoc new file mode 100644 index 00000000000..1223a8d56e7 --- /dev/null +++ b/docs/painless/painless-operators-boolean.asciidoc @@ -0,0 +1,1420 @@ +[[painless-operators-boolean]] +=== Operators: Boolean + +[[boolean-not-operator]] +==== Boolean Not + +Use the `boolean not operator '!'` to NOT a `boolean` type value where `true` is +flipped to `false` and `false` is flipped to `true`. + +*Errors* + +* If a value other than a `boolean` type value or a value that is castable to a + `boolean` type value is given. + +*Truth* + +[options="header",cols="<1,<1"] +|==== +| original | result +| true | false +| false | true +|==== + +*Grammar* + +[source,ANTLR4] +---- +boolean_not: '!' expression; +---- + +*Examples* + +* Boolean not with the `boolean` type. 
++ +[source,Painless] +---- +<1> boolean x = !false; +<2> boolean y = !x; +---- +<1> declare `boolean x`; + boolean not `boolean false` -> `boolean true`; + store `boolean true` to `x` +<2> declare `boolean y`; + load from `x` -> `boolean true`; + boolean not `boolean true` -> `boolean false`; + store `boolean false` to `y` ++ +* Boolean not with the `def` type. ++ +[source,Painless] +---- +<1> def y = true; +<2> def z = !y; +---- ++ +<1> declare `def y`; + implicit cast `boolean true` to `def` -> `def`; + store `true` to `y` +<2> declare `def z`; + load from `y` -> `def`; + implicit cast `def` to `boolean true` -> boolean `true`; + boolean not `boolean true` -> `boolean false`; + implicit cast `boolean false` to `def` -> `def`; + store `def` to `z` + +[[greater-than-operator]] +==== Greater Than + +Use the `greater than operator '>'` to COMPARE two numeric type values where a +resultant `boolean` type value is `true` if the left-hand side value is greater +than to the right-hand side value and `false` otherwise. + +*Errors* + +* If either the evaluated left-hand side or the evaluated right-hand side is a + non-numeric value. + +*Grammar* + +[source,ANTLR4] +---- +greater_than: expression '>' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | def +| byte | int | int | int | int | long | float | double | def +| short | int | int | int | int | long | float | double | def +| char | int | int | int | int | long | float | double | def +| int | int | int | int | int | long | float | double | def +| long | long | long | long | long | long | float | double | def +| float | float | float | float | float | float | float | double | def +| double | double | double | double | double | double | double | double | def +| def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Greater than with different numeric types. 
++ +[source,Painless] +---- +<1> boolean x = 5 > 4; +<2> double y = 6.0; +<3> x = 6 > y; +---- ++ +<1> declare `boolean x`; + greater than `int 5` and `int 4` -> `boolean true`; + store `boolean true` to `x`; +<2> declare `double y`; + store `double 6.0` to `y`; +<3> load from `y` -> `double 6.0 @0`; + promote `int 6` and `double 6.0`: result `double`; + implicit cast `int 6` to `double 6.0 @1` -> `double 6.0 @1`; + greater than `double 6.0 @1` and `double 6.0 @0` -> `boolean false`; + store `boolean false` to `x` ++ +* Greater than with `def` type. ++ +[source,Painless] +---- +<1> int x = 5; +<2> def y = 7.0; +<3> def z = y > 6.5; +<4> def a = x > y; +---- ++ +<1> declare `int x`; + store `int 5` to `x` +<2> declare `def y`; + implicit cast `double 7.0` to `def` -> `def`; + store `def` to `y` +<3> declare `def z`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0` -> `double 7.0`; + greater than `double 7.0` and `double 6.5` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `z` +<4> declare `def a`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0` -> `double 7.0`; + load from `x` -> `int 5`; + promote `int 5` and `double 7.0`: result `double`; + implicit cast `int 5` to `double 5.0` -> `double 5.0`; + greater than `double 5.0` and `double 7.0` -> `boolean false`; + implicit cast `boolean false` to `def` -> `def`; + store `def` to `z` + +[[greater-than-or-equal-operator]] +==== Greater Than Or Equal + +Use the `greater than or equal operator '>='` to COMPARE two numeric type values +where a resultant `boolean` type value is `true` if the left-hand side value is +greater than or equal to the right-hand side value and `false` otherwise. + +*Errors* + +* If either the evaluated left-hand side or the evaluated right-hand side is a + non-numeric value. 
+ +*Grammar* + +[source,ANTLR4] +---- +greater_than_or_equal: expression '>=' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | def +| byte | int | int | int | int | long | float | double | def +| short | int | int | int | int | long | float | double | def +| char | int | int | int | int | long | float | double | def +| int | int | int | int | int | long | float | double | def +| long | long | long | long | long | long | float | double | def +| float | float | float | float | float | float | float | double | def +| double | double | double | double | double | double | double | double | def +| def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Greater than or equal with different numeric types. ++ +[source,Painless] +---- +<1> boolean x = 5 >= 4; +<2> double y = 6.0; +<3> x = 6 >= y; +---- ++ +<1> declare `boolean x`; + greater than or equal `int 5` and `int 4` -> `boolean true`; + store `boolean true` to `x` +<2> declare `double y`; + store `double 6.0` to `y` +<3> load from `y` -> `double 6.0 @0`; + promote `int 6` and `double 6.0`: result `double`; + implicit cast `int 6` to `double 6.0 @1` -> `double 6.0 @1`; + greater than or equal `double 6.0 @1` and `double 6.0 @0` -> `boolean true`; + store `boolean true` to `x` ++ +* Greater than or equal with the `def` type. 
++ +[source,Painless] +---- +<1> int x = 5; +<2> def y = 7.0; +<3> def z = y >= 7.0; +<4> def a = x >= y; +---- ++ +<1> declare `int x`; + store `int 5` to `x`; +<2> declare `def y` + implicit cast `double 7.0` to `def` -> `def`; + store `def` to `y` +<3> declare `def z`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0 @0` -> `double 7.0 @0`; + greater than or equal `double 7.0 @0` and `double 7.0 @1` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `z` +<4> declare `def a`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0` -> `double 7.0`; + load from `x` -> `int 5`; + promote `int 5` and `double 7.0`: result `double`; + implicit cast `int 5` to `double 5.0` -> `double 5.0`; + greater than or equal `double 5.0` and `double 7.0` -> `boolean false`; + implicit cast `boolean false` to `def` -> `def`; + store `def` to `z` + +[[less-than-operator]] +==== Less Than + +Use the `less than operator '<'` to COMPARE two numeric type values where a +resultant `boolean` type value is `true` if the left-hand side value is less +than to the right-hand side value and `false` otherwise. + +*Errors* + +* If either the evaluated left-hand side or the evaluated right-hand side is a + non-numeric value. 
+ +*Grammar* + +[source,ANTLR4] +---- +less_than: expression '<' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | def +| byte | int | int | int | int | long | float | double | def +| short | int | int | int | int | long | float | double | def +| char | int | int | int | int | long | float | double | def +| int | int | int | int | int | long | float | double | def +| long | long | long | long | long | long | float | double | def +| float | float | float | float | float | float | float | double | def +| double | double | double | double | double | double | double | double | def +| def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Less than with different numeric types. ++ +[source,Painless] +---- +<1> boolean x = 5 < 4; +<2> double y = 6.0; +<3> x = 6 < y; +---- ++ +<1> declare `boolean x`; + less than `int 5` and `int 4` -> `boolean false`; + store `boolean false` to `x` +<2> declare `double y`; + store `double 6.0` to `y` +<3> load from `y` -> `double 6.0 @0`; + promote `int 6` and `double 6.0`: result `double`; + implicit cast `int 6` to `double 6.0 @1` -> `double 6.0 @1`; + less than `double 6.0 @1` and `double 6.0 @0` -> `boolean false`; + store `boolean false` to `x` ++ +* Less than with the `def` type. 
++ +[source,Painless] +---- +<1> int x = 5; +<2> def y = 7.0; +<3> def z = y < 6.5; +<4> def a = x < y; +---- ++ +<1> declare `int x`; + store `int 5` to `x` +<2> declare `def y`; + implicit cast `double 7.0` to `def` -> `def`; + store `def` to `y` +<3> declare `def z`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0` -> `double 7.0`; + less than `double 7.0` and `double 6.5` -> `boolean false`; + implicit cast `boolean false` to `def` -> `def`; + store `def` to `z` +<4> declare `def a`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0` -> `double 7.0`; + load from `x` -> `int 5`; + promote `int 5` and `double 7.0`: result `double`; + implicit cast `int 5` to `double 5.0` -> `double 5.0`; + less than `double 5.0` and `double 7.0` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `z` + +[[less-than-or-equal-operator]] +==== Less Than Or Equal + +Use the `less than or equal operator '<='` to COMPARE two numeric type values +where a resultant `boolean` type value is `true` if the left-hand side value is +less than or equal to the right-hand side value and `false` otherwise. + +*Errors* + +* If either the evaluated left-hand side or the evaluated right-hand side is a + non-numeric value. 
+ +*Grammar* + +[source,ANTLR4] +---- +less_than_or_equal: expression '<=' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | def +| byte | int | int | int | int | long | float | double | def +| short | int | int | int | int | long | float | double | def +| char | int | int | int | int | long | float | double | def +| int | int | int | int | int | long | float | double | def +| long | long | long | long | long | long | float | double | def +| float | float | float | float | float | float | float | double | def +| double | double | double | double | double | double | double | double | def +| def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Less than or equal with different numeric types. ++ +[source,Painless] +---- +<1> boolean x = 5 <= 4; +<2> double y = 6.0; +<3> x = 6 <= y; +---- ++ +<1> declare `boolean x`; + less than or equal `int 5` and `int 4` -> `boolean false`; + store `boolean false` to `x` +<2> declare `double y`; + store `double 6.0` to `y` +<3> load from `y` -> `double 6.0 @0`; + promote `int 6` and `double 6.0`: result `double`; + implicit cast `int 6` to `double 6.0 @1` -> `double 6.0 @1`; + less than or equal `double 6.0 @1` and `double 6.0 @0` -> `boolean true`; + store `boolean true` to `x` ++ +* Less than or equal with the `def` type. 
++ +[source,Painless] +---- +<1> int x = 5; +<2> def y = 7.0; +<3> def z = y <= 7.0; +<4> def a = x <= y; +---- ++ +<1> declare `int x`; + store `int 5` to `x`; +<2> declare `def y`; + implicit cast `double 7.0` to `def` -> `def`; + store `def` to `y`; +<3> declare `def z`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0 @0` -> `double 7.0 @0`; + less than or equal `double 7.0 @0` and `double 7.0 @1` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `z` +<4> declare `def a`; + load from `y` -> `def`; + implicit cast `def` to `double 7.0` -> `double 7.0`; + load from `x` -> `int 5`; + promote `int 5` and `double 7.0`: result `double`; + implicit cast `int 5` to `double 5.0` -> `double 5.0`; + less than or equal `double 5.0` and `double 7.0` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `a` + +[[instanceof-operator]] +==== Instanceof + +Use the `instanceof operator` to COMPARE the variable/field type to a +specified reference type using the reference type name where a resultant +`boolean` type value is `true` if the variable/field type is the same as or a +descendant of the specified reference type and `false` otherwise. + +*Errors* + +* If the reference type name doesn't exist as specified by the right-hand side. + +*Grammar* + +[source,ANTLR4] +---- +instance_of: ID 'instanceof' TYPE; +---- + +*Examples* + +* Instance of with different reference types.
++ +[source,Painless] +---- +<1> Map m = new HashMap(); +<2> boolean a = m instanceof HashMap; +<3> boolean b = m instanceof Map; +---- ++ +<1> declare `Map m`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `Map reference`; + store `Map reference` to `m` +<2> declare `boolean a`; + load from `m` -> `Map reference`; + implicit cast `Map reference` to `HashMap reference` -> `HashMap reference`; + instanceof `HashMap reference` and `HashMap` -> `boolean true`; + store `boolean true` to `a` +<3> declare `boolean b`; + load from `m` -> `Map reference`; + implicit cast `Map reference` to `HashMap reference` -> `HashMap reference`; + instanceof `HashMap reference` and `Map` -> `boolean true`; + store `true` to `b`; + (note `HashMap` is a descendant of `Map`) ++ +* Instance of with the `def` type. ++ +[source,Painless] +---- +<1> def d = new ArrayList(); +<2> boolean a = d instanceof List; +<3> boolean b = d instanceof Map; +---- ++ +<1> declare `def d`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def`; + store `def` to `d` +<2> declare `boolean a`; + load from `d` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + instanceof `ArrayList reference` and `List` -> `boolean true`; + store `boolean true` to `a`; + (note `ArrayList` is a descendant of `List`) +<3> declare `boolean b`; + load from `d` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + instanceof `ArrayList reference` and `Map` -> `boolean false`; + store `boolean false` to `a`; + (note `ArrayList` is not a descendant of `Map`) + +[[equality-equals-operator]] +==== Equality Equals + +Use the `equality equals operator '=='` to COMPARE two values where a resultant +`boolean` type value is `true` if the two values are equal and `false` +otherwise. 
The member method, `equals`, is implicitly called when the values are +reference type values where the first value is the target of the call and the +second value is the argument. This operation is null-safe where if both values +are `null` the resultant `boolean` type value is `true`, and if only one value +is `null` the resultant `boolean` type value is `false`. A valid comparison is +between `boolean` type values, numeric type values, or reference type values. + +*Errors* + +* If a comparison is made between a `boolean` type value and numeric type value. +* If a comparison is made between a primitive type value and a reference type + value. + +*Grammar* + +[source,ANTLR4] +---- +equality_equals: expression '==' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | boolean | byte | short | char | int | long | float | double | Reference | def +| boolean | boolean | - | - | - | - | - | - | - | - | def +| byte | - | int | int | int | int | long | float | double | - | def +| short | - | int | int | int | int | long | float | double | - | def +| char | - | int | int | int | int | long | float | double | - | def +| int | - | int | int | int | int | long | float | double | - | def +| long | - | long | long | long | long | long | float | double | - | def +| float | - | float | float | float | float | float | float | double | - | def +| double | - | double | double | double | double | double | double | double | - | def +| Reference | - | - | - | - | - | - | - | - | Object | def +| def | def | def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Equality equals with the `boolean` type. 
++ +[source,Painless] +---- +<1> boolean a = true; +<2> boolean b = false; +<3> a = a == false; +<4> b = a == b; +---- ++ +<1> declare `boolean a`; + store `boolean true` to `a` +<2> declare `boolean b`; + store `boolean false` to `b` +<3> load from `a` -> `boolean true`; + equality equals `boolean true` and `boolean false` -> `boolean false`; + store `boolean false` to `a` +<4> load from `a` -> `boolean false @0`; + load from `b` -> `boolean false @1`; + equality equals `boolean false @0` and `boolean false @1` + -> `boolean false`; + store `boolean false` to `b` ++ +* Equality equals with primitive types. ++ +[source,Painless] +---- +<1> int a = 1; +<2> double b = 2.0; +<3> boolean c = a == b; +<4> c = 1 == a; +---- ++ +<1> declare `int a`; + store `int 1` to `a` +<2> declare `double b`; + store `double 2.0` to `b` +<3> declare `boolean c`; + load from `a` -> `int 1`; + load from `b` -> `double 2.0`; + promote `int 1` and `double 2.0`: result `double`; + implicit cast `int 1` to `double 1.0` -> `double 1.0`; + equality equals `double 1.0` and `double 2.0` -> `boolean false`; + store `boolean false` to `c` +<4> load from `a` -> `int 1 @1`; + equality equals `int 1 @0` and `int 1 @1` -> `boolean true`; + store `boolean true` to `c` ++ +* Equality equals with reference types.
++ +[source,Painless] +---- +<1> List a = new ArrayList(); +<2> List b = new ArrayList(); +<3> a.add(1); +<4> boolean c = a == b; +<5> b.add(1); +<6> c = a == b; +---- ++ +<1> declare `List a`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `a` +<2> declare `List b`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `b` +<3> load from `a` -> `List reference`; + call `add` on `List reference` with arguments (`int 1)` +<4> declare `boolean c`; + load from `a` -> `List reference @0`; + load from `b` -> `List reference @1`; + call `equals` on `List reference @0` with arguments (`List reference @1`) + -> `boolean false`; + store `boolean false` to `c` +<5> load from `b` -> `List reference`; + call `add` on `List reference` with arguments (`int 1`) +<6> load from `a` -> `List reference @0`; + load from `b` -> `List reference @1`; + call `equals` on `List reference @0` with arguments (`List reference @1`) + -> `boolean true`; + store `boolean true` to `c` ++ +* Equality equals with `null`. 
++ +[source,Painless] +---- +<1> Object a = null; +<2> Object b = null; +<3> boolean c = a == null; +<4> c = a == b; +<5> b = new Object(); +<6> c = a == b; +---- ++ +<1> declare `Object a`; + store `null` to `a` +<2> declare `Object b`; + store `null` to `b` +<3> declare `boolean c`; + load from `a` -> `null @0`; + equality equals `null @0` and `null @1` -> `boolean true`; + store `boolean true` to `c` +<4> load from `a` -> `null @0`; + load from `b` -> `null @1`; + equality equals `null @0` and `null @1` -> `boolean true`; + store `boolean true` to `c` +<5> allocate `Object` instance -> `Object reference`; + store `Object reference` to `b` +<6> load from `a` -> `Object reference`; + load from `b` -> `null`; + call `equals` on `Object reference` with arguments (`null`) + -> `boolean false`; + store `boolean false` to `c` ++ +* Equality equals with the `def` type. ++ +[source, Painless] +---- +<1> def a = 0; +<2> def b = 1; +<3> boolean c = a == b; +<4> def d = new HashMap(); +<5> def e = new ArrayList(); +<6> c = d == e; +---- ++ +<1> declare `def a`; + implicit cast `int 0` to `def` -> `def`; + store `def` to `a`; +<2> declare `def b`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `b`; +<3> declare `boolean c`; + load from `a` -> `def`; + implicit cast `a` to `int 0` -> `int 0`; + load from `b` -> `def`; + implicit cast `b` to `int 1` -> `int 1`; + equality equals `int 0` and `int 1` -> `boolean false`; + store `boolean false` to `c` +<4> declare `def d`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def` + store `def` to `d`; +<5> declare `def e`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def` + store `def` to `d`; +<6> load from `d` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + load from `e` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + call `equals` on 
`HashMap reference` with arguments (`ArrayList reference`) + -> `boolean false`; + store `boolean false` to `c` + +[[equality-not-equals-operator]] +==== Equality Not Equals + +Use the `equality not equals operator '!='` to COMPARE two values where a +resultant `boolean` type value is `true` if the two values are NOT equal and +`false` otherwise. The member method, `equals`, is implicitly called when the +values are reference type values where the first value is the target of the call +and the second value is the argument with the resultant `boolean` type value +flipped. This operation is `null-safe` where if both values are `null` the +resultant `boolean` type value is `false`, and if only one value is `null` the +resultant `boolean` type value is `true`. A valid comparison is between boolean +type values, numeric type values, or reference type values. + +*Errors* + +* If a comparison is made between a `boolean` type value and numeric type value. +* If a comparison is made between a primitive type value and a reference type + value. 
+ +*Grammar* + +[source,ANTLR4] +---- +equality_not_equals: expression '!=' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | boolean | byte | short | char | int | long | float | double | Reference | def +| boolean | boolean | - | - | - | - | - | - | - | - | def +| byte | - | int | int | int | int | long | float | double | - | def +| short | - | int | int | int | int | long | float | double | - | def +| char | - | int | int | int | int | long | float | double | - | def +| int | - | int | int | int | int | long | float | double | - | def +| long | - | long | long | long | long | long | float | double | - | def +| float | - | float | float | float | float | float | float | double | - | def +| double | - | double | double | double | double | double | double | double | - | def +| Reference | - | - | - | - | - | - | - | - | Object | def +| def | def | def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Equality not equals with the `boolean` type. ++ +[source,Painless] +---- +<1> boolean a = true; +<2> boolean b = false; +<3> a = a != false; +<4> b = a != b; +---- ++ +<1> declare `boolean a`; + store `boolean true` to `a` +<2> declare `boolean b`; + store `boolean false` to `b` +<3> load from `a` -> `boolean true`; + equality not equals `boolean true` and `boolean false` -> `boolean true`; + store `boolean true` to `a` +<4> load from `a` -> `boolean true`; + load from `b` -> `boolean false`; + equality not equals `boolean true` and `boolean false` -> `boolean true`; + store `boolean true` to `b` ++ +* Equality not equals with primitive types. 
++ +[source,Painless] +---- +<1> int a = 1; +<2> double b = 2.0; +<3> boolean c = a != b; +<4> c = 1 != a; +---- ++ +<1> declare `int a`; + store `int 1` to `a` +<2> declare `double b`; + store `double 2.0` to `b` +<3> declare `boolean c`; + load from `a` -> `int 1`; + load from `b` -> `double 2.0`; + promote `int 1` and `double 2.0`: result `double`; + implicit cast `int 1` to `double 1.0` -> `double 1.0`; + equality not equals `double 1.0` and `double 2.0` -> `boolean true`; + store `boolean true` to `c` +<4> load from `a` -> `int 1 @1`; + equality not equals `int 1 @0` and `int 1 @1` -> `boolean false`; + store `boolean false` to `c` ++ +* Equality not equals with reference types. ++ +[source,Painless] +---- +<1> List a = new ArrayList(); +<2> List b = new ArrayList(); +<3> a.add(1); +<4> boolean c = a != b; +<5> b.add(1); +<6> c = a != b; +---- ++ +<1> declare `List a`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `a` +<2> declare `List b`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `b` +<3> load from `a` -> `List reference`; + call `add` on `List reference` with arguments (`int 1`) +<4> declare `boolean c`; + load from `a` -> `List reference @0`; + load from `b` -> `List reference @1`; + call `equals` on `List reference @0` with arguments (`List reference @1`) + -> `boolean false`; + boolean not `boolean false` -> `boolean true` + store `boolean true` to `c` +<5> load from `b` -> `List reference`; + call `add` on `List reference` with arguments (`int 1`) +<6> load from `a` -> `List reference @0`; + load from `b` -> `List reference @1`; + call `equals` on `List reference @0` with arguments (`List reference @1`) + -> `boolean true`; + boolean not `boolean true` -> `boolean false`; + store `boolean false` to `c` ++ +*
Equality not equals with `null`. ++ +[source,Painless] +---- +<1> Object a = null; +<2> Object b = null; +<3> boolean c = a != null; +<4> c = a != b; +<5> b = new Object(); +<6> c = a != b; +---- ++ +<1> declare `Object a`; + store `null` to `a` +<2> declare `Object b`; + store `null` to `b` +<3> declare `boolean c`; + load from `a` -> `null @0`; + equality not equals `null @0` and `null @1` -> `boolean false`; + store `boolean false` to `c` +<4> load from `a` -> `null @0`; + load from `b` -> `null @1`; + equality not equals `null @0` and `null @1` -> `boolean false`; + store `boolean false` to `c` +<5> allocate `Object` instance -> `Object reference`; + store `Object reference` to `b` +<6> load from `a` -> `Object reference`; + load from `b` -> `null`; + call `equals` on `Object reference` with arguments (`null`) + -> `boolean false`; + boolean not `boolean false` -> `boolean true`; + store `boolean true` to `c` ++ +* Equality not equals with the `def` type. ++ +[source, Painless] +---- +<1> def a = 0; +<2> def b = 1; +<3> boolean c = a == b; +<4> def d = new HashMap(); +<5> def e = new ArrayList(); +<6> c = d == e; +---- ++ +<1> declare `def a`; + implicit cast `int 0` to `def` -> `def`; + store `def` to `a`; +<2> declare `def b`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `b`; +<3> declare `boolean c`; + load from `a` -> `def`; + implicit cast `a` to `int 0` -> `int 0`; + load from `b` -> `def`; + implicit cast `b` to `int 1` -> `int 1`; + equality equals `int 0` and `int 1` -> `boolean false`; + store `boolean false` to `c` +<4> declare `def d`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def` + store `def` to `d`; +<5> declare `def e`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def` + store `def` to `e`; +<6> load from `d` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + load from `e` ->
`def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + call `equals` on `HashMap reference` with arguments (`ArrayList reference`) + -> `boolean false`; + store `boolean false` to `c` + +[[identity-equals-operator]] +==== Identity Equals + +Use the `identity equals operator '==='` to COMPARE two values where a resultant +`boolean` type value is `true` if the two values are equal and `false` +otherwise. A reference type value is equal to another reference type value if +both values refer to same instance on the heap or if both values are `null`. A +valid comparison is between `boolean` type values, numeric type values, or +reference type values. + +*Errors* + +* If a comparison is made between a `boolean` type value and numeric type value. +* If a comparison is made between a primitive type value and a reference type + value. + +*Grammar* + +[source,ANTLR4] +---- +identity_equals: expression '===' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | boolean | byte | short | char | int | long | float | double | Reference | def +| boolean | boolean | - | - | - | - | - | - | - | - | def +| byte | - | int | int | int | int | long | float | double | - | def +| short | - | int | int | int | int | long | float | double | - | def +| char | - | int | int | int | int | long | float | double | - | def +| int | - | int | int | int | int | long | float | double | - | def +| long | - | long | long | long | long | long | float | double | - | def +| float | - | float | float | float | float | float | float | double | - | def +| double | - | double | double | double | double | double | double | double | - | def +| Reference | - | - | - | - | - | - | - | - | Object | def +| def | def | def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Identity equals with reference types. 
++ +[source,Painless] +---- +<1> List a = new ArrayList(); +<2> List b = new ArrayList(); +<3> List c = a; +<4> boolean c = a === b; +<5> c = a === c; +---- ++ +<1> declare `List a`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `a` +<2> declare `List b`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `b` +<3> load from `a` -> `List reference`; + store `List reference` to `c` +<4> declare `boolean c`; + load from `a` -> `List reference @0`; + load from `b` -> `List reference @1`; + identity equals `List reference @0` and `List reference @1` + -> `boolean false` + store `boolean false` to `c` +<5> load from `a` -> `List reference @0`; + load from `c` -> `List reference @1`; + identity equals `List reference @0` and `List reference @1` + -> `boolean true` + store `boolean true` to `c` + (note `List reference @0` and `List reference @1` refer to the same + instance) ++ +* Identity equals with `null`. 
++ +[source,Painless] +---- +<1> Object a = null; +<2> Object b = null; +<3> boolean c = a === null; +<4> c = a === b; +<5> b = new Object(); +<6> c = a === b; +---- ++ +<1> declare `Object a`; + store `null` to `a` +<2> declare `Object b`; + store `null` to `b` +<3> declare `boolean c`; + load from `a` -> `null @0`; + identity equals `null @0` and `null @1` -> `boolean true`; + store `boolean true` to `c` +<4> load from `a` -> `null @0`; + load from `b` -> `null @1`; + identity equals `null @0` and `null @1` -> `boolean true`; + store `boolean true` to `c` +<5> allocate `Object` instance -> `Object reference`; + store `Object reference` to `b` +<6> load from `a` -> `Object reference`; + load from `b` -> `null`; + identity equals `Object reference` and `null` -> `boolean false`; + store `boolean false` to `c` ++ +* Identity equals with the `def` type. ++ +[source, Painless] +---- +<1> def a = new HashMap(); +<2> def b = new ArrayList(); +<3> boolean c = a === b; +<4> b = a; +<5> c = a === b; +---- ++ +<1> declare `def d`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def` + store `def` to `d` +<2> declare `def e`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def` + store `def` to `d` +<3> declare `boolean c`; + load from `a` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + load from `b` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + identity equals `HashMap reference` and `ArrayList reference` + -> `boolean false`; + store `boolean false` to `c` +<4> load from `a` -> `def`; + store `def` to `b` +<5> load from `a` -> `def`; + implicit cast `def` to `HashMap reference @0` -> `HashMap reference @0`; + load from `b` -> `def`; + implicit cast `def` to `HashMap reference @1` -> `HashMap reference @1`; + identity equals `HashMap reference @0` and `HashMap reference @1` + -> `boolean 
true`; + store `boolean true` to `b`; + (note `HashMap reference @0` and `HashMap reference @1` refer to the same + instance) + +[[identity-not-equals-operator]] +==== Identity Not Equals + +Use the `identity not equals operator '!=='` to COMPARE two values where a +resultant `boolean` type value is `true` if the two values are NOT equal and +`false` otherwise. A reference type value is not equal to another reference type +value if both values refer to different instances on the heap or if one value is +`null` and the other is not. A valid comparison is between `boolean` type +values, numeric type values, or reference type values. + +*Errors* + +* If a comparison is made between a `boolean` type value and numeric type value. +* If a comparison is made between a primitive type value and a reference type + value. + +*Grammar* + +[source,ANTLR4] +---- +identity_not_equals: expression '!==' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | boolean | byte | short | char | int | long | float | double | Reference | def +| boolean | boolean | - | - | - | - | - | - | - | - | def +| byte | - | int | int | int | int | long | float | double | - | def +| short | - | int | int | int | int | long | float | double | - | def +| char | - | int | int | int | int | long | float | double | - | def +| int | - | int | int | int | int | long | float | double | - | def +| long | - | long | long | long | long | long | float | double | - | def +| float | - | float | float | float | float | float | float | double | - | def +| double | - | double | double | double | double | double | double | double | - | def +| Reference | - | - | - | - | - | - | - | - | Object | def +| def | def | def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Identity not equals with reference type values. 
++ +[source,Painless] +---- +<1> List a = new ArrayList(); +<2> List b = new ArrayList(); +<3> List c = a; +<4> boolean c = a !== b; +<5> c = a !== c; +---- ++ +<1> declare `List a`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `a` +<2> declare `List b`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `b` +<3> load from `a` -> `List reference`; + store `List reference` to `c` +<4> declare `boolean c`; + load from `a` -> `List reference @0`; + load from `b` -> `List reference @1`; + identity not equals `List reference @0` and `List reference @1` + -> `boolean true` + store `boolean true` to `c` +<5> load from `a` -> `List reference @0`; + load from `c` -> `List reference @1`; + identity not equals `List reference @0` and `List reference @1` + -> `boolean false` + store `boolean false` to `c` + (note `List reference @0` and `List reference @1` refer to the same + instance) ++ +* Identity not equals with `null`. 
++ +[source,Painless] +---- +<1> Object a = null; +<2> Object b = null; +<3> boolean c = a !== null; +<4> c = a !== b; +<5> b = new Object(); +<6> c = a !== b; +---- ++ +<1> declare `Object a`; + store `null` to `a` +<2> declare `Object b`; + store `null` to `b` +<3> declare `boolean c`; + load from `a` -> `null @0`; + identity not equals `null @0` and `null @1` -> `boolean false`; + store `boolean false` to `c` +<4> load from `a` -> `null @0`; + load from `b` -> `null @1`; + identity not equals `null @0` and `null @1` -> `boolean false`; + store `boolean false` to `c` +<5> allocate `Object` instance -> `Object reference`; + store `Object reference` to `b` +<6> load from `a` -> `Object reference`; + load from `b` -> `null`; + identity not equals `Object reference` and `null` -> `boolean true`; + store `boolean true` to `c` ++ +* Identity not equals with the `def` type. ++ +[source, Painless] +---- +<1> def a = new HashMap(); +<2> def b = new ArrayList(); +<3> boolean c = a !== b; +<4> b = a; +<5> c = a !== b; +---- ++ +<1> declare `def d`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def` + store `def` to `d` +<2> declare `def e`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def` + store `def` to `d` +<3> declare `boolean c`; + load from `a` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + load from `b` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; + identity not equals `HashMap reference` and `ArrayList reference` + -> `boolean true`; + store `boolean true` to `c` +<4> load from `a` -> `def`; + store `def` to `b` +<5> load from `a` -> `def`; + implicit cast `def` to `HashMap reference @0` -> `HashMap reference @0`; + load from `b` -> `def`; + implicit cast `def` to `HashMap reference @1` -> `HashMap reference @1`; + identity not equals `HashMap reference @0` and `HashMap 
reference @1` + -> `boolean false`; + store `boolean false` to `b`; + (note `HashMap reference @0` and `HashMap reference @1` refer to the same + instance) + +[[boolean-xor-operator]] +==== Boolean Xor + +Use the `boolean xor operator '^'` to XOR together two `boolean` type values +where if one `boolean` type value is `true` and the other is `false` the +resultant `boolean` type value is `true` and `false` otherwise. + +*Errors* + +* If either evaluated value is a value other than a `boolean` type value or + a value that is castable to a `boolean` type value. + +*Truth* + +[cols="^1,^1,^1"] +|==== +| | true | false +| true | false | true +| false | true | false +|==== + +*Grammar* + +[source,ANTLR4] +---- +boolean_xor: expression '^' expression; +---- + +*Examples* + +* Boolean xor with the `boolean` type. ++ +[source,Painless] +---- +<1> boolean x = false; +<2> boolean y = x ^ true; +<3> y = y ^ x; +---- ++ +<1> declare `boolean x`; + store `boolean false` to `x` +<2> declare `boolean y`; + load from `x` -> `boolean false` + boolean xor `boolean false` and `boolean true` -> `boolean true`; + store `boolean true` to `y` +<3> load from `y` -> `boolean true @0`; + load from `x` -> `boolean true @1`; + boolean xor `boolean true @0` and `boolean true @1` -> `boolean false`; + store `boolean false` to `y` ++ +* Boolean xor with the `def` type. 
++ +[source,Painless] +---- +<1> def x = false; +<2> def y = x ^ true; +<3> y = y ^ x; +---- ++ +<1> declare `def x`; + implicit cast `boolean false` to `def` -> `def`; + store `def` to `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `boolean false` -> `boolean false`; + boolean xor `boolean false` and `boolean true` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `y` +<3> load from `y` -> `def`; + implicit cast `def` to `boolean true @0` -> `boolean true @0`; + load from `x` -> `def`; + implicit cast `def` to `boolean true @1` -> `boolean true @1`; + boolean xor `boolean true @0` and `boolean true @1` -> `boolean false`; + implicit cast `boolean false` -> `def`; + store `def` to `y` + +[[boolean-and-operator]] +==== Boolean And + +Use the `boolean and operator '&&'` to AND together two `boolean` type values +where if both `boolean` type values are `true` the resultant `boolean` type +value is `true` and `false` otherwise. + +*Errors* + +* If either evaluated value is a value other than a `boolean` type value or + a value that is castable to a `boolean` type value. + +*Truth* + +[cols="^1,^1,^1"] +|==== +| | true | false +| true | true | false +| false | false | false +|==== + +*Grammar* + +[source,ANTLR4] +---- +boolean_and: expression '&&' expression; +---- + +*Examples* + +* Boolean and with the `boolean` type. ++ +[source,Painless] +---- +<1> boolean x = true; +<2> boolean y = x && true; +<3> x = false; +<4> y = y && x; +---- ++ +<1> declare `boolean x`; + store `boolean true` to `x` +<2> declare `boolean y`; + load from `x` -> `boolean true @0`; + boolean and `boolean true @0` and `boolean true @1` -> `boolean true`; + store `boolean true` to `y` +<3> store `boolean false` to `x` +<4> load from `y` -> `boolean true`; + load from `x` -> `boolean false`; + boolean and `boolean true` and `boolean false` -> `boolean false`; + store `boolean false` to `y` ++ +* Boolean and with the `def` type. 
++ +[source,Painless] +---- +<1> def x = true; +<2> def y = x && true; +<3> x = false; +<4> y = y && x; +---- ++ +<1> declare `def x`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `boolean true @0` -> `boolean true @0`; + boolean and `boolean true @0` and `boolean true @1` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `y` +<3> implicit cast `boolean false` to `def` -> `def`; + store `def` to `x`; +<4> load from `y` -> `def`; + implicit cast `def` to `boolean true` -> `boolean true`; + load from `x` -> `def`; + implicit cast `def` to `boolean false` -> `boolean false`; + boolean and `boolean true` and `boolean false` -> `boolean false`; + implicit cast `boolean false` -> `def`; + store `def` to `y` + +[[boolean-or-operator]] +==== Boolean Or + +Use the `boolean or operator '||'` to OR together two `boolean` type values +where if either one of the `boolean` type values is `true` the resultant +`boolean` type value is `true` and `false` otherwise. + +*Errors* + +* If either evaluated value is a value other than a `boolean` type value or + a value that is castable to a `boolean` type value. + +*Truth* + +[cols="^1,^1,^1"] +|==== +| | true | false +| true | true | true +| false | true | false +|==== + +*Grammar:* +[source,ANTLR4] +---- +boolean_and: expression '||' expression; +---- + +*Examples* + +* Boolean or with the `boolean` type. 
++ +[source,Painless] +---- +<1> boolean x = false; +<2> boolean y = x || true; +<3> y = false; +<4> y = y || x; +---- ++ +<1> declare `boolean x`; + store `boolean false` to `x` +<2> declare `boolean y`; + load from `x` -> `boolean false`; + boolean or `boolean false` and `boolean true` -> `boolean true`; + store `boolean true` to `y` +<3> store `boolean false` to `y` +<4> load from `y` -> `boolean false @0`; + load from `x` -> `boolean false @1`; + boolean or `boolean false @0` and `boolean false @1` -> `boolean false`; + store `boolean false` to `y` ++ +* Boolean or with the `def` type. ++ +[source,Painless] +---- +<1> def x = false; +<2> def y = x || true; +<3> y = false; +<4> y = y || x; +---- ++ +<1> declare `def x`; + implicit cast `boolean false` to `def` -> `def`; + store `def` to `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `boolean false` -> `boolean true`; + boolean or `boolean false` and `boolean true` -> `boolean true`; + implicit cast `boolean true` to `def` -> `def`; + store `def` to `y` +<3> implicit cast `boolean false` to `def` -> `def`; + store `def` to `y`; +<4> load from `y` -> `def`; + implicit cast `def` to `boolean false @0` -> `boolean false @0`; + load from `x` -> `def`; + implicit cast `def` to `boolean false @1` -> `boolean false @1`; + boolean or `boolean false @0` and `boolean false @1` -> `boolean false`; + implicit cast `boolean false` -> `def`; + store `def` to `y` diff --git a/docs/painless/painless-operators-general.asciidoc b/docs/painless/painless-operators-general.asciidoc new file mode 100644 index 00000000000..9bd057432fb --- /dev/null +++ b/docs/painless/painless-operators-general.asciidoc @@ -0,0 +1,432 @@ +[[painless-operators-general]] +=== Operators: General + +[[precedence-operator]] +==== Precedence + +Use the `precedence operator '()'` to guarantee the order of evaluation for an +expression. 
An expression encapsulated by the precedence operator (enclosed in +parentheses) overrides existing precedence relationships between operators and +is evaluated prior to other expressions in inward-to-outward order. + +*Grammar* + +[source,ANTLR4] +---- +precedence: '(' expression ')'; +---- + +*Examples* + +* Precedence with numeric operators. ++ +[source,Painless] +---- +<1> int x = (5+4)*6; +<2> int y = 12/(x-50); +---- ++ +<1> declare `int x`; + add `int 5` and `int 4` -> `int 9`; + multiply `int 9` and `int 6` -> `int 54`; + store `int 54` to `x`; + (note the add is evaluated before the multiply due to the precedence + operator) +<2> declare `int y`; + load from `x` -> `int 54`; + subtract `int 50` from `int 54` -> `int 4`; + divide `int 12` by `int 4` -> `int 3`; + store `int 3` to `y`; + (note the subtract is evaluated before the divide due to the precedence + operator) + +[[function-call-operator]] +==== Function Call + +Use the `function call operator ()` to call an existing function. A +<> is defined within a script. + +*Grammar* + +[source,ANTLR4] +---- +function_call: ID '(' ( expression (',' expression)* )? ')''; +---- + +*Examples* + +* A function call. ++ +[source,Painless] +---- +<1> int add(int x, int y) { + return x + y; + } + +<2> int z = add(1, 2); +---- ++ +<1> define function `add` that returns `int` and has parameters (`int x`, + `int y`) +<2> declare `int z`; + call `add` with arguments (`int 1`, `int 2`) -> `int 3`; + store `int 3` to `z` + +[[cast-operator]] +==== Cast + +An explicit cast converts the value of an original type to the equivalent value +of a target type forcefully as an operation. Use the `cast operator '()'` to +specify an explicit cast. Refer to <> for more +information. + +[[conditional-operator]] +==== Conditional + +A conditional consists of three expressions. The first expression is evaluated +with an expected boolean result type. If the first expression evaluates to true +then the second expression will be evaluated. 
If the first expression evaluates +to false then the third expression will be evaluated. The second and third +expressions will be <> if the evaluated values are not the +same type. Use the `conditional operator '? :'` as a shortcut to avoid the need +for a full if/else branch in certain expressions. + +*Errors* + +* If the first expression does not evaluate to a boolean type value. +* If the values for the second and third expressions cannot be promoted. + +*Grammar* + +[source,ANTLR4] +---- +conditional: expression '?' expression ':' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | Reference | def +| byte | int | int | int | int | long | float | double | - | def +| short | int | int | int | int | long | float | double | - | def +| char | int | int | int | int | long | float | double | - | def +| int | int | int | int | int | long | float | double | - | def +| long | long | long | long | long | long | float | double | - | def +| float | float | float | float | float | float | float | double | - | def +| double | double | double | double | double | double | double | double | - | def +| Reference | - | - | - | - | - | - | - | Object @ | def +| def | def | def | def | def | def | def | def | def | def +|==== + +@ If the two reference type values are the same then this promotion will not +occur. + +*Examples* + +* Evaluation of conditionals. ++ +[source,Painless] +---- +<1> boolean b = true; +<2> int x = b ? 1 : 2; +<3> List y = x > 1 ? new ArrayList() : null; +<4> def z = x < 2 ? 
x : 2.0; +---- ++ +<1> declare `boolean b`; + store `boolean true` to `b` +<2> declare `int x`; + load from `b` -> `boolean true` + evaluate 1st expression: `int 1` -> `int 1`; + store `int 1` to `x` +<3> declare `List y`; + load from `x` -> `int 1`; + `int 1` greater than `int 1` -> `boolean false`; + evaluate 2nd expression: `null` -> `null`; + store `null` to `y`; +<4> declare `def z`; + load from `x` -> `int 1`; + `int 1` less than `int 2` -> `boolean true`; + evaluate 1st expression: load from `x` -> `int 1`; + promote `int 1` and `double 2.0`: result `double`; + implicit cast `int 1` to `double 1.0` -> `double 1.0`; + implicit cast `double 1.0` to `def` -> `def`; + store `def` to `z`; + +[[assignment-operator]] +==== Assignment + +Use the `assignment operator '='` to store a value in a variable or reference +type member field for use in subsequent operations. Any operation that produces +a value can be assigned to any variable/field as long as the +<> are the same or the resultant type can be +<> to the variable/field type. + +See <> for examples using variables. + +*Errors* + +* If the type of value is unable to match the type of variable or field. + +*Grammar* + +[source,ANTLR4] +---- +assignment: field '=' expression +---- + +*Examples* + +The examples use the following reference type definition: + +[source,Painless] +---- +name: + Example + +non-static member fields: + * int x + * def y + * List z +---- + +* Field assignments of different type values. 
++ +[source,Painless] +---- +<1> Example example = new Example(); +<2> example.x = 1; +<3> example.y = 2.0; +<4> example.z = new ArrayList(); +---- ++ +<1> declare `Example example`; + allocate `Example` instance -> `Example reference`; + store `Example reference` to `example` +<2> load from `example` -> `Example reference`; + store `int 1` to `x` of `Example reference` +<3> load from `example` -> `Example reference`; + implicit cast `double 2.0` to `def` -> `def`; + store `def` to `y` of `Example reference` +<4> load from `example` -> `Example reference`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `z` of `Example reference` ++ +* A field assignment from a field access. ++ +[source,Painless] +---- +<1> Example example = new Example(); +<2> example.x = 1; +<3> example.y = example.x; +---- ++ +<1> declare `Example example`; + allocate `Example` instance -> `Example reference`; + store `Example reference` to `example` +<2> load from `example` -> `Example reference`; + store `int 1` to `x` of `Example reference` +<3> load from `example` -> `Example reference @0`; + load from `example` -> `Example reference @1`; + load from `x` of `Example reference @1` -> `int 1`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `y` of `Example reference @0`; + (note `Example reference @0` and `Example reference @1` are the same) + +[[compound-assignment-operator]] +==== Compound Assignment + +Use the `compound assignment operator '$='` as a shortcut for an assignment +where a binary operation would occur between the variable/field as the +left-hand side expression and a separate right-hand side expression. + +A compound assignment is equivalent to the expression below where V is the +variable/field and T is the type of variable/member. 
+ +[source,Painless] +---- +V = (T)(V op expression); +---- + +*Operators* + +The table below shows the available operators for use in a compound assignment. +Each operator follows the casting/promotion rules according to their regular +definition. For numeric operations there is an extra implicit cast when +necessary to return the promoted numeric type value to the original numeric type +value of the variable/field and can result in data loss. + +|==== +|Operator|Compound Symbol +|Multiplication|*= +|Division|/= +|Remainder|%= +|Addition|+= +|Subtraction|-= +|Left Shift|<<= +|Right Shift|>>= +|Unsigned Right Shift|>>>= +|Bitwise And|&= +|Boolean And|&= +|Bitwise Xor|^= +|Boolean Xor|^= +|Bitwise Or|\|= +|Boolean Or|\|= +|String Concatenation|+= +|==== + +*Errors* + +* If the type of value is unable to match the type of variable or field. + +*Grammar* + +[source,ANTLR4] +---- +compound_assignment: ( ID | field ) '$=' expression; +---- + +Note the use of the `$=` represents the use of any of the possible binary +operators. + +*Examples* + +* Compound assignment for each numeric operator. 
++ +[source,Painless] +---- +<1> int i = 10; +<2> i *= 2; +<3> i /= 5; +<4> i %= 3; +<5> i += 5; +<6> i -= 5; +<7> i <<= 2; +<8> i >>= 1; +<9> i >>>= 1; +<10> i &= 15; +<11> i ^= 12; +<12> i |= 2; +---- ++ +<1> declare `int i`; + store `int 10` to `i` +<2> load from `i` -> `int 10`; + multiply `int 10` and `int 2` -> `int 20`; + store `int 20` to `i`; + (note this is equivalent to `i = i*2`) +<3> load from `i` -> `int 20`; + divide `int 20` by `int 5` -> `int 4`; + store `int 4` to `i`; + (note this is equivalent to `i = i/5`) +<4> load from `i` -> `int 4`; + remainder `int 4` by `int 3` -> `int 1`; + store `int 1` to `i`; + (note this is equivalent to `i = i%3`) +<5> load from `i` -> `int 1`; + add `int 1` and `int 5` -> `int 6`; + store `int 6` to `i`; + (note this is equivalent to `i = i+5`) +<6> load from `i` -> `int 6`; + subtract `int 5` from `int 6` -> `int 1`; + store `int 1` to `i`; + (note this is equivalent to `i = i-5`) +<7> load from `i` -> `int 1`; + left shift `int 1` by `int 2` -> `int 4`; + store `int 4` to `i`; + (note this is equivalent to `i = i<<2`) +<8> load from `i` -> `int 4`; + right shift `int 4` by `int 1` -> `int 2`; + store `int 2` to `i`; + (note this is equivalent to `i = i>>1`) +<9> load from `i` -> `int 2`; + unsigned right shift `int 2` by `int 1` -> `int 1`; + store `int 1` to `i`; + (note this is equivalent to `i = i>>>1`) +<10> load from `i` -> `int 1`; + bitwise and `int 1` and `int 15` -> `int 1`; + store `int 1` to `i`; + (note this is equivalent to `i = i&2`) +<11> load from `i` -> `int 1`; + bitwise xor `int 1` and `int 12` -> `int 13`; + store `int 13` to `i`; + (note this is equivalent to `i = i^2`) +<12> load from `i` -> `int 13`; + bitwise or `int 13` and `int 2` -> `int 15`; + store `int 15` to `i`; + (note this is equivalent to `i = i|2`) ++ +* Compound assignment for each boolean operator. 
++ +[source,Painless] +---- +<1> boolean b = true; +<2> b &= false; +<3> b ^= false; +<4> b |= true; +---- ++ +<1> declare `boolean b`; + store `boolean true` in `b`; +<2> load from `b` -> `boolean true`; + boolean and `boolean true` and `boolean false` -> `boolean false`; + store `boolean false` to `b`; + (note this is equivalent to `b = b && false`) +<3> load from `b` -> `boolean false`; + boolean xor `boolean false` and `boolean false` -> `boolean false`; + store `boolean false` to `b`; + (note this is equivalent to `b = b ^ false`) +<4> load from `b` -> `boolean true`; + boolean or `boolean false` and `boolean true` -> `boolean true`; + store `boolean true` to `b`; + (note this is equivalent to `b = b || true`) ++ +* A compound assignment with the string concatenation operator. ++ +[source,Painless] +---- +<1> String s = 'compound'; +<2> s += ' assignment'; +---- +<1> declare `String s`; + store `String 'compound'` to `s`; +<2> load from `s` -> `String 'compound'`; + string concat `String 'compound'` and `String ' assignment''` + -> `String 'compound assignment'`; + store `String 'compound assignment'` to `s`; + (note this is equivalent to `s = s + ' assignment'`) ++ +* A compound assignment with the `def` type. ++ +[source,Painless] +---- +<1> def x = 1; +<2> x += 2; +---- +<1> declare `def x`; + implicit cast `int 1` to `def`; + store `def` to `x`; +<2> load from `x` -> `def`; + implicit cast `def` to `int 1` -> `int 1`; + add `int 1` and `int 2` -> `int 3`; + implicit cast `int 3` to `def` -> `def`; + store `def` to `x`; + (note this is equivalent to `x = x+2`) ++ +* A compound assignment with an extra implicit cast. 
++
+[source,Painless]
+----
+<1> byte b = 1;
+<2> b += 2;
+----
+<1> declare `byte b`;
+    store `byte 1` to `b`;
+<2> load from `b` -> `byte 1`;
+    implicit cast `byte 1` to `int 1` -> `int 1`;
+    add `int 1` and `int 2` -> `int 3`;
+    implicit cast `int 3` to `byte 3` -> `byte 3`;
+    store `byte 3` to `b`;
+    (note this is equivalent to `b = b+2`)
diff --git a/docs/painless/painless-operators-numeric.asciidoc b/docs/painless/painless-operators-numeric.asciidoc
new file mode 100644
index 00000000000..d39b895908f
--- /dev/null
+++ b/docs/painless/painless-operators-numeric.asciidoc
@@ -0,0 +1,1339 @@
+[[painless-operators-numeric]]
+=== Operators: Numeric
+
+[[post-increment-operator]]
+==== Post Increment
+
+Use the `post increment operator '++'` to INCREASE the value of a numeric type
+variable/field by `1`. An extra implicit cast is necessary to return the
+promoted numeric type value to the original numeric type value of the
+variable/field for the following types: `byte`, `short`, and `char`. If a
+variable/field is read as part of an expression the value is loaded prior to the
+increment.
+
+*Errors*
+
+* If the variable/field is a non-numeric type.
+
+*Grammar*
+
+[source,ANTLR4]
+----
+post_increment: ( variable | field ) '++';
+----
+
+*Promotion*
+
+[options="header",cols="<1,<1,<1"]
+|====
+| original | promoted | implicit
+| byte | int | byte
+| short | int | short
+| char | int | char
+| int | int |
+| long | long |
+| float | float |
+| double | double |
+| def | def |
+|====
+
+*Examples*
+
+* Post increment with different numeric types.
++ +[source,Painless] +---- +<1> short i = 0; +<2> i++; +<3> long j = 1; +<4> long k; +<5> k = j++; +---- ++ +<1> declare `short i`; + store `short 0` to `i` +<2> load from `i` -> `short 0`; + promote `short 0`: result `int`; + add `int 0` and `int 1` -> `int 1`; + implicit cast `int 1` to `short 1`; + store `short 1` to `i` +<3> declare `long j`; + implicit cast `int 1` to `long 1` -> `long 1`; + store `long 1` to `j` +<4> declare `long k`; + store default `long 0` to `k` +<5> load from `j` -> `long 1`; + store `long 1` to `k`; + add `long 1` and `long 1` -> `long 2`; + store `long 2` to `j` ++ +* Post increment with the `def` type. ++ +[source,Painless] +---- +<1> def x = 1; +<2> x++; +---- ++ +<1> declare `def x`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `x` +<2> load from `x` -> `def`; + implicit cast `def` to `int 1`; + add `int 1` and `int 1` -> `int 2`; + implicit cast `int 2` to `def`; + store `def` to `x` + +[[post-decrement-operator]] +==== Post Decrement + +Use the `post decrement operator '--'` to DECREASE the value of a numeric type +variable/field by `1`. An extra implicit cast is necessary to return the +promoted numeric type value to the original numeric type value of the +variable/field for the following types: `byte`, `short`, and `char`. If a +variable/field is read as part of an expression the value is loaded prior to +the decrement. + +*Errors* + +* If the variable/field is a non-numeric type. + +*Grammar* + +[source,ANTLR4] +---- +post_decrement: ( variable | field ) '--'; +---- + +*Promotion* + +[options="header",cols="<1,<1,<1"] +|==== +| original | promoted | implicit +| byte | int | byte +| short | int | short +| char | int | char +| int | int | +| long | long | +| float | float | +| double | double | +| def | def | +|==== + +*Examples* + +* Post decrement with different numeric types. 
++ +[source,Painless] +---- +<1> short i = 0; +<2> i--; +<3> long j = 1; +<4> long k; +<5> k = j--; +---- ++ +<1> declare `short i`; + store `short 0` to `i` +<2> load from `i` -> `short 0`; + promote `short 0`: result `int`; + subtract `int 1` from `int 0` -> `int -1`; + implicit cast `int -1` to `short -1`; + store `short -1` to `i` +<3> declare `long j`; + implicit cast `int 1` to `long 1` -> `long 1`; + store `long 1` to `j` +<4> declare `long k`; + store default `long 0` to `k` +<5> load from `j` -> `long 1`; + store `long 1` to `k`; + subtract `long 1` from `long 1` -> `long 0`; + store `long 0` to `j` ++ +* Post decrement with the `def` type. ++ +[source,Painless] +---- +<1> def x = 1; +<2> x--; +---- ++ +<1> declare `def x`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `x` +<2> load from `x` -> `def`; + implicit cast `def` to `int 1`; + subtract `int 1` from `int 1` -> `int 0`; + implicit cast `int 0` to `def`; + store `def` to `x` + +[[pre-increment-operator]] +==== Pre Increment + +Use the `pre increment operator '++'` to INCREASE the value of a numeric type +variable/field by `1`. An extra implicit cast is necessary to return the +promoted numeric type value to the original numeric type value of the +variable/field for the following types: `byte`, `short`, and `char`. If a +variable/field is read as part of an expression the value is loaded after the +increment. + +*Errors* + +* If the variable/field is a non-numeric type. + +*Grammar* + +[source,ANTLR4] +---- +pre_increment: '++' ( variable | field ); +---- + +*Promotion* + +[options="header",cols="<1,<1,<1"] +|==== +| original | promoted | implicit +| byte | int | byte +| short | int | short +| char | int | char +| int | int | +| long | long | +| float | float | +| double | double | +| def | def | +|==== + +*Examples* + +* Pre increment with different numeric types. 
++
+[source,Painless]
+----
+<1> short i = 0;
+<2> ++i;
+<3> long j = 1;
+<4> long k;
+<5> k = ++j;
+----
++
+<1> declare `short i`;
+    store `short 0` to `i`
+<2> load from `i` -> `short 0`;
+    promote `short 0`: result `int`;
+    add `int 0` and `int 1` -> `int 1`;
+    implicit cast `int 1` to `short 1`;
+    store `short 1` to `i`
+<3> declare `long j`;
+    implicit cast `int 1` to `long 1` -> `long 1`;
+    store `long 1` to `j`
+<4> declare `long k`;
+    store default `long 0` to `k`
+<5> load from `j` -> `long 1`;
+    add `long 1` and `long 1` -> `long 2`;
+    store `long 2` to `j`;
+    store `long 2` to `k`
++
+* Pre increment with the `def` type.
++
+[source,Painless]
+----
+<1> def x = 1;
+<2> ++x;
+----
++
+<1> declare `def x`;
+    implicit cast `int 1` to `def` -> `def`;
+    store `def` to `x`
+<2> load from `x` -> `def`;
+    implicit cast `def` to `int 1`;
+    add `int 1` and `int 1` -> `int 2`;
+    implicit cast `int 2` to `def`;
+    store `def` to `x`
+
+[[pre-decrement-operator]]
+==== Pre Decrement
+
+Use the `pre decrement operator '--'` to DECREASE the value of a numeric type
+variable/field by `1`. An extra implicit cast is necessary to return the
+promoted numeric type value to the original numeric type value of the
+variable/field for the following types: `byte`, `short`, and `char`. If a
+variable/field is read as part of an expression the value is loaded after the
+decrement.
+
+*Errors*
+
+* If the variable/field is a non-numeric type.
+
+*Grammar*
+
+[source,ANTLR4]
+----
+pre_decrement: '--' ( variable | field );
+----
+
+*Promotion*
+
+[options="header",cols="<1,<1,<1"]
+|====
+| original | promoted | implicit
+| byte | int | byte
+| short | int | short
+| char | int | char
+| int | int |
+| long | long |
+| float | float |
+| double | double |
+| def | def |
+|====
+
+*Examples*
+
+* Pre decrement with different numeric types.
++ +[source,Painless] +---- +<1> short i = 0; +<2> --i; +<3> long j = 1; +<4> long k; +<5> k = --j; +---- ++ +<1> declare `short i`; + store `short 0` to `i` +<2> load from `i` -> `short 0`; + promote `short 0`: result `int`; + subtract `int 1` from `int 0` -> `int -1`; + implicit cast `int -1` to `short -1`; + store `short -1` to `i` +<3> declare `long j`; + implicit cast `int 1` to `long 1` -> `long 1`; + store `long 1` to `j` +<4> declare `long k`; + store default `long 0` to `k` +<5> load from `j` -> `long 1`; + subtract `long 1` from `long 1` -> `long 0`; + store `long 0` to `j` + store `long 0` to `k`; ++ +* Pre decrement operator with the `def` type. ++ +[source,Painless] +---- +<1> def x = 1; +<2> --x; +---- ++ +<1> declare `def x`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `x` +<2> load from `x` -> `def`; + implicit cast `def` to `int 1`; + subtract `int 1` from `int 1` -> `int 0`; + implicit cast `int 0` to `def`; + store `def` to `x` + +[[unary-positive-operator]] +==== Unary Positive + +Use the `unary positive operator '+'` to the preserve the IDENTITY of a +numeric type value. + +*Errors* + +* If the value is a non-numeric type. + +*Grammar* + +[source,ANTLR4] +---- +unary_positive: '+' expression; +---- + +*Examples* + +* Unary positive with different numeric types. ++ +[source,Painless] +---- +<1> int x = +1; +<2> long y = +x; +---- ++ +<1> declare `int x`; + identity `int 1` -> `int 1`; + store `int 1` to `x` +<2> declare `long y`; + load from `x` -> `int 1`; + identity `int 1` -> `int 1`; + implicit cast `int 1` to `long 1` -> `long 1`; + store `long 1` to `y` ++ +* Unary positive with the `def` type. 
++ +[source,Painless] +---- +<1> def z = +1; +<2> int i = +z; +---- +<1> declare `def z`; + identity `int 1` -> `int 1`; + implicit cast `int 1` to `def`; + store `def` to `z` +<2> declare `int i`; + load from `z` -> `def`; + implicit cast `def` to `int 1`; + identity `int 1` -> `int 1`; + store `int 1` to `i`; + +[[unary-negative-operator]] +==== Unary Negative + +Use the `unary negative operator '-'` to NEGATE a numeric type value. + +*Errors* + +* If the value is a non-numeric type. + +*Grammar* + +[source,ANTLR4] +---- +unary_negative: '-' expression; +---- + +*Examples* + +* Unary negative with different numeric types. ++ +[source,Painless] +---- +<1> int x = -1; +<2> long y = -x; +---- ++ +<1> declare `int x`; + negate `int 1` -> `int -1`; + store `int -1` to `x` +<2> declare `long y`; + load from `x` -> `int 1`; + negate `int -1` -> `int 1`; + implicit cast `int 1` to `long 1` -> `long 1`; + store `long 1` to `y` ++ +* Unary negative with the `def` type. ++ +[source,Painless] +---- +<1> def z = -1; +<2> int i = -z; +---- +<1> declare `def z`; + negate `int 1` -> `int -1`; + implicit cast `int -1` to `def`; + store `def` to `z` +<2> declare `int i`; + load from `z` -> `def`; + implicit cast `def` to `int -1`; + negate `int -1` -> `int 1`; + store `int 1` to `i`; + +[[bitwise-not-operator]] +==== Bitwise Not + +Use the `bitwise not operator '~'` to NOT each bit in an integer type value +where a `1-bit` is flipped to a resultant `0-bit` and a `0-bit` is flipped to a +resultant `1-bit`. + +*Errors* + +* If the value is a non-integer type. + +*Bits* + +[options="header",cols="<1,<1"] +|==== +| original | result +| 1 | 0 +| 0 | 1 +|==== + +*Grammar* + +[source,ANTLR4] +---- +bitwise_not: '~' expression; +---- + +*Promotion* + +[options="header",cols="<1,<1"] +|==== +| original | promoted +| byte | int +| short | int +| char | int +| int | int +| long | long +| def | def +|==== + +*Examples* + +* Bitwise not with different numeric types. 
++
+[source,Painless]
+----
+<1> byte b = 1;
+<2> int i = ~b;
+<3> long l = ~i;
+----
++
+<1> declare `byte b`;
+    store `byte 1` to `b`
+<2> declare `int i`;
+    load from `b` -> `byte 1`;
+    implicit cast `byte 1` to `int 1` -> `int 1`;
+    bitwise not `int 1` -> `int -2`;
+    store `int -2` to `i`
+<3> declare `long l`;
+    load from `i` -> `int -2`;
+    implicit cast `int -2` to `long -2` -> `long -2`;
+    bitwise not `long -2` -> `long 1`;
+    store `long 1` to `l`
++
+* Bitwise not with the `def` type.
++
+[source,Painless]
+----
+<1> def d = 1;
+<2> def e = ~d;
+----
++
+<1> declare `def d`;
+    implicit cast `int 1` to `def` -> `def`;
+    store `def` to `d`;
+<2> declare `def e`;
+    load from `d` -> `def`;
+    implicit cast `def` to `int 1` -> `int 1`;
+    bitwise not `int 1` -> `int -2`;
+    implicit cast `int -2` to `def` -> `def`;
+    store `def` to `e`
+
+[[multiplication-operator]]
+==== Multiplication
+
+Use the `multiplication operator '*'` to MULTIPLY together two numeric type
+values. Rules for resultant overflow and NaN values follow the JVM
+specification.
+
+*Errors*
+
+* If either of the values is a non-numeric type.
+
+*Grammar*
+
+[source,ANTLR4]
+----
+multiplication: expression '*' expression;
+----
+
+*Promotion*
+
+[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"]
+|====
+| | byte | short | char | int | long | float | double | def
+| byte | int | int | int | int | long | float | double | def
+| short | int | int | int | int | long | float | double | def
+| char | int | int | int | int | long | float | double | def
+| int | int | int | int | int | long | float | double | def
+| long | long | long | long | long | long | float | double | def
+| float | float | float | float | float | float | float | double | def
+| double | double | double | double | double | double | double | double | def
+| def | def | def | def | def | def | def | def | def
+|====
+
+*Examples*
+
+* Multiplication with different numeric types.
++ +[source,Painless] +---- +<1> int i = 5*4; +<2> double d = i*7.0; +---- ++ +<1> declare `int i`; + multiply `int 4` by `int 5` -> `int 20`; + store `int 20` in `i` +<2> declare `double d`; + load from `int i` -> `int 20`; + promote `int 20` and `double 7.0`: result `double`; + implicit cast `int 20` to `double 20.0` -> `double 20.0`; + multiply `double 20.0` by `double 7.0` -> `double 140.0`; + store `double 140.0` to `d` ++ +* Multiplication with the `def` type. ++ +[source,Painless] +---- +<1> def x = 5*4; +<2> def y = x*2; +---- +<1> declare `def x`; + multiply `int 5` by `int 4` -> `int 20`; + implicit cast `int 20` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 20`; + multiply `int 20` by `int 2` -> `int 40`; + implicit cast `int 40` to `def` -> `def`; + store `def` to `y` + +[[division-operator]] +==== Division + +Use the `division operator '/'` to DIVIDE one numeric type value by another. +Rules for NaN values and division by zero follow the JVM specification. Division +with integer values drops the remainder of the resultant value. + +*Errors* + +* If either of the values is a non-numeric type. +* If a left-hand side integer type value is divided by a right-hand side integer + type value of `0`. 
+ +*Grammar* + +[source,ANTLR4] +---- +division: expression '/' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | def +| byte | int | int | int | int | long | float | double | def +| short | int | int | int | int | long | float | double | def +| char | int | int | int | int | long | float | double | def +| int | int | int | int | int | long | float | double | def +| long | long | long | long | long | long | float | double | def +| float | float | float | float | float | float | float | double | def +| double | double | double | double | double | double | double | double | def +| def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Division with different numeric types. ++ +[source,Painless] +---- +<1> int i = 29/4; +<2> double d = i/7.0; +---- ++ +<1> declare `int i`; + divide `int 29` by `int 4` -> `int 7`; + store `int 7` in `i` +<2> declare `double d`; + load from `int i` -> `int 7`; + promote `int 7` and `double 7.0`: result `double`; + implicit cast `int 7` to `double 7.0` -> `double 7.0`; + divide `double 7.0` by `double 7.0` -> `double 1.0`; + store `double 1.0` to `d` ++ +* Division with the `def` type. ++ +[source,Painless] +---- +<1> def x = 5/4; +<2> def y = x/2; +---- +<1> declare `def x`; + divide `int 5` by `int 4` -> `int 1`; + implicit cast `int 1` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 1`; + divide `int 1` by `int 2` -> `int 0`; + implicit cast `int 0` to `def` -> `def`; + store `def` to `y` + +[[remainder-operator]] +==== Remainder + +Use the `remainder operator '%'` to calculate the REMAINDER for division +between two numeric type values. Rules for NaN values and division by zero follow the JVM +specification. + +*Errors* + +* If either of the values is a non-numeric type. 
+
+*Grammar*
+
+[source,ANTLR4]
+----
+remainder: expression '%' expression;
+----
+
+*Promotion*
+
+[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"]
+|====
+| | byte | short | char | int | long | float | double | def
+| byte | int | int | int | int | long | float | double | def
+| short | int | int | int | int | long | float | double | def
+| char | int | int | int | int | long | float | double | def
+| int | int | int | int | int | long | float | double | def
+| long | long | long | long | long | long | float | double | def
+| float | float | float | float | float | float | float | double | def
+| double | double | double | double | double | double | double | double | def
+| def | def | def | def | def | def | def | def | def
+|====
+
+*Examples*
+
+* Remainder with different numeric types.
++
+[source,Painless]
+----
+<1> int i = 29%4;
+<2> double d = i%7.0;
+----
++
+<1> declare `int i`;
+    remainder `int 29` by `int 4` -> `int 1`;
+    store `int 1` in `i`
+<2> declare `double d`;
+    load from `int i` -> `int 1`;
+    promote `int 1` and `double 7.0`: result `double`;
+    implicit cast `int 1` to `double 1.0` -> `double 1.0`;
+    remainder `double 1.0` by `double 7.0` -> `double 1.0`;
+    store `double 1.0` to `d`
++
+* Remainder with the `def` type.
++
+[source,Painless]
+----
+<1> def x = 5%4;
+<2> def y = x%2;
+----
+<1> declare `def x`;
+    remainder `int 5` by `int 4` -> `int 1`;
+    implicit cast `int 1` to `def` -> `def`;
+    store `def` in `x`
+<2> declare `def y`;
+    load from `x` -> `def`;
+    implicit cast `def` to `int 1`;
+    remainder `int 1` by `int 2` -> `int 1`;
+    implicit cast `int 1` to `def` -> `def`;
+    store `def` to `y`
+
+[[addition-operator]]
+==== Addition
+
+Use the `addition operator '+'` to ADD together two numeric type values. Rules
+for resultant overflow and NaN values follow the JVM specification.
+
+*Errors*
+
+* If either of the values is a non-numeric type.
+ +*Grammar* + +[source,ANTLR4] +---- +addition: expression '+' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | float | double | def +| byte | int | int | int | int | long | float | double | def +| short | int | int | int | int | long | float | double | def +| char | int | int | int | int | long | float | double | def +| int | int | int | int | int | long | float | double | def +| long | long | long | long | long | long | float | double | def +| float | float | float | float | float | float | float | double | def +| double | double | double | double | double | double | double | double | def +| def | def | def | def | def | def | def | def | def +|==== + +*Examples* + +* Addition operator with different numeric types. ++ +[source,Painless] +---- +<1> int i = 29+4; +<2> double d = i+7.0; +---- ++ +<1> declare `int i`; + add `int 29` and `int 4` -> `int 33`; + store `int 33` in `i` +<2> declare `double d`; + load from `int i` -> `int 33`; + promote `int 33` and `double 7.0`: result `double`; + implicit cast `int 33` to `double 33.0` -> `double 33.0`; + add `double 33.0` and `double 7.0` -> `double 40.0`; + store `double 40.0` to `d` ++ +* Addition with the `def` type. ++ +[source,Painless] +---- +<1> def x = 5+4; +<2> def y = x+2; +---- +<1> declare `def x`; + add `int 5` and `int 4` -> `int 9`; + implicit cast `int 9` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 9`; + add `int 9` and `int 2` -> `int 11`; + implicit cast `int 11` to `def` -> `def`; + store `def` to `y` + +[[subtraction-operator]] +==== Subtraction + +Use the `subtraction operator '-'` to SUBTRACT a right-hand side numeric type +value from a left-hand side numeric type value. Rules for resultant overflow +and NaN values follow the JVM specification. + +*Errors* + +* If either of the values is a non-numeric type. 
+
+*Grammar*
+
+[source,ANTLR4]
+----
+subtraction: expression '-' expression;
+----
+
+*Promotion*
+
+[cols="<1,^1,^1,^1,^1,^1,^1,^1,^1"]
+|====
+| | byte | short | char | int | long | float | double | def
+| byte | int | int | int | int | long | float | double | def
+| short | int | int | int | int | long | float | double | def
+| char | int | int | int | int | long | float | double | def
+| int | int | int | int | int | long | float | double | def
+| long | long | long | long | long | long | float | double | def
+| float | float | float | float | float | float | float | double | def
+| double | double | double | double | double | double | double | double | def
+| def | def | def | def | def | def | def | def | def
+|====
+
+*Examples*
+
+* Subtraction with different numeric types.
++
+[source,Painless]
+----
+<1> int i = 29-4;
+<2> double d = i-7.5;
+----
++
+<1> declare `int i`;
+    subtract `int 4` from `int 29` -> `int 25`;
+    store `int 25` in `i`
+<2> declare `double d`
+    load from `int i` -> `int 25`;
+    promote `int 25` and `double 7.5`: result `double`;
+    implicit cast `int 25` to `double 25.0` -> `double 25.0`;
+    subtract `double 7.5` from `double 25.0` -> `double 17.5`;
+    store `double 17.5` to `d`
++
+* Subtraction with the `def` type.
++
+[source,Painless]
+----
+<1> def x = 5-4;
+<2> def y = x-2;
+----
+<1> declare `def x`;
+    subtract `int 4` from `int 5` -> `int 1`;
+    implicit cast `int 1` to `def` -> `def`;
+    store `def` in `x`
+<2> declare `def y`;
+    load from `x` -> `def`;
+    implicit cast `def` to `int 1`;
+    subtract `int 2` from `int 1` -> `int -1`;
+    implicit cast `int -1` to `def` -> `def`;
+    store `def` to `y`
+
+[[left-shift-operator]]
+==== Left Shift
+
+Use the `left shift operator '<<'` to SHIFT lower order bits to higher order
+bits in a left-hand side integer type value by the distance specified in a
+right-hand side integer type value.
+
+*Errors*
+
+* If either of the values is a non-integer type.
+* If the right-hand side value cannot be cast to an int type. + +*Grammar* + +[source,ANTLR4] +---- +left_shift: expression '<<' expression; +---- + +*Promotion* + +The left-hand side integer type value is promoted as specified in the table +below. The right-hand side integer type value is always implicitly cast to an +`int` type value and truncated to the number of bits of the promoted type value. + +[options="header",cols="<1,<1"] +|==== +| original | promoted +| byte | int +| short | int +| char | int +| int | int +| long | long +| def | def +|==== + +*Examples* + +* Left shift with different integer types. ++ +[source,Painless] +---- +<1> int i = 4 << 1; +<2> long l = i << 2L; +---- ++ +<1> declare `int i`; + left shift `int 4` by `int 1` -> `int 8`; + store `int 8` in `i` +<2> declare `long l` + load from `int i` -> `int 8`; + implicit cast `long 2` to `int 2` -> `int 2`; + left shift `int 8` by `int 2` -> `int 32`; + implicit cast `int 32` to `long 32` -> `long 32`; + store `long 32` to `l` ++ +* Left shift with the `def` type. ++ +[source,Painless] +---- +<1> def x = 4 << 2; +<2> def y = x << 1; +---- +<1> declare `def x`; + left shift `int 4` by `int 2` -> `int 16`; + implicit cast `int 16` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 16`; + left shift `int 16` by `int 1` -> `int 32`; + implicit cast `int 32` to `def` -> `def`; + store `def` to `y` + +[[right-shift-operator]] +==== Right Shift + +Use the `right shift operator '>>'` to SHIFT higher order bits to lower order +bits in a left-hand side integer type value by the distance specified in a +right-hand side integer type value. The highest order bit of the left-hand side +integer type value is preserved. + +*Errors* + +* If either of the values is a non-integer type. +* If the right-hand side value cannot be cast to an int type. 
+ +*Grammar* + +[source,ANTLR4] +---- +right_shift: expression '>>' expression; +---- + +*Promotion* + +The left-hand side integer type value is promoted as specified in the table +below. The right-hand side integer type value is always implicitly cast to an +`int` type value and truncated to the number of bits of the promoted type value. + +[options="header",cols="<1,<1"] +|==== +| original | promoted +| byte | int +| short | int +| char | int +| int | int +| long | long +| def | def +|==== + +*Examples* + +* Right shift with different integer types. ++ +[source,Painless] +---- +<1> int i = 32 >> 1; +<2> long l = i >> 2L; +---- ++ +<1> declare `int i`; + right shift `int 32` by `int 1` -> `int 16`; + store `int 16` in `i` +<2> declare `long l` + load from `int i` -> `int 16`; + implicit cast `long 2` to `int 2` -> `int 2`; + right shift `int 16` by `int 2` -> `int 4`; + implicit cast `int 4` to `long 4` -> `long 4`; + store `long 4` to `l` ++ +* Right shift with the `def` type. ++ +[source,Painless] +---- +<1> def x = 16 >> 2; +<2> def y = x >> 1; +---- +<1> declare `def x`; + right shift `int 16` by `int 2` -> `int 4`; + implicit cast `int 4` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 4`; + right shift `int 4` by `int 1` -> `int 2`; + implicit cast `int 2` to `def` -> `def`; + store `def` to `y` + +[[unsigned-right-shift-operator]] +==== Unsigned Right Shift + +Use the `unsigned right shift operator '>>>'` to SHIFT higher order bits to +lower order bits in a left-hand side integer type value by the distance +specified in a right-hand side type integer value. The highest order bit of the +left-hand side integer type value is *not* preserved. + +*Errors* + +* If either of the values is a non-integer type. +* If the right-hand side value cannot be cast to an int type. 
+ +*Grammar* + +[source,ANTLR4] +---- +unsigned_right_shift: expression '>>>' expression; +---- + +*Promotion* + +The left-hand side integer type value is promoted as specified in the table +below. The right-hand side integer type value is always implicitly cast to an +`int` type value and truncated to the number of bits of the promoted type value. + +[options="header",cols="<1,<1"] +|==== +| original | promoted +| byte | int +| short | int +| char | int +| int | int +| long | long +| def | def +|==== + +*Examples* + +* Unsigned right shift with different integer types. ++ +[source,Painless] +---- +<1> int i = -1 >>> 29; +<2> long l = i >>> 2L; +---- ++ +<1> declare `int i`; + unsigned right shift `int -1` by `int 29` -> `int 7`; + store `int 7` in `i` +<2> declare `long l` + load from `int i` -> `int 7`; + implicit cast `long 2` to `int 2` -> `int 2`; + unsigned right shift `int 7` by `int 2` -> `int 3`; + implicit cast `int 3` to `long 3` -> `long 3`; + store `long 3` to `l` ++ +* Unsigned right shift with the `def` type. ++ +[source,Painless] +---- +<1> def x = 16 >>> 2; +<2> def y = x >>> 1; +---- +<1> declare `def x`; + unsigned right shift `int 16` by `int 2` -> `int 4`; + implicit cast `int 4` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 4`; + unsigned right shift `int 4` by `int 1` -> `int 2`; + implicit cast `int 2` to `def` -> `def`; + store `def` to `y` + +[[bitwise-and-operator]] +==== Bitwise And + +Use the `bitwise and operator '&'` to AND together each bit within two +integer type values where if both bits at the same index are `1` the resultant +bit is `1` and `0` otherwise. + +*Errors* + +* If either of the values is a non-integer type. 
+ +*Bits* + +[cols="^1,^1,^1"] +|==== +| | 1 | 0 +| 1 | 1 | 0 +| 0 | 0 | 0 +|==== + +*Grammar* + +[source,ANTLR4] +---- +bitwise_and: expression '&' expression; +---- + +*Promotion* + +[cols="<1,^1,^1,^1,^1,^1,^1"] +|==== +| | byte | short | char | int | long | def +| byte | int | int | int | int | long | def +| short | int | int | int | int | long | def +| char | int | int | int | int | long | def +| int | int | int | int | int | long | def +| long | long | long | long | long | long | def +| def | def | def | def | def | def | def +|==== + +*Examples* + +* Bitwise and with different integer types. ++ +[source,Painless] +---- +<1> int i = 5 & 6; +<2> long l = i & 5L; +---- ++ +<1> declare `int i`; + bitwise and `int 5` and `int 6` -> `int 4`; + store `int 4` in `i` +<2> declare `long l` + load from `int i` -> `int 4`; + promote `int 4` and `long 5`: result `long`; + implicit cast `int 4` to `long 4` -> `long 4`; + bitwise and `long 4` and `long 5` -> `long 4`; + store `long 4` to `l` ++ +* Bitwise and with the `def` type. ++ +[source,Painless] +---- +<1> def x = 15 & 6; +<2> def y = x & 5; +---- +<1> declare `def x`; + bitwise and `int 15` and `int 6` -> `int 6`; + implicit cast `int 6` to `def` -> `def`; + store `def` in `x` +<2> declare `def y`; + load from `x` -> `def`; + implicit cast `def` to `int 6`; + bitwise and `int 6` and `int 5` -> `int 4`; + implicit cast `int 4` to `def` -> `def`; + store `def` to `y` + +[[bitwise-xor-operator]] +==== Bitwise Xor + +Use the `bitwise xor operator '^'` to XOR together each bit within two integer +type values where if one bit is a `1` and the other bit is a `0` at the same +index the resultant bit is `1` otherwise the resultant bit is `0`. + +*Errors* + +* If either of the values is a non-integer type. + +*Bits* + +The following table illustrates the resultant bit from the xoring of two bits. 
+
+[cols="^1,^1,^1"]
+|====
+| | 1 | 0
+| 1 | 0 | 1
+| 0 | 1 | 0
+|====
+
+*Grammar*
+
+[source,ANTLR4]
+----
+bitwise_xor: expression '^' expression;
+----
+
+*Promotion*
+
+[cols="<1,^1,^1,^1,^1,^1,^1"]
+|====
+| | byte | short | char | int | long | def
+| byte | int | int | int | int | long | def
+| short | int | int | int | int | long | def
+| char | int | int | int | int | long | def
+| int | int | int | int | int | long | def
+| long | long | long | long | long | long | def
+| def | def | def | def | def | def | def
+|====
+
+*Examples*
+
+* Bitwise xor with different integer types.
++
+[source,Painless]
+----
+<1> int i = 5 ^ 6;
+<2> long l = i ^ 5L;
+----
++
+<1> declare `int i`;
+    bitwise xor `int 5` and `int 6` -> `int 3`;
+    store `int 3` in `i`
+<2> declare `long l`
+    load from `int i` -> `int 3`;
+    promote `int 3` and `long 5`: result `long`;
+    implicit cast `int 3` to `long 3` -> `long 3`;
+    bitwise xor `long 3` and `long 5` -> `long 6`;
+    store `long 6` to `l`
++
+* Bitwise xor with the `def` type.
++
+[source,Painless]
+----
+<1> def x = 15 ^ 6;
+<2> def y = x ^ 5;
+----
+<1> declare `def x`;
+    bitwise xor `int 15` and `int 6` -> `int 9`;
+    implicit cast `int 9` to `def` -> `def`;
+    store `def` in `x`
+<2> declare `def y`;
+    load from `x` -> `def`;
+    implicit cast `def` to `int 9`;
+    bitwise xor `int 9` and `int 5` -> `int 12`;
+    implicit cast `int 12` to `def` -> `def`;
+    store `def` to `y`
+
+[[bitwise-or-operator]]
+==== Bitwise Or
+
+Use the `bitwise or operator '|'` to OR together each bit within two integer
+type values where if at least one bit is a `1` at the same index the resultant
+bit is `1` otherwise the resultant bit is `0`.
+
+*Errors*
+
+* If either of the values is a non-integer type.
+
+*Bits*
+
+The following table illustrates the resultant bit from the oring of two bits.
+
+[cols="^1,^1,^1"]
+|====
+| | 1 | 0
+| 1 | 1 | 1
+| 0 | 1 | 0
+|====
+
+*Grammar*
+
+[source,ANTLR4]
+----
+bitwise_or: expression '|' expression;
+----
+
+*Promotion*
+
+[cols="<1,^1,^1,^1,^1,^1,^1"]
+|====
+| | byte | short | char | int | long | def
+| byte | int | int | int | int | long | def
+| short | int | int | int | int | long | def
+| char | int | int | int | int | long | def
+| int | int | int | int | int | long | def
+| long | long | long | long | long | long | def
+| def | def | def | def | def | def | def
+|====
+
+*Examples*
+
+* Bitwise or with different integer types.
++
+[source,Painless]
+----
+<1> int i = 5 | 6;
+<2> long l = i | 8L;
+----
++
+<1> declare `int i`;
+    bitwise or `int 5` and `int 6` -> `int 7`;
+    store `int 7` in `i`
+<2> declare `long l`
+    load from `int i` -> `int 7`;
+    promote `int 7` and `long 8`: result `long`;
+    implicit cast `int 7` to `long 7` -> `long 7`;
+    bitwise or `long 7` and `long 8` -> `long 15`;
+    store `long 15` to `l`
++
+* Bitwise or with the `def` type.
++
+[source,Painless]
+----
+<1> def x = 5 | 6;
+<2> def y = x | 8;
+----
+<1> declare `def x`;
+    bitwise or `int 5` and `int 6` -> `int 7`;
+    implicit cast `int 7` to `def` -> `def`;
+    store `def` in `x`
+<2> declare `def y`;
+    load from `x` -> `def`;
+    implicit cast `def` to `int 7`;
+    bitwise or `int 7` and `int 8` -> `int 15`;
+    implicit cast `int 15` to `def` -> `def`;
+    store `def` to `y`
\ No newline at end of file
diff --git a/docs/painless/painless-operators-reference.asciidoc b/docs/painless/painless-operators-reference.asciidoc
new file mode 100644
index 00000000000..487fcce15f3
--- /dev/null
+++ b/docs/painless/painless-operators-reference.asciidoc
@@ -0,0 +1,774 @@
+[[painless-operators-reference]]
+=== Operators: Reference
+
+[[method-call-operator]]
+==== Method Call
+
+Use the `method call operator '()'` to call a member method on a
+<> value. Implicit
+<> is evaluated as necessary per argument
+during the method call.
When a method call is made on a target `def` type value, +the parameters and return type value are considered to also be of the `def` type +and are evaluated at run-time. + +An overloaded method is one that shares the same name with two or more methods. +A method is overloaded based on arity where the same name is re-used for +multiple methods as long as the number of parameters differs. + +*Errors* + +* If the reference type value is `null`. +* If the member method name doesn't exist for a given reference type value. +* If the number of arguments passed in is different from the number of specified + parameters. +* If the arguments cannot be implicitly cast or implicitly boxed/unboxed to the + correct type values for the parameters. + +*Grammar* + +[source,ANTLR4] +---- +method_call: '.' ID arguments; +arguments: '(' (expression (',' expression)*)? ')'; +---- + +*Examples* + +* Method calls on different reference types. ++ +[source,Painless] +---- +<1> Map m = new HashMap(); +<2> m.put(1, 2); +<3> int z = m.get(1); +<4> def d = new ArrayList(); +<5> d.add(1); +<6> int i = Integer.parseInt(d.get(0).toString()); +---- ++ +<1> declare `Map m`; + allocate `HashMap` instance -> `HashMap reference`; + store `HashMap reference` to `m` +<2> load from `m` -> `Map reference`; + implicit cast `int 1` to `def` -> `def`; + implicit cast `int 2` to `def` -> `def`; + call `put` on `Map reference` with arguments (`int 1`, `int 2`) +<3> declare `int z`; + load from `m` -> `Map reference`; + call `get` on `Map reference` with arguments (`int 1`) -> `def`; + implicit cast `def` to `int 2` -> `int 2`; + store `int 2` to `z` +<4> declare `def d`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList` to `def` -> `def`; + store `def` to `d` +<5> load from `d` -> `def`; + implicit cast `def` to `ArrayList reference` -> `ArrayList reference` + call `add` on `ArrayList reference` with arguments (`int 1`); +<6> declare `int i`; + load from `d` -> `def`; + 
implicit cast `def` to `ArrayList reference` -> `ArrayList reference` + call `get` on `ArrayList reference` with arguments (`int 1`) -> `def`; + implicit cast `def` to `Integer 1 reference` -> `Integer 1 reference`; + call `toString` on `Integer 1 reference` -> `String '1'`; + call `parseInt` on `Integer` with arguments (`String '1'`) -> `int 1`; + store `int 1` in `i`; + +[[field-access-operator]] +==== Field Access + +Use the `field access operator '.'` to store a value to or load a value from a +<> member field. + +*Errors* + +* If the reference type value is `null`. +* If the member field name doesn't exist for a given reference type value. + +*Grammar* + +[source,ANTLR4] +---- +field_access: '.' ID; +---- + +*Examples* + +The examples use the following reference type definition: + +[source,Painless] +---- +name: + Example + +non-static member fields: + * int x + * def y + * List z +---- + +* Field access with the `Example` type. ++ +[source,Painless] +---- +<1> Example example = new Example(); +<2> example.x = 1; +<3> example.y = example.x; +<4> example.z = new ArrayList(); +<5> example.z.add(1); +<6> example.x = example.z.get(0); +---- ++ +<1> declare `Example example`; + allocate `Example` instance -> `Example reference`; + store `Example reference` to `example` +<2> load from `example` -> `Example reference`; + store `int 1` to `x` of `Example reference` +<3> load from `example` -> `Example reference @0`; + load from `example` -> `Example reference @1`; + load from `x` of `Example reference @1` -> `int 1`; + implicit cast `int 1` to `def` -> `def`; + store `def` to `y` of `Example reference @0`; + (note `Example reference @0` and `Example reference @1` are the same) +<4> load from `example` -> `Example reference`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `z` of `Example reference` +<5> load from `example` -> `Example reference`; + load 
from `z` of `Example reference` -> `List reference`; + call `add` on `List reference` with arguments (`int 1`) +<6> load from `example` -> `Example reference @0`; + load from `example` -> `Example reference @1`; + load from `z` of `Example reference @1` -> `List reference`; + call `get` on `List reference` with arguments (`int 0`) -> `int 1`; + store `int 1` in `x` of `List reference @0`; + (note `Example reference @0` and `Example reference @1` are the same) + +[[null-safe-operator]] +==== Null Safe + +Use the `null safe operator '?.'` instead of the method call operator or field +access operator to ensure a reference type value is `non-null` before +a method call or field access. A `null` value will be returned if the reference +type value is `null`, otherwise the method call or field access is evaluated. + +*Errors* + +* If the method call return type value or the field access type value is not + a reference type value and is not implicitly castable to a reference type + value. + +*Grammar* + +[source,ANTLR4] +---- +null_safe: null_safe_method_call + | null_safe_field_access + ; + +null_safe_method_call: '?.' ID arguments; +arguments: '(' (expression (',' expression)*)? ')'; + +null_safe_field_access: '?.' ID; +---- + +*Examples* + +The examples use the following reference type definition: + +[source,Painless] +---- +name: + Example + +non-static member methods: + * List factory() + +non-static member fields: + * List x +---- + +* Null safe without a `null` value. 
++ +[source,Painless] +---- +<1> Example example = new Example(); +<2> List x = example?.factory(); +---- ++ +<1> declare `Example example`; + allocate `Example` instance -> `Example reference`; + store `Example reference` to `example` +<2> declare `List x`; + load from `example` -> `Example reference`; + null safe call `factory` on `Example reference` -> `List reference`; + store `List reference` to `x`; ++ +* Null safe with a `null` value; ++ +[source,Painless] +---- +<1> Example example = null; +<2> List x = example?.x; +---- +<1> declare `Example example`; + store `null` to `example` +<2> declare `List x`; + load from `example` -> `Example reference`; + null safe access `x` on `Example reference` -> `null`; + store `null` to `x`; + (note the *null safe operator* returned `null` because `example` is `null`) + +[[list-initialization-operator]] +==== List Initialization + +Use the `list initialization operator '[]'` to allocate an `List` type instance +to the heap with a set of pre-defined values. Each value used to initialize the +`List` type instance is cast to a `def` type value upon insertion into the +`List` type instance using the `add` method. The order of the specified values +is maintained. + +*Grammar* + +[source,ANTLR4] +---- +list_initialization: '[' expression (',' expression)* ']' + | '[' ']'; +---- + +*Examples* + +* List initialization of an empty `List` type value. ++ +[source,Painless] +---- +<1> List empty = []; +---- ++ +<1> declare `List empty`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `empty` ++ +* List initialization with static values. 
++ +[source,Painless] +---- +<1> List list = [1, 2, 3]; +---- ++ +<1> declare `List list`; + allocate `ArrayList` instance -> `ArrayList reference`; + call `add` on `ArrayList reference` with arguments(`int 1`); + call `add` on `ArrayList reference` with arguments(`int 2`); + call `add` on `ArrayList reference` with arguments(`int 3`); + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `list` ++ +* List initialization with non-static values. ++ +[source,Painless] +---- +<1> int i = 1; +<2> long l = 2L; +<3> float f = 3.0F; +<4> double d = 4.0; +<5> String s = "5"; +<6> List list = [i, l, f*d, s]; +---- ++ +<1> declare `int i`; + store `int 1` to `i` +<2> declare `long l`; + store `long 2` to `l` +<3> declare `float f`; + store `float 3.0` to `f` +<4> declare `double d`; + store `double 4.0` to `d` +<5> declare `String s`; + store `String "5"` to `s` +<6> declare `List list`; + allocate `ArrayList` instance -> `ArrayList reference`; + load from `i` -> `int 1`; + call `add` on `ArrayList reference` with arguments(`int 1`); + load from `l` -> `long 2`; + call `add` on `ArrayList reference` with arguments(`long 2`); + load from `f` -> `float 3.0`; + load from `d` -> `double 4.0`; + promote `float 3.0` and `double 4.0`: result `double`; + implicit cast `float 3.0` to `double 3.0` -> `double 3.0`; + multiply `double 3.0` and `double 4.0` -> `double 12.0`; + call `add` on `ArrayList reference` with arguments(`double 12.0`); + load from `s` -> `String "5"`; + call `add` on `ArrayList reference` with arguments(`String "5"`); + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `list` + +[[list-access-operator]] +==== List Access + +Use the `list access operator '[]'` as a shortcut for a `set` method call or +`get` method call made on a `List` type value. + +*Errors* + +* If a value other than a `List` type value is accessed. 
+* If a non-integer type value is used as an index for a `set` method call or + `get` method call. + +*Grammar* + +[source,ANTLR4] +---- +list_access: '[' expression ']' +---- + +*Examples* + +* List access with the `List` type. ++ +[source,Painless] +---- +<1> List list = new ArrayList(); +<2> list.add(1); +<3> list.add(2); +<4> list.add(3); +<5> list[0] = 2; +<6> list[1] = 5; +<7> int x = list[0] + list[1]; +<8> int y = 1; +<9> int z = list[y]; +---- ++ +<1> declare `List list`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `list` +<2> load from `list` -> `List reference`; + call `add` on `List reference` with arguments(`int 1`) +<3> load from `list` -> `List reference`; + call `add` on `List reference` with arguments(`int 2`) +<4> load from `list` -> `List reference`; + call `add` on `List reference` with arguments(`int 3`) +<5> load from `list` -> `List reference`; + call `set` on `List reference` with arguments(`int 0`, `int 2`) +<6> load from `list` -> `List reference`; + call `set` on `List reference` with arguments(`int 1`, `int 5`) +<7> declare `int x`; + load from `list` -> `List reference`; + call `get` on `List reference` with arguments(`int 0`) -> `def`; + implicit cast `def` to `int 2` -> `int 2`; + load from `list` -> `List reference`; + call `get` on `List reference` with arguments(`int 1`) -> `def`; + implicit cast `def` to `int 5` -> `int 5`; + add `int 2` and `int 5` -> `int 7`; + store `int 7` to `x` +<8> declare `int y`; + store `int 1` int `y` +<9> declare `int z`; + load from `list` -> `List reference`; + load from `y` -> `int 1`; + call `get` on `List reference` with arguments(`int 1`) -> `def`; + implicit cast `def` to `int 5` -> `int 5`; + store `int 5` to `z` ++ +* List access with the `def` type. 
++
+[source,Painless]
+----
+<1> def d = new ArrayList();
+<2> d.add(1);
+<3> d.add(2);
+<4> d.add(3);
+<5> d[0] = 2;
+<6> d[1] = 5;
+<7> def x = d[0] + d[1];
+<8> def y = 1;
+<9> def z = d[y];
+----
++
+<1> declare `def d`;
+    allocate `ArrayList` instance -> `ArrayList reference`;
+    implicit cast `ArrayList reference` to `def` -> `def`;
+    store `def` to `d`
+<2> load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `add` on `ArrayList reference` with arguments(`int 1`)
+<3> load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `add` on `ArrayList reference` with arguments(`int 2`)
+<4> load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `add` on `ArrayList reference` with arguments(`int 3`)
+<5> load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `set` on `ArrayList reference` with arguments(`int 0`, `int 2`)
+<6> load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `set` on `ArrayList reference` with arguments(`int 1`, `int 5`)
+<7> declare `def x`;
+    load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `get` on `ArrayList reference` with arguments(`int 0`) -> `def`;
+    implicit cast `def` to `int 2` -> `int 2`;
+    load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    call `get` on `ArrayList reference` with arguments(`int 1`) -> `def`;
+    implicit cast `def` to `int 5` -> `int 5`;
+    add `int 2` and `int 5` -> `int 7`;
+    store `int 7` to `x`
+<8> declare `def y`;
+    store `int 1` in `y`
+<9> declare `def z`;
+    load from `d` -> `def`;
+    implicit cast `def` to `ArrayList reference` -> `ArrayList reference`;
+    load from `y` -> `def`;
+    implicit cast `def` to `int 1` -> `int 1`;
+    call `get` on `ArrayList reference` with arguments(`int 1`) -> `def`;
+    store `def` to `z`
+
+[[map-initialization-operator]] +==== Map Initialization + +Use the `map initialization operator '[:]'` to allocate a `Map` type instance to +the heap with a set of pre-defined values. Each pair of values used to +initialize the `Map` type instance are cast to `def` type values upon insertion +into the `Map` type instance using the `put` method. + +*Grammar* + +[source,ANTLR4] +---- +map_initialization: '[' key_pair (',' key_pair)* ']' + | '[' ':' ']'; +key_pair: expression ':' expression +---- + +*Examples* + +* Map initialization of an empty `Map` type value. ++ +[source,Painless] +---- +<1> Map empty = [:]; +---- ++ +<1> declare `Map empty`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `Map reference` -> `Map reference`; + store `Map reference` to `empty` ++ +* Map initialization with static values. ++ +[source,Painless] +---- +<1> Map map = [1:2, 3:4, 5:6]; +---- ++ +<1> declare `Map map`; + allocate `HashMap` instance -> `HashMap reference`; + call `put` on `HashMap reference` with arguments(`int 1`, `int 2`); + call `put` on `HashMap reference` with arguments(`int 3`, `int 4`); + call `put` on `HashMap reference` with arguments(`int 5`, `int 6`); + implicit cast `HashMap reference` to `Map reference` -> `Map reference`; + store `Map reference` to `map` ++ +* Map initialization with non-static values. 
++ +[source,Painless] +---- +<1> byte b = 0; +<2> int i = 1; +<3> long l = 2L; +<4> float f = 3.0F; +<5> double d = 4.0; +<6> String s = "5"; +<7> Map map = [b:i, l:f*d, d:s]; +---- ++ +<1> declare `byte b`; + store `byte 0` to `b` +<2> declare `int i`; + store `int 1` to `i` +<3> declare `long l`; + store `long 2` to `l` +<4> declare `float f`; + store `float 3.0` to `f` +<5> declare `double d`; + store `double 4.0` to `d` +<6> declare `String s`; + store `String "5"` to `s` +<7> declare `Map map`; + allocate `HashMap` instance -> `HashMap reference`; + load from `b` -> `byte 0`; + load from `i` -> `int 1`; + call `put` on `HashMap reference` with arguments(`byte 0`, `int 1`); + load from `l` -> `long 2`; + load from `f` -> `float 3.0`; + load from `d` -> `double 4.0`; + promote `float 3.0` and `double 4.0`: result `double`; + implicit cast `float 3.0` to `double 3.0` -> `double 3.0`; + multiply `double 3.0` and `double 4.0` -> `double 12.0`; + call `put` on `HashMap reference` with arguments(`long 2`, `double 12.0`); + load from `d` -> `double 4.0`; + load from `s` -> `String "5"`; + call `put` on `HashMap reference` with + arguments(`double 4.0`, `String "5"`); + implicit cast `HashMap reference` to `Map reference` -> `Map reference`; + store `Map reference` to `map` + +[[map-access-operator]] +==== Map Access + +Use the `map access operator '[]'` as a shortcut for a `put` method call or +`get` method call made on a `Map` type value. + +*Errors* + +* If a value other than a `Map` type value is accessed. + +*Grammar* +[source,ANTLR4] +---- +map_access: '[' expression ']' +---- + +*Examples* + +* Map access with the `Map` type. 
++
+[source,Painless]
+----
+<1> Map map = new HashMap();
+<2> map['value2'] = 2;
+<3> map['value5'] = 5;
+<4> int x = map['value2'] + map['value5'];
+<5> String y = 'value5';
+<6> int z = map[y];
+----
++
+<1> declare `Map map`;
+    allocate `HashMap` instance -> `HashMap reference`;
+    implicit cast `HashMap reference` to `Map reference` -> `Map reference`;
+    store `Map reference` to `map`
+<2> load from `map` -> `Map reference`;
+    call `put` on `Map reference` with arguments(`String 'value2'`, `int 2`)
+<3> load from `map` -> `Map reference`;
+    call `put` on `Map reference` with arguments(`String 'value5'`, `int 5`)
+<4> declare `int x`;
+    load from `map` -> `Map reference`;
+    call `get` on `Map reference` with arguments(`String 'value2'`) -> `def`;
+    implicit cast `def` to `int 2` -> `int 2`;
+    load from `map` -> `Map reference`;
+    call `get` on `Map reference` with arguments(`String 'value5'`) -> `def`;
+    implicit cast `def` to `int 5` -> `int 5`;
+    add `int 2` and `int 5` -> `int 7`;
+    store `int 7` to `x`
+<5> declare `String y`;
+    store `String 'value5'` to `y`
+<6> declare `int z`;
+    load from `map` -> `Map reference`;
+    load from `y` -> `String 'value5'`;
+    call `get` on `Map reference` with arguments(`String 'value5'`) -> `def`;
+    implicit cast `def` to `int 5` -> `int 5`;
+    store `int 5` to `z`
++
+* Map access with the `def` type.
++ +[source,Painless] +---- +<1> def d = new HashMap(); +<2> d['value2'] = 2; +<3> d['value5'] = 5; +<4> int x = d['value2'] + d['value5']; +<5> String y = 'value5'; +<6> def z = d[y]; +---- ++ +<1> declare `def d`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def`; + store `def` to `d` +<2> load from `d` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + call `put` on `HashMap reference` with arguments(`String 'value2'`, `int 2`) +<3> load from `d` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + call `put` on `HashMap reference` with arguments(`String 'value5'`, `int 5`) +<4> declare `int x`; + load from `d` -> `def`; + implicit cast `def` to `HashMap reference` -> `HashMap reference`; + call `get` on `HashMap reference` with arguments(`String 'value2'`) + -> `def`; + implicit cast `def` to `int 2` -> `int 2`; + load from `d` -> `def`; + call `get` on `HashMap reference` with arguments(`String 'value5'`) + -> `def`; + implicit cast `def` to `int 5` -> `int 5`; + add `int 2` and `int 5` -> `int 7`; + store `int 7` to `x` +<5> declare `String y`; + store `String 'value5'` to `y` +<6> declare `def z`; + load from `d` -> `def`; + load from `y` -> `String 'value5'`; + call `get` on `HashMap reference` with arguments(`String 'value5'`) + -> `def`; + store `def` to `z` + +[[new-instance-operator]] +==== New Instance + +Use the `new instance operator 'new ()'` to allocate a +<> instance to the heap and call a specified +constructor. Implicit <> is evaluated as +necessary per argument during the constructor call. + +An overloaded constructor is one that shares the same name with two or more +constructors. A constructor is overloaded based on arity where the same +reference type name is re-used for multiple constructors as long as the number +of parameters differs. + +*Errors* + +* If the reference type name doesn't exist for instance allocation. 
+* If the number of arguments passed in is different from the number of specified + parameters. +* If the arguments cannot be implicitly cast or implicitly boxed/unboxed to the + correct type values for the parameters. + +*Grammar* + +[source,ANTLR4] +---- +new_instance: 'new' TYPE '(' (expression (',' expression)*)? ')'; +---- + +*Examples* + +* Allocation of new instances with different types. + +[source,Painless] +---- +<1> Map m = new HashMap(); +<2> def d = new ArrayList(); +<3> def e = new HashMap(m); +---- +<1> declare `Map m`; + allocate `HashMap` instance -> `HashMap reference`; + implicit cast `HashMap reference` to `Map reference` -> `Map reference`; + store `Map reference` to `m`; +<2> declare `def d`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `def` -> `def`; + store `def` to `d`; +<3> declare `def e`; + load from `m` -> `Map reference`; + allocate `HashMap` instance with arguments (`Map reference`) + -> `HashMap reference`; + implicit cast `HashMap reference` to `def` -> `def`; + store `def` to `e`; + +[[string-concatenation-operator]] +==== String Concatenation + +Use the `string concatenation operator '+'` to concatenate two values together +where at least one of the values is a <>. + +*Grammar* + +[source,ANTLR4] +---- +concatenate: expression '+' expression; +---- + +*Examples* + +* String concatenation with different primitive types. 
++
+[source,Painless]
+----
+<1> String x = "con";
+<2> String y = x + "cat";
+<3> String z = 4 + 5 + x;
+----
++
+<1> declare `String x`;
+    store `String "con"` to `x`;
+<2> declare `String y`;
+    load from `x` -> `String "con"`;
+    concat `String "con"` and `String "cat"` -> `String "concat"`;
+    store `String "concat"` to `y`
+<3> declare `String z`;
+    add `int 4` and `int 5` -> `int 9`;
+    concat `int 9` and `String "con"` -> `String "9con"`;
+    store `String "9con"` to `z`;
+    (note the addition is done prior to the concatenation due to precedence and
+    associativity of the specific operations)
++
+* String concatenation with the `def` type.
++
+[source,Painless]
+----
+<1> def d = 2;
+<2> d = "con" + d + "cat";
+----
++
+<1> declare `def`;
+    implicit cast `int 2` to `def` -> `def`;
+    store `def` in `d`;
+<2> concat `String "con"` and `int 2` -> `String "con2"`;
+    concat `String "con2"` and `String "cat"` -> `String "con2cat"`
+    implicit cast `String "con2cat"` to `def` -> `def`;
+    store `def` to `d`;
+    (note the switch in type of `d` from `int` to `String`)
+
+[[elvis-operator]]
+==== Elvis
+
+An elvis consists of two expressions. The first expression is evaluated
+to check for a `null` value. If the first expression evaluates to
+`null` then the second expression is evaluated and its value used. If the first
+expression evaluates to `non-null` then the resultant value of the first
+expression is used. Use the `elvis operator '?:'` as a shortcut for the
+conditional operator.
+
+*Errors*
+
+* If the first expression or second expression cannot produce a `null` value.
+
+*Grammar*
+
+[source,ANTLR4]
+----
+elvis: expression '?:' expression;
+----
+
+*Examples*
+
+* Elvis with different reference types. 
++ +[source,Painless] +---- +<1> List x = new ArrayList(); +<2> List y = x ?: new ArrayList(); +<3> y = null; +<4> List z = y ?: new ArrayList(); +---- ++ +<1> declare `List x`; + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `x`; +<2> declare `List y`; + load `x` -> `List reference`; + `List reference` equals `null` -> `false`; + evaluate 1st expression: `List reference` -> `List reference`; + store `List reference` to `y` +<3> store `null` to `y`; +<4> declare `List z`; + load `y` -> `List reference`; + `List reference` equals `null` -> `true`; + evaluate 2nd expression: + allocate `ArrayList` instance -> `ArrayList reference`; + implicit cast `ArrayList reference` to `List reference` -> `List reference`; + store `List reference` to `z`; diff --git a/docs/painless/painless-operators.asciidoc b/docs/painless/painless-operators.asciidoc index 8329686f663..b51e94088a6 100644 --- a/docs/painless/painless-operators.asciidoc +++ b/docs/painless/painless-operators.asciidoc @@ -1,1819 +1,64 @@ [[painless-operators]] === Operators -The following is a table of the available operators in Painless. Each operator will have further information and examples outside of the table. Many operators will have a promotion table as described by the documentation on promotion [MARK]. - -[options="header",cols="6,3,2,4"] -|==== -|Operator|Symbol(s)|Precedence|Associativity -|Precedence|()|0|left-to-right -|Field Access|.|1|left-to-right -|Method Call|. 
()|1|left-to-right -|Null Safe|?.|1|left-to-right -|Function Call|()|1|left-to-right -|Array Initialization|[] {}|1|left-to-right -|Array Access|[]|1|left-to-right -|Array Length|.|1|left-to-right -|List Initialization|[]|1|left-to-right -|List Access|[]|1|left-to-right -|Map Initialization|[:]|1|left-to-right -|Map Access|[]|1|left-to-right -|Post Increment|++|1|left-to-right -|Post Decrement|--|1|left-to-right -|Pre Increment|++|2|right-to-left -|Pre Decrement|--|2|right-to-left -|Unary Positive|+|2|right-to-left -|Unary Negative|-|2|right-to-left -|Boolean Not|!|2|right-to-left -|Bitwise Not|~|2|right-to-left -|Cast|()|3|right-to-left -|Constructor Call|new ()|3|right-to-left -|New Array|new|3|right-to-left -|Multiplication|*|4|left-to-right -|Division|/|4|left-to-right -|Remainder|%|4|left-to-right -|String Concatenation|+|5|left-to-right -|Addition|+|5|left-to-right -|Subtraction|-|5|left-to-right -|Left Shift|<<|6|left-to-right -|Right Shift|>>|6|left-to-right -|Unsigned Right Shift|>>>|6|left-to-right -|Greater Than|>|7|left-to-right -|Greater Than Or Equal|>=|7|left-to-right -|Less Than|<|7|left-to-right -|Less Than Or Equal|<=|7|left-to-right -|Instance Of|instanceof|8|left-to-right -|Equality Equals|==|9|left-to-right -|Equality Not Equals|!=|9|left-to-right -|Identity Equals|===|9|left-to-right -|Identity Not Equals|!==|9|left-to-right -|Bitwise And|&|10|left-to-right -|Boolean Xor|^|11|left-to-right -|Bitwise Xor|^|11|left-to-right -|Bitwise Or|\||12|left-to-right -|Boolean And|&&|13|left-to-right -|Boolean Or|\|\||14|left-to-right -|Conditional|? :|15|right-to-left -|Elvis|?:|16|right-to-left -|Assignment|=|17|right-to-left -|Compound Assignment|$=|17|right-to-left +An operator is the most basic action that can be taken to evaluate values in a +script. An expression is one-to-many consecutive operations. Precedence is the +order in which an operator will be evaluated relative to another operator. 
+Associativity is the direction within an expression in which a specific operator +is evaluated. The following table lists all available operators: + +[cols="<6,<3,^3,^2,^4"] +|==== +| *Operator* | *Category* | *Symbol(s)* | *Precedence* | *Associativity* +| <> | <> | () | 0 | left -> right +| <> | <> | . () | 1 | left -> right +| <> | <> | . | 1 | left -> right +| <> | <> | ?. | 1 | left -> right +| <> | <> | () | 1 | left -> right +| <> | <> | [] {} | 1 | left -> right +| <> | <> | [] | 1 | left -> right +| <> | <> | . | 1 | left -> right +| <> | <> | [] | 1 | left -> right +| <> | <> | [] | 1 | left -> right +| <> | <> | [:] | 1 | left -> right +| <> | <> | [] | 1 | left -> right +| <> | <> | ++ | 1 | left -> right +| <> | <> | -- | 1 | left -> right +| <> | <> | ++ | 2 | right -> left +| <> | <> | -- | 2 | right -> left +| <> | <> | + | 2 | right -> left +| <> | <> | - | 2 | right -> left +| <> | <> | ! | 2 | right -> left +| <> | <> | ~ | 2 | right -> left +| <> | <> | () | 3 | right -> left +| <> | <> | new () | 3 | right -> left +| <> | <> | new [] | 3 | right -> left +| <> | <> | * | 4 | left -> right +| <> | <> | / | 4 | left -> right +| <> | <> | % | 4 | left -> right +| <> | <> | + | 5 | left -> right +| <> | <> | + | 5 | left -> right +| <> | <> | - | 5 | left -> right +| <> | <> | << | 6 | left -> right +| <> | <> | >> | 6 | left -> right +| <> | <> | >>> | 6 | left -> right +| <> | <> | > | 7 | left -> right +| <> | <> | >= | 7 | left -> right +| <> | <> | < | 7 | left -> right +| <> | <> | <= | 7 | left -> right +| <> | <> | instanceof | 8 | left -> right +| <> | <> | == | 9 | left -> right +| <> | <> | != | 9 | left -> right +| <> | <> | === | 9 | left -> right +| <> | <> | !== | 9 | left -> right +| <> | <> | & | 10 | left -> right +| <> | <> | ^ | 11 | left -> right +| <> | <> | ^ | 11 | left -> right +| <> | <> | \| | 12 | left -> right +| <> | <> | && | 13 | left -> right +| <> | <> | \|\| | 14 | left -> right +| <> | <> | ? 
: | 15 | right -> left +| <> | <> | ?: | 16 | right -> left +| <> | <> | = | 17 | right -> left +| <> | <> | $= | 17 | right -> left |==== - -[[precedence-operator]] -==== Precedence - -You group expressions using the precedence operator to guarantee -the order of evaluation and override existing precedence relationships between operators. The format is an opening parenthesis, one or more expressions, and -a closing parenthesis. For example, `(20+1)*2`. - -*Grammar:* -[source,ANTLR4] ----- -precedence: '(' expression ')'; ----- - -*Examples:* -[source,Java] ----- -int x = (5+4)*6; // declares the variable int x and sets it to (5+4)*6 - // where 5+4 is evaluated first due to the precedence operator -int y = 2*(x-4); // declares the variable int y and sets it to 2*(x-4) - // where x-4 is evaluated first due to the precedence operator ----- - - -[[dot-operator]] -==== Dot -You use the dot operator `.` to access a type's <> and <>. - -[[field-access]] -===== Accessing Fields -You access primitive and reference type members in a reference type using the -dot operator '.' followed by the id of the member. The accessed member behaves -the same way as the type it represents with one exception: if the reference -type is of type `def`, the member is also considered to be of type `def` and -resolved at runtime. - -*Grammar:* -[source,ANTLR4] ----- -field_access: ID '.' 
ID; ----- - -*Examples:* -[source,Java] ----- -FeatureTest ft = new FeatureTest(); // Declare FeatureTest variable ft and - // set it to a newly allocated FeatureTest -ft.x = 5; // Access int member x from ft and assign - // it the literal int value 5 -ft.y = ft.x; // Access int member y from ft and assign - // it the value of ft member x -int value = ft.x + ft.y; // Declare variable value as an int, - // add ft members x and y together, - // assign the sum to the variable value ----- - -[[method-access]] -===== Calling Methods - -You call reference type methods using the dot operator and the method id: -`.method_id(arg1,...,argn)`. The parentheses are required even if there are no -arguments. - -If the reference type is not type `def`, the argument types for the method -can be resolved at compile time. An error occurs if appropriate type -conversions (casting) cannot be performed. If the reference type is type `def`, the argument types for the method are all considered to be the type `def`. The -appropriate type conversions are performed at run-time. - -Automatic <> is performed when you pass in -arguments to a method. - -Method calls can be overloaded based on arity in Painless. The same method -name can be re-used for different methods as long as the number of arguments -differs. This differs from Java method overloading, where only the types must -differ. This has an effect on some of the provided reference type methods in -the <>. Where there are overloaded methods with -the same arity for a reference type in Java, Painless chooses a single method -to be provided. - -*Grammar:* -[source,ANTLR4] ----- -method_call: ID '.' ID '(' (expression (',' expression)*)? 
')'; ----- - -*Examples:* -[source,Java] ----- -Map m = new HashMap(); // Declare Map variable m and set it a newly - // allocated HashMap -x.put(1, 2); // Call the put method on variable x to add key 1 - // with the value 2 to the Map -int z = x.get(1); // Declare int variable z, call the get method to - // retrieve the value of key 1, and assign the - // return value of the method call to variable z -def d = new ArrayList(); // Declare def variable m and set it a newly - // allocated ArrayList -d.add(1); // Call the add method on variable d and add the - // literal int 1 to the ArrayList. Note that - // the argument type is considered to be of - // type def since the reference type is also def -int i = Integer.parseInt('2'); // Declare int variable i and set it to the - // value returned by the static method parseInt ----- - -************************** -Painless describes the Map method arguments using the `def` type: - -[source,Java] ----- -put(def, def) -get(def) ----- - -When you call `x.put(1, 2)`, the key and value are implicitly converted from -the int type to the def type. - -Assume for a minute that the Map method arguments were described as Integers: - -[source,Java] ----- -put(Integer, Integer) -get(Integer) ----- - -In this case, the key and value would implicitly be _boxed_ from the primitive -int type to the Integer reference type. For more information about how Painless -casts between primitive types and reference types, see <>. -************************** - -==== Null Safe - -The null safe operator `?.` can be used in place of the dot operator -to check if a reference type instance is `null` before attempting to access -a field or make a method call against it. When using the null safe operator, -if the instance is `null`, the returned value is `null`. If the reference -type instance is non-null, it returns the value of the field or result of -the method call normally. - -// REVIEWER NOTE: The following paragraph doesn't make sense to me. 
Do you -All resultant types must be a reference type or be able to be implicitly cast -to a reference type or an error will occur. - -*Grammar:* -[source,ANTLR4] ----- -null_safe: null_safe_field_access - | null_safe_method_call; -null_safe_field_access: ID '?.' ID; -null_safe_method_call: ID '?.' ID '(' (expression (',' expression)*)? ')'; ----- - -*Examples:* -[source,Java] ----- -Map x = new HashMap(); // Declare the Map variable x and set it to a newly - // allocated HashMap -Map y = null; // Declare the Map variable y and set it to null -def z = new HashMap(); // Declares the def variable z and set it to a newly - // allocated HashMap - -x.put(1, 2); // Put the key-value pair 1 and 2 into x -z.put(5, 6); // Put the key-value pair 5 and 6 into z - -def value = x?.get(1); // Declare the def variable value and set it to the - // result of .get(1) since x is not null -value = y?.get(3); // Sets value to null since y is null -value = z?.get(5); // Sets value to the result of .get(5) since z is not null ----- - -==== Parenthesis - -User-defined function calls can be made in Painless using the parenthesis -operator. See Function Calls [MARK] for more information. - -==== Brackets and Braces - -The brackets operator `[]` is used to create and access arrays, lists, and maps. -The braces operator `{}` is used to intialize arrays. - -[[array-initialization]] -===== Creating and Initializing Arrays - -You create and initialize arrays using the brackets `[]` and braces `{}` -operators. Each set of brackets represents a dimension. The values you want to -initialize each dimension with are specified as a comma-separated list enclosed -in braces. For example, `new int[] {1, 2, 3}` creates a one dimensional `int` -array with a size of 3 and the values 1, 2, and 3. - -To allocate an array, you use the `new` keyword followed by the type and a -set of brackets for each dimension. 
You can explicitly define the size of each dimension by specifying an expression within the brackets, or initialize each -dimension with the desired number of values. The allocated size of each -dimension is its permanent size. - -To initialize an array, specify the values you want to initialize -each dimension with as a comma-separated list of expressions enclosed in braces. -For example, `new int[] {1, 2, 3}` creates a one-dimensional `int` array with a -size of 3 and the values 1, 2, and 3. - -When you initialize an array, the order of the expressions is maintained. Each expression used as part of the initialization is converted to the -array's type. An error occurs if the types do not match. - -*Grammar:* -[source,ANTLR4] ----- -declare_array: TYPE ('[' ']')+; - -array_initialization: 'new' TYPE '[' ']' '{' expression (',' expression) '}' - | 'new' TYPE '[' ']' '{' '}'; ----- - -*Examples:* -[source,Java] ----- -int[] x = new int[5]; // Declare int array x and assign it a newly - // allocated int array with a size of 5 -def[][] y = new def[5][5]; // Declare the 2-dimensional def array y and - // assign it a newly allocated 2-dimensional - // array where both dimensions have a size of 5 -int[] x = new int[] {1, 2, 3}; // Declare int array x and set it to an int - // array with values 1, 2, 3 and a size of 3 -int i = 1; -long l = 2L; -float f = 3.0F; -double d = 4.0; -String s = "5"; -def[] da = new def[] {i, l, f*d, s}; // Declare def array da and set it to - // a def array with a size of 4 and the - // values i, l, f*d, and s ----- - -[[array-access]] -===== Accessing Array Elements - -Elements in an array are stored and accessed using the brackets `[]` operator. -Elements are referenced by an expression enclosed in brackets. An error -occurs if the expression used to reference an element cannot be implicitly -cast to an `int`. - -The range of elements within an array that can be accessed is `[0, size)` where -size is the originally allocated size of the array. 
To access elements relative -to the last element in an array, you can use a negative numeric value from -`[-size, -1]`. An error occurs if you attempt to reference an element outside -of the array's range. - -*Grammar:* -[source,ANTLR4] ----- -brace_access: '[' expression ']' ----- - -*Examples:* -[source,Java] ----- - -int[] x = new int[2]; // Declare int array x and set it to a newly allocated - // array with a size of 2 -x[0] = 2; // Set the 0th element of array x to 2 -x[1] = 5; // Set the 1st element of array x to 5 -int y = x[0] + x[1]; // Declare the int variable y and set it to the sum - // of the first two elements of array x -int z = 1; // Declare the int variable z and set it to 1 -return x[z]; // Access the 1st element of array x using the - // variable z as an expression and return the value - -def d = new int[2]; // Declare def variable d and set it to a newly - // allocated array with a size of 2 -d[0] = 2; // Set the 0th element of array d to 2 -d[1] = 5; // Set the 1st element of array d to 2 -def y = d[0] + d[1]; // Declare def variable y and set it to the sum - // of the first two elements of array d -def z = 1; // Declare def variable z and set it to 1 -return d[z]; // Access the 1st element of array d using the - // variable z as an expression and return the value ----- - -NOTE: The use of the `def` type in the second example means that the types -cannot be resolved until runtime. - -[[array-length]] -===== Array Length - -Arrays contain a special member known as 'length' that is a read-only value that contains the size of the array. This member can be accessed from an array using the dot operator. 
- -*Examples:* -[source,Java] ----- -int[] x = new int[10]; // declares an int array variable x and sets it to a newly allocated array with a size of 10 -int l = x.length; // declares and int variable l and sets it to the field length of variable x ----- - -===== Creating and Initializing Lists - -You create and initialize lists using the brackets `[]` operator. The values -you want to initialize the list with are specified as a comma-separated list -of expressions enclosed in brackets. For example, `List l = [1, 2, 3]` creates -a new three item list. Each expression used to initialize the list is converted -a `def` type when the value is inserted into the list. The order of the -expressions is maintained. - -*Grammar:* -[source,ANTLR4] ----- -list_initialization: '[' expression (',' expression)* ']' - | '[' ']'; ----- - -*Examples:* -[source,Java] ----- -List empty = []; // declares the List variable empty and sets it to a newly initialized empty List -List l0 = [1, 2, 3]; // declares the List variable l0 and sets it to a newly initialized List with the values 1, 2, and 3 - -int i = 1; -long l = 2L; -float f = 3.0F; -double d = 4.0; -String s = "5"; -List l1 = [i, l, f*d, s]; // declares the List variable l1 and sets it to a newly initialized List with the values of i, l, and f*d and s ----- - -===== Accessing List Elements - -Elements in a List are stored or accessed using the brackets operator. The format begins with an opening bracket, followed by an expression, and finishes with a closing bracket. Storing elements in a List is equivalent to invoking a List's set method. Accessing elements in a List is equivalent to invoking a List's get method. Using this operator is strictly a shortcut for the previously mentioned methods. The range of elements within a List that can be accessed is [0, size) where size is the number of elements currently in the List. Elements may also be accessed from the last element in a List using a negative numeric value from [-size, -1]. 
The expression used to determine which element is accessed must be able to be implicitly cast to an int. An error will occur if the expression is outside of the legal range or is not of type int. - -*Grammar:* -[source,ANTLR4] ----- -list_access: '[' expression ']' ----- - -*Examples:* -[source,Java] ----- -List x = new ArrayList(); // declares a List variable x and sets it to a newly allocated ArrayList -x.add(1); // invokes the add method on the variable x and adds the constant int 1 to the List -x.add(2); // invokes the add method on the variable x and adds the constant int 2 to the List -x.add(3); // invokes the add method on the variable x and adds the constant int 3 to the List -x[0] = 2; // sets the 0th element of the variable x to the constant int 2 -x[1] = 5; // sets the 1st element of the variable x to the constant int 2 -int y = x[0] + x[1]; // declares the int variable y and sets it to the sum of the first two elements of the variable x -int z = 1; // declares the int variable z and sets it to the constant int 1 -return x[z]; // accesses the 1st element of the variable x using the variable z as an expression and returns the value - -def d = new ArrayList(); // declares a def variable d and sets it to a newly allocated ArrayList -d.add(1); // invokes the add method on the variable d and adds the constant int 1 to the List -d.add(2); // invokes the add method on the variable d and adds the constant int 2 to the List -d.add(3); // invokes the add method on the variable d and adds the constant int 3 to the List -d[0] = 2; // sets the 0th element of the variable d to the constant int 2 -d[1] = 5; // sets the 1st element of the variable d to the constant int 2 -def y = d[0] + d[1]; // declares the def variable y and sets it to the sum of the first two elements of the variable d -def z = 1; // declares the def variable z and sets it to the constant int 1 -return d[z]; // accesses the 1st element of the variable d using the variable z as an expression and 
returns the value ----- - -Note in the first example above all types can be resolved at compile-time, while in the second example all types must wait to be resolved until run-time. - -===== Creating and Initializing Maps - -A Map can be created and initialized using the brackets operator. The format begins with a bracket, followed by an arbitrary number of key-value pairs delimited with commas (except the last), and ends with a closing bracket. Each key-value pair is a set of two expressions separate by a colon. If there is only a single colon with no expressions, a new empty Map is created. - -*Grammar:* -[source,ANTLR4] ----- -map_initialization: '[' key_pair (',' key_pair)* ']' - | '[' ':' ']'; -key_pair: expression ':' expression ----- - -Each expression used as part of the initialization is converted to a `def` type -for insertion into the map. - -*Examples:* -[source,Java] ----- -Map empty = [:]; // declares the Map variable empty and sets it to a newly initialized empty Map -Map m0 = [1:2, 3:4, 5:6]; // declares the Map variable m0 and sets it to a newly initialized Map with the keys 1, 3, 5 and values 2, 4, 6, respectively - -byte b = 0; -int i = 1; -long l = 2L; -float f = 3.0F; -double d = 4.0; -String s = "5"; -Map m1 = [b:i, l:f*d, d:s]; // declares the Map variable m1 and sets it to a newly initialized Map with the keys b, l, d and values i, f*d, s, respectively ----- - -===== Accessing Map Elements - -Elements in a Map can be stored or accessed using the brackets operator. The format begins with an opening bracket, followed by an expression, and finishes with a closing bracket. Storing values in a Map is equivalent to invoking a Map's put method. Accessing values in a Map is equivalent to invoking a Map's get method. Using this operator is strictly a shortcut for the previously mentioned methods. Any element from a Map can be stored/accessed where the expression is the key. 
If a key has no corresponding value when accessing a Map then the value will be null. - -*Grammar:* -[source,ANTLR4] ----- -map_access: '[' expression ']' ----- - -*Examples:* -[source,Java] ----- -Map x = new HashMap(); // declares a Map variable x and sets it to a newly allocated HashMap -x['value2'] = 2; // puts the value of the key constant String value2 of the variable x to the constant int 2 -x['value5'] = 5; // puts the value of the key constant String value5 of the variable x to the constant int 5 -int y = x['value2'] + x['value5']; // declares the int variable y and sets it to the sum of the two values of the variable x -String z = 'value5'; // declares the String variable z and sets it to the constant String value5 -return x[z]; // accesses the value for the key value5 of the variable x using the variable z as an expression and returns the value - -def d = new HashMap(); // declares a def variable d and sets it to a newly allocated HashMap -d['value2'] = 2; // puts the value of the key constant String value2 of the variable d to the constant int 2 -d['value5'] = 5; // puts the value of the key constant String value5 of the variable d to the constant int 5 -int y = d['value2'] + d['value5']; // declares the int variable y and sets it to the sum of the two values of the variable d -String z = 'value5'; // declares the String variable z and sets it to the constant String value5 -return d[z]; // accesses the value for the key value5 of the variable x using the variable z as an expression and returns the value ----- - -Note in the first example above all types can be resolved at compile-time, while in the second example all types must wait to be resolved until run-time. - -==== Post Increment - -A variable/field representing a numerical value can be possibly evaluated as part of an expression, and then increased by 1 for its respective type. The format starts with a variable name followed by a plus and ends with a plus. 
- -*Grammar:* -[source,ANTLR4] ----- -post_increment: ( variable | member ) '++' ----- - -A numeric promotion may occur during a post-increment followed by a downcast if necessary. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. A downcast may be required after the type promotion to assign the appropriate value back into the variable/field. Non-numeric variables/members will result in an error. - -Promotion Table: - -|==== -|from|to|downcast -|byte|int|byte -|short|int|short -|char|int|char -|int|int| -|long|long| -|float|float| -|double|double| -|def|def| -|==== - -Examples(s): -[source,Java] ----- -int i = 0; // declares the int variable i and sets it to the constant 0 -i++; // increments the int variable i by 1 to a value of 1 -long l = 1; // declares the long variable l and set it the constant 1 -long k; // declares the long variable k -k = l++; // sets the long variable k to the value of l (1), and then increments the long variable l by 1 to a value of 2 ----- - -==== Post Decrement - -A variable/field representing a numerical value can be possibly evaluated as part of an expression, and then increased by 1 for its respective type. The format starts with a variable name followed by a minus and ends with a minus. - -*Grammar:* -[source,ANTLR4] ----- -post_increment: ( variable | member ) '--' ----- - -A numeric promotion may occur during a post-decrement followed by a downcast if necessary. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. A downcast may be required after the type promotion to assign the appropriate value back into the variable/field. Non-numeric variables/members will result in an error. 
- -Promotion Table: - -|==== -|from|to|downcast -|byte|int|byte -|short|int|short -|char|int|char -|int|int| -|long|long| -|float|float| -|double|double| -|def|def| -|==== - -Examples(s): -[source,Java] ----- -short i = 0; // declares the short variable i and sets it to the constant short 0 -i--; // decrements the short variable i by 1 to a value of -1 (promoted to int and downcast to short) -float l = 1.0f; // declares the float variable l and sets it the constant float 1.0f -float k; // declares the float variable k -k = l--; // sets the float variable k to the value of l (1.0f), and then decrements the float variable l by 1.0 to a value of 0.0 ----- - -==== Pre Increment - -A variable/field representing a numerical value can be increased by 1 for its respective type, and then possibly evaluated as part of an expression. The format starts with a plus followed by a plus and ends with a variable name. - -*Grammar:* -[source,ANTLR4] ----- -pre_increment: '++' ( variable | member ) ----- - -A numeric promotion may occur during a pre-increment followed by a downcast if necessary. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. A downcast may be required after the type promotion to assign the appropriate value back into the variable/field. Non-numeric variables/members will result in an error. 
- -Promotion Table: - -|==== -|from|to|downcast -|byte|int|byte -|short|int|short -|char|int|char -|int|int| -|long|long| -|float|float| -|double|double| -|def|def| -|==== - -Examples(s): -[source,Java] ----- -int i = 0; // declares the int variable i and sets it to the constant int 0 -++i; // increments the int variable i by 1 to a value of 1 -long l = 1; // declares the long variable l and sets it to the constant long 1 -long k; // declares the long variable k -k = ++l; // increments the long variable l by 1 to a value of 2, and then sets the long variable k to the value of l (2) ----- - -==== Pre Decrement - -A variable/field representing a numerical value can be decreased by 1 for its respective type, and then possibly evaluated as part of an expression. The format starts with a minus followed by a minus and ends with a variable name. - -*Grammar:* -[source,ANTLR4] ----- -pre_decrement: '--' ( variable | member ) ----- - -A numeric promotion may occur during a pre-decrement followed by a downcast if necessary. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. A downcast may be required after the type promotion to assign the appropriate value back into the variable/field. Non-numeric variables/members will result in an error. 
- -Promotion Table: -|==== -|from|to|downcast -|byte|int|byte -|short|int|short -|char|int|char -|int|int| -|long|long| -|float|float| -|double|double| -|def|def| -|==== - -Examples(s): -[source,Java] ----- -byte i = 1; // declares the byte variable i and sets it to the constant int 1 ---i; // decrements the byte variable i by 1 to a value of 0 (promoted to int and downcast to byte) -double l = 1.0; // declares the double variable l and sets it to the constant double 1.0 -double k; // declares the double variable k -k = --l; // decrements the double variable l by 1.0 to a value of 0.0, and then sets the double variable k to the value of l (0.0) ----- - -==== Unary Positive - -Unary positive gives the identity of a numerical value using the plus operator. In practice this is usually a no-op, but will cause some numeric types to be promoted. Format starts with a plus operator followed by a numerical expression. - -*Grammar:* -[source,ANTLR4] ----- -unary_positive: '+' expression ----- - -A numeric promotion may occur during a unary positive operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. - -Promotion Table: -|==== -|from|to -|byte|int -|short|int -|char|int -|int|int -|long|long -|float|float -|double|double -|def|def -|==== - -*Examples:* -[source,Java] ----- -int x = +1; // declares the int variable x and sets it to positive 1 -long y = +x; // declares the long variable y and sets it to positive x (promoted to long from int) -def z = +y; // declares the def variable z and sets it to positive y -byte z = +2; //ERROR: cannot implicitly downcast an int to a byte ----- - -==== Unary Negative - -Unary negative negates a numeric value using the minus operator. Format starts with a minus followed by a numerical expression. 
-
-*Grammar:*
-[source,ANTLR4]
-----
-unary_negative: '-' expression
-----
-
-A numeric promotion may occur during a unary negative operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error.
-
-Promotion Table:
-|====
-|from|to
-|byte|int
-|short|int
-|char|int
-|int|int
-|long|long
-|float|float
-|double|double
-|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-int x = -1; // declares the int variable x and sets it to negative 1
-long y = -x; // declares the long variable y and sets it to negative x (promoted to long from int)
-def z = -y; // declares the def variable z and sets it to negative y
-byte z = -2; //ERROR: cannot implicitly downcast an int to a byte
-----
-
-==== Boolean Not
-
-Boolean not will flip a boolean value from true to false or false to true using the bang operator. The format is a bang operator followed by an expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-boolean_not: '!' expression;
-----
-
-Note that def types will be assumed to be of the boolean type. Any def type evaluated at run-time that does not represent a boolean will result in an error. Non-boolean expressions will result in an error.
-
-*Examples:*
-[source,Java]
-----
-boolean x = !false; // declares the boolean variable x and sets it to the opposite of the false value
-boolean y = !x; // declares the boolean variable y and sets it to the opposite of the boolean variable x
-def z = !y; // declares the def variable z and sets it to the opposite of the boolean variable y
-----
-
-==== Bitwise Not
-
-Bitwise not will flip each bit of an integer type expression. The format is the tilde operator followed by an expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-bitwise_not: '~' expression;
-----
-
-A numeric promotion may occur during a bitwise not operation. 
A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-integer expressions will result in an error. - -Promotion Table: -|==== -|from|to -|byte|int -|short|int -|char|int -|int|int -|long|long -|def|def -|==== - -*Examples:* -[source,Java] ----- -byte x = 1; // declares the byte variable x and sets it to a constant int 1 -int y = ~x; // declares the int variable y and sets it to the negation of x -long z = ~y; // declares the long variable z and sets it the negation of y -def d = ~z; // declares the def variable d and sets it the negation of z -def e; // declares the def variable e -e = ~d; // sets e the negation of d ----- - -==== Cast - -The cast operator can be used to explicitly convert one type to another. See casting [MARK] for more information. - -[[constructor-call]] -==== Constructor Call - -A constructor call is a special type of method call [MARK] used to allocate a reference type instance using the new operator. The format is the new operator followed by a type, an opening parenthesis, arguments if any, and a closing parenthesis. Arguments are a series of zero-to-many expressions delimited by commas. Auto-boxing and auto-unboxing will be applied automatically for arguments passed into a constructor call. See boxing and unboxing [MARK] for more information on this topic. Constructor argument types can always be resolved at run-time; if appropriate type conversions (casting) cannot be applied an error will occur. Once a reference type instance has been allocated, its members may be used as part of other expressions. - -Constructor calls may be overloaded based on arity in Painless. This means the same reference type may have multiple constructors as long as the number of arguments differs for each one. This does have an effect on some of the provided reference type constructors in the Painless API [MARK]. 
When there are overloaded constructors with the same arity for a reference type in Java a single constructor must be chosen to be provided in Painless. - -*Grammar:* -[source,ANTLR4] ----- -constructor_call: 'new' TYPE '(' (expression (',' expression)*)? ')'; ----- - -*Examples:* -[source,Java] ----- -Map m = new HashMap(); // declares the Map variable m and sets it to a newly allocated HashMap using an empty constructor -m.put(3, 3); // invokes the method call member put and adds the key-value pair of 3 to Map variable m -def d = new ArrayList(); // declares the def variable d and sets it to a newly allocated ArrayList using an empty constructor -def e; // declares the def variable e -e = new HashMap(m); // sets e to a newly allocated HashMap using the constructor with a single argument m ----- - -[[new-array]] -==== New Array - -An array type instance can be allocated using the new operator. The format starts with the new operator followed by the type followed by a series of opening and closing braces each containing an expression for the size of the dimension. - -*Grammar:* -[source,ANTLR4] ----- -new_array: 'new' TYPE ('[' expression ']')+; ----- - -*Examples:* -[source,Java] ----- -int[] x = new int[5]; // declares an int array variable x and sets it to a newly allocated array with a size of 5 -x = new int[10]; // sets the int array variable x to a newly allocated array with a size of 10 -def[][] y = new def[5][5]; // declares a 2-dimensional def array variable y and set it to a newly - // allocated 2-dimensional array where both dimensions have a size of 5 ----- - -==== Multiplication - -Multiplies two numerical expressions. Rules for resultant overflow and NaN values follow the Java specification. The format is an expression, followed by the star operator, and a closing expression. - -*Grammar:* -[source,ANTLR4] ----- -multiplication: expression '*' expression; ----- - -A numeric promotion may occur during a multiplication operation. 
A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error.
-
-Promotion Table:
-|====
-||byte|short|char|int|long|float|double|def
-|byte|int|int|int|int|long|float|double|def
-|short|int|int|int|int|long|float|double|def
-|char|int|int|int|int|long|float|double|def
-|int|int|int|int|int|long|float|double|def
-|long|long|long|long|long|long|float|double|def
-|float|float|float|float|float|float|float|double|def
-|double|double|double|double|double|double|double|double|def
-|def|def|def|def|def|def|def|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-int x = 5*4; // declares the int variable x and sets it to the result of 5 multiplied by 4
-double y = x*7.0; // declares the double variable y and sets it to the result of x multiplied by 7.0 (x is promoted to a double)
-def z = x*y; // declares the def variable z and sets it to the result of x multiplied by y (x is promoted to a double)
-def a = z*x; // declares the def variable a and sets it to the result of z multiplied by x (x is promoted to def at compile-time and double at run-time)
-----
-
-==== Division
-
-Divides two numerical expressions. Rules for NaN values and division by zero follow the Java specification. Integer division will drop the remainder of the resultant value. The format is an expression, followed by the slash operator, and a closing expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-division: expression '/' expression;
-----
-
-A numeric promotion may occur during a division operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -int x = 5/4; // declares the int variable x and sets it to the result of 5 divided by 4 -double y = x/7.0; // declares the double variable y and sets it to the result of x divided by 7.0 (x is promoted to a double) -def z = x/y; // declares the def variable z and sets it to the result of x divided by y (x is promoted to a double) -def a = z/x; // declares the def variable a and sets it to the result of z divided by x (x is promoted to def at compile-time and double at run-time) ----- - -==== Remainder - -Calculates the remainder for division between two numerical expressions. Rules for NaN values and division by zero follow the Java specification. The format is an expression, followed by the percent operator, and a closing expression. - -*Grammar:* -[source,ANTLR4] ----- -remainder: expression '%' expression; ----- - -A numeric promotion may occur during a remainder operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
-
-Promotion Table:
-|====
-||byte|short|char|int|long|float|double|def
-|byte|int|int|int|int|long|float|double|def
-|short|int|int|int|int|long|float|double|def
-|char|int|int|int|int|long|float|double|def
-|int|int|int|int|int|long|float|double|def
-|long|long|long|long|long|long|float|double|def
-|float|float|float|float|float|float|float|double|def
-|double|double|double|double|double|double|double|double|def
-|def|def|def|def|def|def|def|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-int x = 5%4; // declares the int variable x and sets it to the remainder of 5 divided by 4
-double y = x%7.0; // declares the double variable y and sets it to the remainder of x divided by 7.0 (x is promoted to a double)
-def z = x%y; // declares the def variable z and sets it to the remainder of x divided by y (x is promoted to a double)
-def a = z%x; // declares the def variable a and sets it to the remainder of z divided by x (x is promoted to def at compile-time and double at run-time)
-----
-
-==== String Concatenation
-
-Concatenates two expressions together as a single String where at least one of the expressions is a String to begin with. The format is an expression, followed by a plus operator, and a closing expression. 
-
-*Grammar:*
-[source,ANTLR4]
-----
-concatenate: expression '+' expression;
-----
-
-*Examples:*
-[source,Java]
-----
-String x = "con"; // declares the String variable x and sets it to the String constant "con"
-String y = x + "cat"; // declares the String variable y and sets it to the concatenation of the String variable x and the String constant "cat"
-String z = 4 + x; // declares the String variable z and sets it to the concatenation of the int constant 4 and the String variable x (4 is implicitly cast to a String)
-def d = 2; // declares the def variable d and sets it to the int constant 2
-z = z + d; // sets the String variable z to the concatenation of the String variable z and the def variable d (d is implicitly cast to a String)
-d = "con" + x + y + "cat"; // sets the def variable d to the concatenation of String constant "con", x, y, and the String constant "cat"
-----
-
-==== Addition
-
-Adds two numerical expressions. Rules for resultant overflow and NaN values follow the Java specification. The format is an expression, followed by the plus operator, and a closing expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-addition: expression '+' expression;
-----
-
-A numeric promotion may occur during an addition operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error, except in the case of String which then implies the operation is string concatenation [MARK] rather than addition. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -int x = 5 + 4; // declares the int variable x and sets it to the result of 5 added to 4 -double y = x + 7.0; // declares the double variable y and sets it to the result of x added to 7.0 (x is promoted to a double) -def z = x + y; // declares the def variable z and sets it to the result of x added to y (x is promoted to a double) -def a = z + x; // declares the def variable a and sets it to the result of z added to x (x is promoted to def at compile-time and double at run-time) ----- - -==== Subtraction - -Subtracts two numerical expressions. Rules for resultant overflow and NaN values follow the Java specification. The format is an expression, followed by the minus operator, and a closing expression. - -*Grammar:* -[source,ANTLR4] ----- -subtraction: expression '-' expression; ----- - -A numeric promotion may occur during a subtraction operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -int x = 5-4; // declares the int variable x and sets it to the result of 4 subtracted from 5 -double y = x-7.0; // declares the double variable y and sets it to the result of 7.0 subtracted from x (x is promoted to a double) -def z = x-y; // declares the def variable z and sets it to the result of y subtracted from x (x is promoted to a double) -def a = z-x; // declares the def variable a and sets it to the result of x subtracted from z (x is promoted to def at compile-time and double at run-time) ----- - -==== Left Shift - -Shifts lower order bits to higher order bits in the left-side expression by the distance specified in the right-side expression. The format is an expression followed by two left-carrots, and a closing expression. - -*Grammar:* -[source,ANTLR4] ----- -left_shift: expression '<<' expression; ----- - -A numeric promotion may occur during a left shift operation to the left-side expression. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric and floating point expressions will result in an error. - -Promotion Table: -|==== -|from|to -|byte|int -|short|int -|char|int -|int|int -|long|long -|def|def -|==== - -The right-side expression will be explicitly cast to an int value and truncated based on the promoted type of the left-side expression. 
If the left-side expression is of type int then the lowest order 5-bits will be taken as the distance to shift from the right-side expression (0-31). If the left-side expression is of type long then the lowest order 6-bits will be taken as the distance to shift from the right-side expression (0-63). Non-numeric and floating point expressions will result in an error. - -*Examples:* -[source,Java] ----- -int x = 5 << 4; // declares the int variable x and sets it to the result of 5 left shifted by 4 -long y = x << 7; // declares the long variable y and sets it to the result of x left shifted by 7 (x is promoted to a long) -def z = x << y; // declares the def variable z and sets it to the result of x left shifted by y -def a = z << x; // declares the def variable a and sets it to the result of z left shifted by x ----- - -==== Right Shift - -Shifts higher order bits to lower order bits in the left-side expression by the distance specified in the right-side expression. Right shift will preserve the signed bit (highest order bit) as part of the result. The format is an expression followed by two right-carrots, and a closing expression. - -*Grammar:* -[source,ANTLR4] ----- -right_shift: expression '>>' expression; ----- - -A numeric promotion may occur during a right shift operation to the left-side expression. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric and floating point expressions will result in an error. - -Promotion Table: -|==== -|from|to -|byte|int -|short|int -|char|int -|int|int -|long|long -|def|def -|==== - -The right-side expression will be explicitly cast to an int value and truncated based on the promoted type of the left-side expression. If the left-side expression is of type int then the lowest order 5-bits will be taken as the distance to shift from the right-side expression (0-31). 
If the left-side expression is of type long then the lowest order 6-bits will be taken as the distance to shift from the right-side expression (0-63). Non-numeric and floating point expressions will result in an error. - -*Examples:* -[source,Java] ----- -int x = 5 >> 4; // declares the int variable x and sets it to the result of 5 right shifted by 4 -long y = x >> 7; // declares the long variable y and sets it to the result of x right shifted by 7 (x is promoted to a long) -def z = x >> y; // declares the def variable z and sets it to the result of x right shifted by y -def a = z >> x; // declares the def variable a and sets it to the result of z right shifted by x ----- - -==== Unsigned Right Shift - -Shifts higher order bits to lower order bits in the left-side expression by the distance specified in the right-side expression. Unsigned right shift will not preserve the signed bit (highest order bit) as part of the result. The format is an expression followed by three right-carrots, and a closing expression. - -*Grammar:* -[source,ANTLR4] ----- -unsigned_right_shift: expression '>>>' expression; ----- - -A numeric promotion may occur during an unsigned right shift operation to the left-side expression. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric and floating point expressions will result in an error. - -Promotion Table: -|==== -|from|to -|byte|int -|short|int -|char|int -|int|int -|long|long -|def|def -|==== - -The right-side expression will be explicitly cast to an int value and truncated based on the promoted type of the left-side expression. If the left-side expression is of type int then the lowest order 5-bits will be taken as the distance to shift from the right-side expression (0-31). If the left-side expression is of type long then the lowest order 6-bits will be taken as the distance to shift from the right-side expression (0-63). 
Non-numeric and floating point expressions will result in an error.
-
-*Examples:*
-[source,Java]
-----
-int x = 5 >>> 4; // declares the int variable x and sets it to the result of 5 unsigned right shifted by 4
-long y = x >>> 7; // declares the long variable y and sets it to the result of x unsigned right shifted by 7 (x is promoted to a long)
-def z = x >>> y; // declares the def variable z and sets it to the result of x unsigned right shifted by y
-def a = z >>> x; // declares the def variable a and sets it to the result of z unsigned right shifted by x
-----
-
-==== Greater Than
-
-Greater than compares two numerical expressions where a resultant boolean value will be true if the left-side expression is a larger value than the right-side expression otherwise false. The format is an expression, followed by the right angle operator, and a closing expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-greater_than: expression '>' expression;
-----
-
-A numeric promotion may occur during a greater than operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
-
-Promotion Table:
-|====
-||byte|short|char|int|long|float|double|def
-|byte|int|int|int|int|long|float|double|def
-|short|int|int|int|int|long|float|double|def
-|char|int|int|int|int|long|float|double|def
-|int|int|int|int|int|long|float|double|def
-|long|long|long|long|long|long|float|double|def
-|float|float|float|float|float|float|float|double|def
-|double|double|double|double|double|double|double|double|def
-|def|def|def|def|def|def|def|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-boolean x = 5 > 4; // declares the boolean variable x and sets it to the result of 5 greater than 4
-double y = 7.0; // declares the double variable y and sets it to the double constant 7.0
-def z = y > 6.5; // declares the def variable z and sets it to the result of y greater than 6.5
-def a = y > x; // declares the def variable a and sets it to the result of y greater than x (x is promoted to double at compile-time)
-----
-
-==== Greater Than Or Equal
-
-Greater than or equal compares two numerical expressions where a resultant boolean value will be true if the left-side expression is a larger value than or equal to the right-side expression otherwise false. The format is an expression, followed by the right angle and equals operator, and a closing expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-greater_than_or_equal: expression '>=' expression;
-----
-
-A numeric promotion may occur during a greater than or equal operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
-
-Promotion Table:
-|====
-||byte|short|char|int|long|float|double|def
-|byte|int|int|int|int|long|float|double|def
-|short|int|int|int|int|long|float|double|def
-|char|int|int|int|int|long|float|double|def
-|int|int|int|int|int|long|float|double|def
-|long|long|long|long|long|long|float|double|def
-|float|float|float|float|float|float|float|double|def
-|double|double|double|double|double|double|double|double|def
-|def|def|def|def|def|def|def|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-boolean x = 5 >= 4; // declares the boolean variable x and sets it to the result of 5 greater than or equal to 4
-double y = 7.0; // declares the double variable y and sets it to the double constant 7.0
-def z = y >= 6.5; // declares the def variable z and sets it to the result of y greater than or equal to 6.5
-def a = y >= x; // declares the def variable a and sets it to the result of y greater than or equal to x (x is promoted to double at compile-time)
-----
-
-==== Less Than
-
-Less than compares two numerical expressions where a resultant boolean value will be true if the left-side expression is a smaller value than the right-side expression otherwise false. The format is an expression, followed by the left angle operator, and a closing expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-less_than: expression '<' expression;
-----
-
-A numeric promotion may occur during a less than operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
-
-Promotion Table:
-|====
-||byte|short|char|int|long|float|double|def
-|byte|int|int|int|int|long|float|double|def
-|short|int|int|int|int|long|float|double|def
-|char|int|int|int|int|long|float|double|def
-|int|int|int|int|int|long|float|double|def
-|long|long|long|long|long|long|float|double|def
-|float|float|float|float|float|float|float|double|def
-|double|double|double|double|double|double|double|double|def
-|def|def|def|def|def|def|def|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-boolean x = 5 < 4; // declares the boolean variable x and sets it to the result of 5 less than 4
-double y = 7.0; // declares the double variable y and sets it to the double constant 7.0
-def z = y < 6.5; // declares the def variable z and sets it to the result of y less than 6.5
-def a = y < x; // declares the def variable a and sets it to the result of y less than x (x is promoted to double at compile-time)
-----
-
-==== Less Than Or Equal
-
-Less than or equal compares two numerical expressions where a resultant boolean value will be true if the left-side expression is a smaller value than or equal to the right-side expression otherwise false. The format is an expression, followed by the left angle and equals operator, and a closing expression.
-
-*Grammar:*
-[source,ANTLR4]
-----
-less_than_or_equal: expression '<=' expression;
-----
-
-A numeric promotion may occur during a less than or equal operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-numeric expressions will result in an error. 
-
-Promotion Table:
-|====
-||byte|short|char|int|long|float|double|def
-|byte|int|int|int|int|long|float|double|def
-|short|int|int|int|int|long|float|double|def
-|char|int|int|int|int|long|float|double|def
-|int|int|int|int|int|long|float|double|def
-|long|long|long|long|long|long|float|double|def
-|float|float|float|float|float|float|float|double|def
-|double|double|double|double|double|double|double|double|def
-|def|def|def|def|def|def|def|def|def
-|====
-
-*Examples:*
-[source,Java]
-----
-boolean x = 5 <= 4; // declares the boolean variable x and sets it to the result of 5 less than or equal to 4
-double y = 7.0; // declares the double variable y and sets it to the double constant 7.0
-def z = y <= 6.5; // declares the def variable z and sets it to the result of y less than or equal to 6.5
-def a = y <= x; // declares the def variable a and sets it to the result of y less than or equal to x (x is promoted to double at compile-time)
-----
-
-==== Instance Of
-
-The instanceof operator can be used to compare a variable's type to a specified reference type where a resultant boolean value is true if the variable type is the same as or a descendant of the specified reference type and false otherwise. The format is an id, followed by the instanceof operator, and finished with a type. 
- -*Grammar:* -[source,ANTLR4] ----- -instance_of: ID 'instanceof' TYPE; ----- - -*Examples:* -[source,Java] ----- -Map x = new HashMap(); // declares the Map variable x and sets it to a newly allocated HashMap -List y = new ArrayList(); // declares the List variable y and sets it to a newly allocated ArrayList -def z = y; // declares the def variable z and sets it to y -boolean a = x instanceof HashMap; // declares the boolean variable a and sets it to true since x's type is the same type as HashMap -boolean b = y instanceof Map; // declares the boolean variable b and sets it to false since y's type is not the same type as Map or a descendant of Map -boolean c = z instanceof List; // declares the boolean variable c and sets it to true since z's type is a descendant of the type List ----- - -==== Equality Equals - -Equality equals compares two expressions where a resultant boolean value is true if the two expressions are equal and false otherwise. When reference types are compared using this operator the equivalent of the equals member method will be called against the first expression, where the second expression is the argument. Though the equals member method is used for reference types, this operation will always be null-safe. Valid comparisons are between boolean types, primitive numeric types, and reference types. If a comparison is made that is not listed as one of the valid comparisons an error will occur. The format is an expression, followed by the equals-equals operator, and finished with an expression. - -*Grammar:* -[source,ANTLR4] ----- -equality_equals: expression '==' expression; ----- - -A numeric type promotion may occur during a primitive numeric comparison. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -boolean b0 = true; // declares the boolean variable b0 and sets it the constant boolean true -boolean b1 = false; // declares the boolean variable b1 and sets it the constant boolean false -int i = 2; // declares the int variable i and sets it the constant int 2 -float f = 2.0f; // declares the float variable f and sets it the constant float 2.0 -List l0 = new ArrayList(); // declares the List variable l0 and sets it to a newly allocated ArrayList -ArrayList l1 = new ArrayList(); // declares the ArrayList variable l1 and sets it to a newly allocated ArrayList -def di0 = 2; // declares the def variable di0 and sets it the constant int 2 -def di1 = 3; // declares the def variable di1 and sets it the constant int 3 -def dl = new ArrayList(); // declares the def variable dl and sets it to a newly allocated ArrayList -boolean result; // declares the boolean variable result - -result = b0 == b1; // compares b0 to b1 and has a boolean result of false -result = i == f; // compares i to f where i is promoted to float and has a boolean result of true -result = b0 == i; // ERROR: a comparison between a boolean and a primitive numeric type is illegal -result = i == l0; // ERROR: a comparison between a primitive numeric type and a reference type is illegal - -l0.add(1); // adds a constant int 1 to the List l0 -l1.add(1); // adds a constant int 1 to the ArrayList l1 -result = l0 == l1; // compares l0 to l1 using l0.equals(l1) and has a boolean result of true -l0.add(1); // adds a constant 
int 1 to the List l0 -result = l0 == l1; // compares l0 to l1 using l0.equals(l1) and has a boolean result of false - -result = di0 == di1; // compares di0 to di1 and has a boolean result of false -result = di0 == i; // compares di0 to i where i is promoted to def and has a boolean result of true - -dl.add(1); // adds a constant int 1 to the def ArrayList dl -result = dl == l0; // compares dl to l0 using dl.equals(l0) with a boolean result of true - -result = null == dl; // compares null to dl with a boolean result of false -result = l1 == null; // compares l1 to null with a boolean result of false ----- - -==== Equality Not Equals - -Equality not equals compares two expressions where a resultant boolean value is true if the two expressions are not equal and false otherwise. When reference types are compared using this operator the equivalent of the equals member method will be called against the first expression, where the second expression is the argument, with the resultant boolean being reversed. Though the equals member method is used for reference types, this operation will always be null-safe. Valid comparisons are between boolean types, primitive numeric types, and reference types. If a comparison is made that is not listed as one of the valid comparisons an error will occur. The format is an expression, followed by the bang-equals operator, and finished with an expression. - -*Grammar:* -[source,ANTLR4] ----- -equality_not_equals: expression '!=' expression; ----- - -A numeric type promotion may occur during a primitive numeric comparison. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -boolean b0 = true; // declares the boolean variable b0 and sets it the constant boolean true -boolean b1 = false; // declares the boolean variable b1 and sets it the constant boolean false -int i = 2; // declares the int variable i and sets it the constant int 2 -float f = 2.0f; // declares the float variable f and sets it the constant float 2.0 -List l0 = new ArrayList(); // declares the List variable l0 and sets it to a newly allocated ArrayList -ArrayList l1 = new ArrayList(); // declares the ArrayList variable l1 and sets it to a newly allocated ArrayList -def di0 = 2; // declares the def variable di0 and sets it the constant int 2 -def di1 = 3; // declares the def variable di1 and sets it the constant int 3 -def dl = new ArrayList(); // declares the def variable dl and sets it to a newly allocated ArrayList -boolean result; // declares the boolean variable result - -result = b0 != b1; // compares b0 to b1 and has a boolean result of true -result = i != f; // compares i to f where i is promoted to float and has a boolean result of false -result = b0 != i; // ERROR: a comparison between a boolean and a primitive numeric type is illegal -result = i != l0; // ERROR: a comparison between a primitive numeric type and a reference type is illegal - -l0.add(1); // adds a constant int 1 to the List l0 -l1.add(1); // adds a constant int 1 to the ArrayList l1 -result = l0 != l1; // compares l0 to l1 using l0.equals(l1) and has a boolean result of false -l0.add(1); // adds a 
constant int 1 to the List l0 -result = l0 != l1; // compares l0 to l1 using l0.equals(l1) and has a boolean result of true - -result = di0 != di1; // compares di0 to di1 and has a boolean result of true -result = di0 != i; // compares di0 to i where i is promoted to def and has a boolean result of false - -dl.add(1); // adds a constant int 1 to the def ArrayList dl -result = dl != l0; // compares dl to l0 using dl.equals(l0) with a boolean result of false - -result = null != dl; // compares null to dl with a boolean result of true -result = l1 != null; // compares null to l1 with a boolean result of true ----- - -==== Identity Equals - -Identity equals compares two expressions where a resultant boolean value is true if the two expressions are equal and false otherwise. Two primitive types are considered to be equal if they have the same value. Two reference types are considered to be equal if they refer to the exact same instance in memory or are both null. Valid comparisons are between boolean types, primitive numeric types, and reference types. If a comparison is made that is not listed as one of the valid comparisons an error will occur. The format is an expression, followed by the equals-equals-equals operator, and finished with an expression. - -*Grammar:* -[source,ANTLR4] ----- -identity_equals: expression '===' expression; ----- - -A numeric type promotion may occur during a primitive numeric comparison. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -boolean b0 = true; // declares the boolean variable b0 and sets it the constant boolean true -boolean b1 = false; // declares the boolean variable b1 and sets it the constant boolean false -int i = 2; // declares the int variable i and sets it the constant int 2 -float f = 2.0f; // declares the float variable f and sets it the constant float 2.0 -List l0 = new ArrayList(); // declares the List variable l0 and sets it to a newly allocated ArrayList -ArrayList l1 = new ArrayList(); // declares the ArrayList variable l1 and sets it to a newly allocated ArrayList -List l2 = l1; // declares the List variable l2 and sets it to l1 -def di0 = 2; // declares the def variable di0 and sets it the constant int 2 -def di1 = 3; // declares the def variable di1 and sets it the constant int 3 -def dl = l0; // declares the def variable dl and sets it to l0 -boolean result; // declares the boolean variable result - -result = b0 === b1; // compares b0 to b1 and has a boolean result of false -result = i === f; // compares i to f where i is promoted to float and has a boolean result of true -result = b0 === i; // ERROR: a comparison between a boolean and a primitive numeric type is illegal -result = i === l0; // ERROR: a comparison between a primitive numeric type and a reference type is illegal - -l0.add(1); // adds a constant int 1 to the List l0 -l1.add(1); // adds a constant int 1 to the ArrayList l1 -result = l0 === l1; // compares l0 to l1 and has a boolean result of false -l0.add(1); // 
adds a constant int 1 to the List l0 -result = l0 === l1; // compares l0 to l1 and has a boolean result of false -result = l1 === l2; // compares l1 to l2 and has a boolean result of true - -result = di0 === di1; // compares di0 to di1 and has a boolean result of false -result = di0 === i; // compares di0 to i where i is promoted to def and has a boolean result of true - -result = dl === l0; // compares dl to l0 with a boolean result of true - -result = null === dl; // compares null to dl with a boolean result of false -result = l1 === null; // compares null to l1 with a boolean result of false ----- - -==== Identity Not Equals - -Identity not equals compares two expressions where a resultant boolean value is true if the two expressions are not equal and false otherwise. Two primitive types are considered to be not equal if they have different values. Two reference types are considered to be not equal if they refer to the different instances in memory or one is null and the other is not. Valid comparisons are between boolean types, primitive numeric types, and reference types. If a comparison is made that is not listed as one of the valid comparisons an error will occur. The format is an expression, followed by the bang-equals-equals operator, and finished with an expression. - -*Grammar:* -[source,ANTLR4] ----- -identity_not_equals: expression '!==' expression; ----- - -A numeric type promotion may occur during a primitive numeric comparison. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. 
- -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -boolean b0 = true; // declares the boolean variable b0 and sets it the constant boolean true -boolean b1 = false; // declares the boolean variable b1 and sets it the constant boolean false -int i = 2; // declares the int variable i and sets it the constant int 2 -float f = 2.0f; // declares the float variable f and sets it the constant float 2.0 -List l0 = new ArrayList(); // declares the List variable l0 and sets it to a newly allocated ArrayList -ArrayList l1 = new ArrayList(); // declares the ArrayList variable l1 and sets it to a newly allocated ArrayList -List l2 = l1; // declares the List variable l2 and sets it to l1 -def di0 = 2; // declares the def variable di0 and sets it the constant int 2 -def di1 = 3; // declares the def variable di1 and sets it the constant int 3 -def dl = l0; // declares the def variable dl and sets it to l0 -boolean result; // declares the boolean variable result - -result = b0 !== b1; // compares b0 to b1 and has a boolean result of true -result = i !== f; // compares i to f where i is promoted to float and has a boolean result of false -result = b0 !== i; // ERROR: a comparison between a boolean and a primitive numeric type is illegal -result = i !== l0; // ERROR: a comparison between a primitive numeric type and a reference type is illegal - -l0.add(1); // adds a constant int 1 to the List l0 -l1.add(1); // adds a constant int 1 to the ArrayList l1 -result = l0 !== l1; // compares l0 to l1 and has a boolean result of true -l0.add(1); // 
adds a constant int 1 to the List l0 -result = l0 !== l1; // compares l0 to l1 and has a boolean result of true -result = l1 !== l2; // compares l1 to l2 and has a boolean result of false - -result = di0 !== di1; // compares di0 to di1 and has a boolean result of true -result = di0 !== i; // compares di0 to i where i is promoted to def and has a boolean result of false - -result = dl !== l0; // compares dl to l0 with a boolean result of false - -result = null !== dl; // compares null to dl with a boolean result of true -result = l1 !== null; // compares null to l1 with a boolean result of true ----- - -==== Bitwise And - -Bitwise and will and together two integer type expressions. The table below shows what each resultant bit will in the resultant integer type value be based on the corresponding bit in each integer type expression. - -|==== -||1|0 -|1|1|0 -|0|0|0 -|==== - -The format starts with an expression, follows with the ampersand operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -bitwise_and: expression '&' expression; ----- - -A numeric promotion may occur during a bitwise and operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-integer expressions will result in an error. 
- -Promotion Table: -|==== -||byte|short|char|int|long|def -|byte|int|int|int|int|long|def -|short|int|int|int|int|long|def -|char|int|int|int|int|long|def -|int|int|int|int|int|long|def -|long|long|long|long|long|long|def -|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -byte x = 16; // declares the byte variable x and sets it to a constant int 16 -int y = x & 4; // declares the int variable y and sets it to the result of x and 4 -long z = y & x; // declares the long variable z and sets it the result of y and x -def d = z & 2; // declares the def variable d and sets it the result of z and 2 -def e; // declares the def variable e -e = d & z; // sets e to the result of d and z ----- - -==== Boolean Xor - -Boolean xor will xor together two boolean expressions. The table below shows what the resultant boolean value will be based on the two boolean expressions. - -|==== -||true|false -|true|false|true -|false|true|false -|==== - -The format starts with an expression, follows with the caret operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -boolean_xor: expression '^' expression; ----- - -Note that def types will be assumed to be of the boolean type. Any def type evaluated at run-time that does not represent a boolean will result in an error. Non-boolean expressions will result in an error. - -*Examples:* -[source,Java] ----- -boolean x = false; // declares the boolean variable x and sets the constant boolean false -boolean y = x ^ true; // declares the boolean variable y and sets it the result of x xor true -def z = y ^ x; // declares the def variable z and sets it to the result of y xor x ----- - -==== Bitwise Xor - -Bitwise xor will xor together two integer type expressions. The table below shows what each resultant bit will in the resultant integer type value be based on the corresponding bit in each integer type expression. 
- -|==== -||1|0 -|1|0|1 -|0|1|0 -|==== - -The format starts with an expression, follows with the caret operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -bitwise_xor: expression '^' expression; ----- - -A numeric promotion may occur during a bitwise xor operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-integer expressions will result in an error. - -Promotion Table: -|==== -||byte|short|char|int|long|def -|byte|int|int|int|int|long|def -|short|int|int|int|int|long|def -|char|int|int|int|int|long|def -|int|int|int|int|int|long|def -|long|long|long|long|long|long|def -|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -byte x = 16; // declares the byte variable x and sets it to a constant int 16 -int y = x ^ 4; // declares the int variable y and sets it to the result of x xor 4 -long z = y ^ x; // declares the long variable z and sets it the result of y xor x -def d = z ^ 2; // declares the def variable d and sets it the result of z xor 2 -def e; // declares the def variable e -e = d ^ z; // sets e to the result of d xor z ----- - -==== Bitwise Or - -Bitwise or will or together two integer type expressions. The table below shows what each resultant bit will in the resultant integer type value be based on the corresponding bit in each integer type expression. - -|==== -||1|0 -|1|1|1 -|0|1|0 -|==== - -The format starts with an expression, follows with the pipe operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -bitwise_or: expression '|' expression; ----- - -A numeric promotion may occur during a bitwise or operation. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. Non-integer expressions will result in an error. 
- -Promotion Table: -|==== -||byte|short|char|int|long|def -|byte|int|int|int|int|long|def -|short|int|int|int|int|long|def -|char|int|int|int|int|long|def -|int|int|int|int|int|long|def -|long|long|long|long|long|long|def -|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -byte x = 16; // declares the byte variable x and sets it to a constant int 16 -int y = x | 4; // declares the int variable y and sets it to the result of x or 4 -long z = y | x; // declares the long variable z and sets it the result of y or x -def d = z | 2; // declares the def variable d and sets it the result of z or 2 -def e; // declares the def variable e -e = d | z; // sets e to the result of d or z ----- - -==== Boolean And - -Boolean and will and together two boolean expressions. If the first expression is found to be false then it is known that the result will also be false, so evaluation of the second expression will be skipped. The table below shows what the resultant boolean value will be based on the two boolean expressions. - -|==== -||true|false -|true|true|false -|false|false|false -|==== - -The format starts with an expression, follows with the ampersand-ampersand operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -boolean_and: expression '&&' expression; ----- - -Note that def types will be assumed to be of the boolean type. Any def type evaluated at run-time that does not represent a boolean will result in an error. Non-boolean expressions will result in an error. - -*Examples:* -[source,Java] ----- -boolean x = false; // declares the boolean variable x and sets the constant boolean false -boolean y = x && true; // declares the boolean variable y and sets it the result of x and true -def z = y && x; // declares the def variable z and sets it to the result of y and x ----- - -==== Boolean Or - -Boolean or will or together two boolean expressions. 
If the first expression is found to be true then it is known that the result will also be true, so evaluation of the second expression will be skipped. The table below shows what the resultant boolean value will be based on the two boolean expressions. - -|==== -||true|false -|true|true|true -|false|true|false -|==== - -The format starts with an expression, follows with the pipe-pipe operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -boolean_or: expression '||' expression; ----- - -Note that def types will be assumed to be of the boolean type. Any def type evaluated at run-time that does not represent a boolean will result in an error. Non-boolean expressions will result in an error. - -*Examples:* -[source,Java] ----- -boolean x = false; // declares the boolean variable x and sets the constant boolean false -boolean y = x || true; // declares the boolean variable y and sets it the result of x or true -def z = y || x; // declares the def variable z and sets it to the result of y or x ----- - -==== Conditional - -A conditional operation consists of three expressions. The first expression is evaluated with an expected boolean result type. If the first expression evaluates to true then the second expression will be evaluated. If the first expression evaluates to false then the third expression will be evaluated. This can be used as a shortcut for many different operations without requiring a full if/else branch. Errors will occur if the first expression does not evaluate to a boolean type or if one of the second or third expression cannot be converted to a type appropriate for the expected result. The format is an expression followed by a question-mark operator, another expression, a colon operator, and finishes with a final expression. - -*Grammar:* -[source,ANTLR4] ----- -conditional: expression '?' 
expression ':' expression; ----- - -A numeric type promotion may occur during the evaluation of a conditional with the second and third expressions if the expected result is a numeric type. A def type evaluated at run-time will follow the same promotion table at run-time following whatever type def represents. - -Promotion Table: -|==== -||byte|short|char|int|long|float|double|def -|byte|int|int|int|int|long|float|double|def -|short|int|int|int|int|long|float|double|def -|char|int|int|int|int|long|float|double|def -|int|int|int|int|int|long|float|double|def -|long|long|long|long|long|long|float|double|def -|float|float|float|float|float|float|float|double|def -|double|double|double|double|double|double|double|double|def -|def|def|def|def|def|def|def|def|def -|==== - -*Examples:* -[source,Java] ----- -boolean b = true; // declares the boolean variable b and sets it the constant boolean true - -int x = b ? 1 : 2; // declares the int variable x and sets it to the int constant 1 - // since the first expression of the conditional evaluates to true - // so the second expression is evaluated for a result - -List y = x > 1 ? new ArrayList() : null; // declares the List variable y and sets it to null - // since the first expression of the conditional evaluates to false - // so the third expression is evaluated for a result - -def z = x < 2 ? true : false; // declares the def variable z and sets it to the boolean constant true - // since the first expression of the conditional evaluates to true - // so the second expression is evaluated for a result ----- - -==== Elvis - -The elvis operator consists of two expressions. If the first expression is a non-null value then the resultant value will be the evaluated first expression otherwise the resultant value will be the evaluated second expression. This is typically used as a shortcut for a null check in a conditional. An error will occur if the expected result is a primitive type. 
The format is an expression, followed by the question-mark-colon operator, and finishes with an expression. - -*Grammar:* -[source,ANTLR4] ----- -elvis: expression '?:' expression; ----- - -*Examples:* -[source,Java] ----- -List l = new ArrayList(); // declares the List variable l and sets it to a newly allocated ArrayList -List y = l ?: new ArrayList(); // declares the List variable y and sets it to l since l is not null -y = null; // sets y to null -def z = y ?: new HashMap(); // declares the def variable z and sets it to a newly allocated HashMap since y is null ----- - -==== Assignment - -Assignment can be used to assign a value to a variable. See Variable Assignment [MARK] for more information. - -==== Compound Assignment - -Compound assignment can be used as a shortcut for an assignment where a binary operation would occur between the variable/field as the left-side expression and a separate right-side expression. The variable/field and right-side expression must be of appropriate types for the specific operation or an error will occur. A downcast may be necessary for certain operations to be able to assign the result back into the variable/field and will happen implicitly. The format is a variable/field, followed by one of the compound assignment operators, finished with an expression. - -*Grammar:* -[source,ANTLR4] ----- -compund_assignment: ID (. ID)? '$=' expression; // $ is a placeholder for the operation symbol ----- - -A compound assignment is equivalent to the expression below where V is the variable/field and T is the type of variable/member. - -[source,Java] ----- -V = (T)(V op expression); ----- - -The table below shows all available operators for compound assignment. All operators follow any casting/promotion rules according to their regular definition. 
- -|==== -|Operator|Compound Symbol -|Multiplication|*= -|Division|/= -|Remainder|%= -|String Concatenation|+= -|Addition|+= -|Subtraction|-= -|Left Shift|<<= -|Right Shift|>>= -|Unsigned Right Shift|>>>= -|Bitwise And|&= -|Boolean And|&= -|Bitwise Xor|^= -|Boolean Xor|^= -|Bitwise Or|\|= -|Boolean Or|\|= -|==== - -*Examples:* -[source,Java] ----- -int i = 10; // declares the variable i and sets it to constant int 10 -i *= 2; // multiplies i by 2 -- i = (int)(i * 2) -i /= 5; // divides i by 5 -- i = (int)(i / 5) -i %= 3; // gives the remainder for i/3 -- i = (int)(i % 3) -i += 5; // adds 5 to i -- i = (int)(i + 5) -i -= 5; // subtracts 5 from i -- i = (int)(i - 5) -i <<= 2; // left shifts i by 2 -- i = (int)(i << 2) -i >>= 1; // right shifts i by 1 -- i = (int)(i >> 1) -i >>>= 1; // unsigned right shifts i by 1 -- i = (int)(i >>> 1) -i &= 15; // ands i with 15 -- i = (int)(i & 15) -i ^= 12; // xors i with 12 -- i = (int)(i ^ 12) -i |= 4; // ors i with 4 -- i = (int)(i | 4) - -boolean b = true; // declares the boolean variable b and sets it to the constant boolean true -b &= false; // ands b with false -- b = (boolean)(b & false) -b ^= false; // xors b with false -- b = (boolean)(b ^ false) -b |= true; // ors b with true -- b = (boolean)(b | true) - -def x = 'compound'; // declares the def variable x and sets it to the constant String 'compound' -x += ' assignment'; // string concatenates ' assignment' to x -- x = (String)(x + ' assignment') ----- diff --git a/docs/painless/painless-regexes.asciidoc b/docs/painless/painless-regexes.asciidoc new file mode 100644 index 00000000000..b4434208ab0 --- /dev/null +++ b/docs/painless/painless-regexes.asciidoc @@ -0,0 +1,33 @@ +[[painless-regexes]] +=== Regexes + +Regular expression constants are directly supported. To ensure fast performance, +this is the only mechanism for creating patterns. Regular expressions +are always constants and compiled efficiently a single time. 
+ +[source,painless] +--------------------------------------------------------- +Pattern p = /[aeiou]/ +--------------------------------------------------------- + +[[pattern-flags]] +==== Pattern flags + +You can define flags on patterns in Painless by adding characters after the +trailing `/` like `/foo/i` or `/foo \w #comment/iUx`. Painless exposes all of +the flags from Java's +https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html[ +Pattern class] using these characters: + +[cols="<,<,<",options="header",] +|======================================================================= +| Character | Java Constant | Example +|`c` | CANON_EQ | `'å' ==~ /å/c` (open in hex editor to see) +|`i` | CASE_INSENSITIVE | `'A' ==~ /a/i` +|`l` | LITERAL | `'[a]' ==~ /[a]/l` +|`m` | MULTILINE | `'a\nb\nc' =~ /^b$/m` +|`s` | DOTALL (aka single line) | `'a\nb\nc' =~ /.b./s` +|`U` | UNICODE_CHARACTER_CLASS | `'Ɛ' ==~ /\\w/U` +|`u` | UNICODE_CASE | `'Ɛ' ==~ /ɛ/iu` +|`x` | COMMENTS (aka extended) | `'a' ==~ /a #comment/x` +|======================================================================= \ No newline at end of file diff --git a/docs/painless/painless-scripts.asciidoc b/docs/painless/painless-scripts.asciidoc new file mode 100644 index 00000000000..87e5b601590 --- /dev/null +++ b/docs/painless/painless-scripts.asciidoc @@ -0,0 +1,6 @@ +[[painless-scripts]] +=== Scripts + +Scripts are composed of one-to-many <> and are +run in a sandbox that determines what local variables are immediately available +along with what APIs are whitelisted for use. 
\ No newline at end of file diff --git a/docs/painless/painless-statements.asciidoc b/docs/painless/painless-statements.asciidoc new file mode 100644 index 00000000000..3bc4513baa7 --- /dev/null +++ b/docs/painless/painless-statements.asciidoc @@ -0,0 +1,14 @@ +[[painless-statements]] +=== Statements + +Painless supports all of Java's https://docs.oracle.com/javase/tutorial/java/nutsandbolts/flow.html[ +control flow statements] except the `switch` statement. + +Painless also supports the `for in` syntax from Groovy: + +[source,painless] +--------------------------------------------------------- +for (item : list) { + ... +} +--------------------------------------------------------- \ No newline at end of file diff --git a/docs/painless/painless-types.asciidoc b/docs/painless/painless-types.asciidoc index a897b8e8a04..65ae9b3f703 100644 --- a/docs/painless/painless-types.asciidoc +++ b/docs/painless/painless-types.asciidoc @@ -12,16 +12,16 @@ belongs to one of the following categories: <>, A primitive type represents basic data built natively into the JVM and is allocated to non-heap memory. Declare a primitive type -<>, and assign it a primitive type value for -evaluation during later operations. The default value for a newly-declared -primitive type variable is listed as part of the definitions below. A primitive -type value is copied during an assignment or as an argument for a -method/function call. +<> or access a primitive type member field (from +a reference type instance), and assign it a primitive type value for evaluation +during later operations. The default value for a newly-declared primitive type +variable is listed as part of the definitions below. A primitive type value is +copied during an assignment or as an argument for a method/function call. A primitive type has a corresponding reference type (also known as a boxed -type). Use the <> or -<> on a primitive type value to force -evaluation as its corresponding reference type value. +type). 
Use the <> or +<> on a primitive type value to +force evaluation as its corresponding reference type value. The following primitive types are available: @@ -83,11 +83,11 @@ logical quantity with two possible values of `true` and `false` ---- + <1> declare `int i`; - assign `int 1` to `i` + store `int 1` to `i` <2> declare `double d`; - assign default `double 0.0` to `d` + store default `double 0.0` to `d` <3> declare `boolean b`; - assign `boolean true` to `b` + store `boolean true` to `b` + * Method call on a primitive type using the corresponding reference type. + @@ -98,8 +98,8 @@ logical quantity with two possible values of `true` and `false` ---- + <1> declare `int i`; - assign `int 1` to `i` -<2> access `i` -> `int 1`; + store `int 1` to `i` +<2> load from `i` -> `int 1`; box `int 1` -> `Integer 1 reference`; call `toString` on `Integer 1 reference` -> `String '1'` @@ -113,7 +113,7 @@ multiple pieces of data (member fields) and logic to manipulate that data A reference type instance is a single set of data for one reference type object allocated to the heap. Use the -<> to allocate a reference type +<> to allocate a reference type instance. Use a reference type instance to load from, store to, and manipulate complex data. @@ -122,10 +122,11 @@ reference type values may refer to the same reference type instance. A change to a reference type instance will affect all reference type values referring to that specific instance. -Declare a reference type <>, and assign it a -reference type value for evaluation during later operations. The default value -for a newly-declared reference type variable is `null`. A reference type value -is shallow-copied during an assignment or as an argument for a method/function +Declare a reference type <> or access a reference +type member field (from a reference type instance), and assign it a reference +type value for evaluation during later operations. The default value for a +newly-declared reference type variable is `null`. 
A reference type value is +shallow-copied during an assignment or as an argument for a method/function call. Assign `null` to a reference type variable to indicate the reference type value refers to no reference type instance. The JVM will garbage collect a reference type instance when it is no longer referred to by any reference type @@ -138,8 +139,8 @@ static member field:: A static member field is a named and typed piece of data. Each reference type *object* contains one set of data representative of its static member fields. -Use the <> in correspondence with the -reference type object name to access a static member field for loading and +Use the <> in correspondence with +the reference type object name to access a static member field for loading and storing to a specific reference type *object*. No reference type instance allocation is necessary to use a static member field. @@ -148,32 +149,34 @@ non-static member field:: A non-static member field is a named and typed piece of data. Each reference type *instance* contains one set of data representative of its reference type object's non-static member fields. Use the -<> for loading and storing to a non-static -member field of a specific reference type *instance*. An allocated reference -type instance is required to use a non-static member field. +<> for loading and storing to a +non-static member field of a specific reference type *instance*. An allocated +reference type instance is required to use a non-static member field. static member method:: -A static member method is a function called on a reference type *object*. Use -the <> in correspondence with the reference -type object name to call a static member method. No reference type instance -allocation is necessary to use a static member method. +A static member method is a <> called on a +reference type *object*. Use the <> +in correspondence with the reference type object name to call a static member +method. 
No reference type instance allocation is necessary to use a static +member method. non-static member method:: -A non-static member method is a function called on a reference type *instance*. -A non-static member method called on a reference type instance can load from and -store to non-static member fields of that specific reference type instance. Use -the <> in correspondence with a specific -reference type instance to call a non-static member method. An allocated -reference type instance is required to use a non-static member method. +A non-static member method is a <> called on a +reference type *instance*. A non-static member method called on a reference type +instance can load from and store to non-static member fields of that specific +reference type instance. Use the <> +in correspondence with a specific reference type instance to call a non-static +member method. An allocated reference type instance is required to use a +non-static member method. constructor:: -A constructor is a special type of function used to allocate a reference type -*instance* defined by a specific reference type *object*. Use the -<> to allocate a reference type -instance. +A constructor is a special type of <> used to +allocate a reference type *instance* defined by a specific reference type +*object*. Use the <> to allocate +a reference type instance. A reference type object follows a basic inheritance model. Consider types A and B. Type A is considered to be a parent of B, and B a child of A, if B inherits @@ -198,16 +201,16 @@ relationships. 
<1> declare `List l`; allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `l` -<2> access `l` -> `List reference`; + store `List reference` to `l` +<2> load from `l` -> `List reference`; implicit cast `int 1` to `def` -> `def` call `add` on `List reference` with arguments (`def`) <3> declare `int i`; - access `l` -> `List reference`; + load from `l` -> `List reference`; call `get` on `List reference` with arguments (`int 0`) -> `def`; implicit cast `def` to `int 1` -> `int 1`; add `int 1` and `int 2` -> `int 3`; - assign `int 3` to `i` + store `int 3` to `i` + * Sharing a reference type instance. + @@ -223,26 +226,26 @@ relationships. <1> declare `List l0`; allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `l0` + store `List reference` to `l0` <2> declare `List l1`; - access `l0` -> `List reference`; - assign `List reference` to `l1` + load from `l0` -> `List reference`; + store `List reference` to `l1` (note `l0` and `l1` refer to the same instance known as a shallow-copy) -<3> access `l0` -> `List reference`; +<3> load from `l0` -> `List reference`; implicit cast `int 1` to `def` -> `def` call `add` on `List reference` with arguments (`def`) -<4> access `l1` -> `List reference`; +<4> load from `l1` -> `List reference`; implicit cast `int 2` to `def` -> `def` call `add` on `List reference` with arguments (`def`) <5> declare `int i`; - access `l0` -> `List reference`; + load from `l0` -> `List reference`; call `get` on `List reference` with arguments (`int 0`) -> `def @0`; implicit cast `def @0` to `int 1` -> `int 1`; - access `l1` -> `List reference`; + load from `l1` -> `List reference`; call `get` on `List reference` with arguments (`int 1`) -> `def @1`; implicit cast `def @1` to `int 2` -> `int 2`; add `int 1` and `int 2` -> `int 3`; - assign 
`int 3` to `i`; + store `int 3` to `i`; + * Using the static members of a reference type. + @@ -253,11 +256,11 @@ relationships. ---- + <1> declare `int i`; - access `MAX_VALUE` on `Integer` -> `int 2147483647`; - assign `int 2147483647` to `i` + load from `MAX_VALUE` on `Integer` -> `int 2147483647`; + store `int 2147483647` to `i` <2> declare `long l`; call `parseLong` on `Long` with arguments (`long 123`) -> `long 123`; - assign `long 123` to `l` + store `long 123` to `l` [[dynamic-types]] ==== Dynamic Types @@ -268,11 +271,12 @@ the behavior of whatever value it represents at run-time and will always represent the child-most descendant type value of any type value when evaluated during operations. -Declare a `def` type <>, and assign it -any type of value for evaluation during later operations. The default value -for a newly-declared `def` type variable is `null`. A `def` type variable or -method/function parameter can change the type it represents during the -compilation and evaluation of a script. +Declare a `def` type <> or access a `def` type +member field (from a reference type instance), and assign it any type of value +for evaluation during later operations. The default value for a newly-declared +`def` type variable is `null`. A `def` type variable or method/function +parameter can change the type it represents during the compilation and +evaluation of a script. Using the `def` type can have a slight impact on performance. Use only primitive types and reference types directly when performance is critical. @@ -295,13 +299,13 @@ types and reference types directly when performance is critical. 
+ <1> declare `def dp`; implicit cast `int 1` to `def` -> `def`; - assign `def` to `dp` + store `def` to `dp` <2> declare `def dr`; allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `def` -> `def`; - assign `def` to `dr` -<3> access `dp` -> `def`; - assign `def` to `dr`; + store `def` to `dr` +<3> load from `dp` -> `def`; + store `def` to `dr`; (note the switch in the type `dr` represents from `ArrayList` to `int`) + * A `def` type value representing the child-most descendant of a value. @@ -317,12 +321,12 @@ types and reference types directly when performance is critical. allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `Object reference` -> `Object reference`; - assign `Object reference` to `l` + store `Object reference` to `l` <2> declare `def d`; - access `l` -> `Object reference`; + load from `l` -> `Object reference`; implicit cast `Object reference` to `def` -> `def`; - assign `def` to `d`; -<3> access `d` -> `def`; + store `def` to `d`; +<3> load from `d` -> `def`; implicit cast `def` to `ArrayList reference` -> `ArrayList reference`; call `ensureCapacity` on `ArrayList reference` with arguments (`int 10`); (note `def` was implicit cast to `ArrayList reference` @@ -333,9 +337,9 @@ types and reference types directly when performance is critical. ==== String Type The `String` type is a specialized reference type that does not require -explicit allocation. Use a <> to directly evaluate a -`String` type value. While not required, the -<> can allocate `String` type +explicit allocation. Use a <> to directly +evaluate a `String` type value. While not required, the +<> can allocate `String` type instances. *Examples* @@ -351,15 +355,15 @@ instances. 
---- + <1> declare `String r`; - assign `String "some text"` to `r` + store `String "some text"` to `r` <2> declare `String s`; - assign `String 'some text'` to `s` + store `String 'some text'` to `s` <3> declare `String t`; allocate `String` instance with arguments (`String "some text"`) -> `String "some text"`; - assign `String "some text"` to `t` + store `String "some text"` to `t` <4> declare `String u`; - assign default `null` to `u` + store default `null` to `u` [[void-type]] ==== void Type @@ -382,35 +386,38 @@ void addToList(List l, def d) { ==== Array Type An array type is a specialized reference type where an array type instance -represents a series of values allocated to the heap. All values in an array -type instance are of the same type. Each value is assigned an index from within -the range `[0, length)` where length is the total number of values allocated for -the array type instance. +contains a series of values allocated to the heap. Each value in an array type +instance is defined as an element. All elements in an array type instance are of +the same type (element type) specified as part of declaration. Each element is +assigned an index within the range `[0, length)` where length is the total +number of elements allocated for an array type instance. -Use the <> or the -<> to allocate an array -type instance. Declare an array type <>, and -assign it an array type value for evaluation during later operations. The -default value for a newly-declared array type variable is `null`. An array type -value is shallow-copied during an assignment or as an argument for a -method/function call. Assign `null` to an array type variable to indicate the -array type value refers to no array type instance. The JVM will garbage collect -an array type instance when it is no longer referred to by any array type -values. Pass `null` as an argument to a method/function call to indicate the -argument refers to no array type instance. 
+Use the <> or the +<> to allocate an +array type instance. Declare an array type <> or +access an array type member field (from a reference type instance), and assign +it an array type value for evaluation during later operations. The default value +for a newly-declared array type variable is `null`. An array type value is +shallow-copied during an assignment or as an argument for a method/function +call. Assign `null` to an array type variable to indicate the array type value +refers to no array type instance. The JVM will garbage collect an array type +instance when it is no longer referred to by any array type values. Pass `null` +as an argument to a method/function call to indicate the argument refers to no +array type instance. -Use the <> to retrieve the length of an -array type value as an int type value. Use the -<> to load from and store to individual -values within an array type value. +Use the <> to retrieve the length +of an array type value as an `int` type value. Use the +<> to load from and store to +an individual element within an array type instance. When an array type instance is allocated with multiple dimensions using the -range `[2, d]` where `d >= 2`, each dimension in the range `[1, d-1]` is also -an array type. The array type of each dimension, `n`, is an array type with the -number of dimensions equal to `d-n`. For example, consider `int[][][]` with 3 -dimensions. The 3rd dimension, `d-3`, is the primitive type `int`. The 2nd -dimension, `d-2`, is the array type `int[]`. And the 1st dimension, `d-1` is -the array type `int[][]`. +range `[2, d]` where `d >= 2`, each element within each dimension in the range +`[1, d-1]` is also an array type. The element type of each dimension, `n`, is an +array type with the number of dimensions equal to `d-n`. For example, consider +`int[][][]` with 3 dimensions. Each element in the 3rd dimension, `d-3`, is the +primitive type `int`. Each element in the 2nd dimension, `d-2`, is the array +type `int[]`. 
And each element in the 1st dimension, `d-1` is the array type +`int[][]`. *Examples* @@ -426,26 +433,26 @@ the array type `int[][]`. ---- + <1> declare `int[] x`; - assign default `null` to `x` + store default `null` to `x` <2> declare `float[] y`; allocate `1-d float array` instance with `length [10]` -> `1-d float array reference`; - assign `1-d float array reference` to `y` + store `1-d float array reference` to `y` <3> declare `def z`; allocate `1-d float array` instance with `length [5]` -> `1-d float array reference`; implicit cast `1-d float array reference` to `def` -> `def`; - assign `def` to `z` -<4> access `y` -> `1-d float array reference`; - assign `float 1.0` to `index [9]` of `1-d float array reference` -<5> access `y` -> `1-d float array reference @0`; - access `index [9]` of `1-d float array reference @0` -> `float 1.0`; - access `z` -> `def`; + store `def` to `z` +<4> load from `y` -> `1-d float array reference`; + store `float 1.0` to `index [9]` of `1-d float array reference` +<5> load from `y` -> `1-d float array reference @0`; + load from `index [9]` of `1-d float array reference @0` -> `float 1.0`; + load from `z` -> `def`; implicit cast `def` to `1-d float array reference @1` -> `1-d float array reference @1`; - assign `float 1.0` to `index [0]` of `1-d float array reference @1` + store `float 1.0` to `index [0]` of `1-d float array reference @1` + -* Use of a multi-dimensional array. +* General use of a multi-dimensional array. + [source,Painless] ---- @@ -457,10 +464,10 @@ the array type `int[][]`. 
<1> declare `int[][][] ia`; allocate `3-d int array` instance with length `[2, 3, 4]` -> `3-d int array reference`; - assign `3-d int array reference` to `ia3` -<2> access `ia3` -> `3-d int array reference`; - assign `int 99` to `index [1, 2, 3]` of `3-d int array reference` + store `3-d int array reference` to `ia3` +<2> load from `ia3` -> `3-d int array reference`; + store `int 99` to `index [1, 2, 3]` of `3-d int array reference` <3> declare `int i`; - access `ia3` -> `3-d int array reference`; - access `index [1, 2, 3]` of `3-d int array reference` -> `int 99`; - assign `int 99` to `i` + load from `ia3` -> `3-d int array reference`; + load from `index [1, 2, 3]` of `3-d int array reference` -> `int 99`; + store `int 99` to `i` diff --git a/docs/painless/painless-variables.asciidoc b/docs/painless/painless-variables.asciidoc index 8b8782b1511..8f83b9e2b57 100644 --- a/docs/painless/painless-variables.asciidoc +++ b/docs/painless/painless-variables.asciidoc @@ -4,7 +4,7 @@ A variable loads and stores a value for evaluation during <>. -[[declaration]] +[[variable-declaration]] ==== Declaration Declare a variable before use with the format of <> @@ -12,16 +12,17 @@ followed by <>. Declare an <> variable using an opening `[` token and a closing `]` token for each dimension directly after the identifier. Specify a comma-separated list of identifiers following the type to declare multiple -variables in a single statement. Use an <> -combined with a declaration to immediately assign a value to a variable. -A variable not immediately assigned a value will have a default value assigned -implicitly based on the type. +variables in a single statement. Use an +<> combined with a declaration to +immediately assign a value to a variable. A variable not immediately assigned a +value will have a default value assigned implicitly based on the type. *Errors* * If a variable is used prior to or without declaration. *Grammar* + [source,ANTLR4] ---- declaration : type ID assignment? 
(',' ID assignment?)*; @@ -45,37 +46,39 @@ assignment: '=' expression; ---- + <1> declare `int x`; - assign default `null` to `x` + store default `null` to `x` <2> declare `List y`; - assign default `null` to `y` + store default `null` to `y` <3> declare `int x`; - assign default `int 0` to `x`; + store default `int 0` to `x`; declare `int y`; - assign `int 5` to `y`; + store `int 5` to `y`; declare `int z`; - assign default `int 0` to `z`; + store default `int 0` to `z`; <4> declare `def d`; - assign default `null` to `d` + store default `null` to `d` <5> declare `int i`; - assign `int 10` to `i` + store `int 10` to `i` <6> declare `float[] f`; - assign default `null` to `f` + store default `null` to `f` <7> declare `Map[][] m`; - assign default `null` to `m` + store default `null` to `m` -[[assignment]] +[[variable-assignment]] ==== Assignment -Use the *assignment operator* to store a value in a variable. Any operation -that produces a value can be assigned to any variable as long as the -<> are the same or the resultant type can be -<> to the variable type. +Use the `assignment operator '='` to store a value in a variable for use in +subsequent operations. Any operation that produces a value can be assigned to +any variable as long as the <> are the same or the +resultant type can be <> to the variable +type. *Errors* * If the type of value is unable to match the type of variable. *Grammar* + [source,ANTLR4] ---- assignment: ID '=' expression @@ -92,8 +95,8 @@ assignment: ID '=' expression ---- + <1> declare `int i`; - assign default `int 0` to `i` -<2> assign `int 10` to `i` + store default `int 0` to `i` +<2> store `int 10` to `i` + * Declaration combined with immediate assignment. + @@ -104,11 +107,11 @@ assignment: ID '=' expression ---- + <1> declare `int i`; - assign `int 10` to `i` + store `int 10` to `i` <2> declare `double j`; - assign `double 2.0` to `j` + store `double 2.0` to `j` + -* Assignment of one variable to another using primitive types. 
+* Assignment of one variable to another using primitive type values. + [source,Painless] ---- @@ -117,12 +120,13 @@ assignment: ID '=' expression ---- + <1> declare `int i`; - assign `int 10` to `i` + store `int 10` to `i` <2> declare `int j`; - access `i` -> `int 10`; - assign `int 10` to `j` + load from `i` -> `int 10`; + store `int 10` to `j` + -* Assignment with reference types using the *new instance operator*. +* Assignment with reference types using the + <>. + [source,Painless] ---- @@ -132,13 +136,13 @@ assignment: ID '=' expression + <1> declare `ArrayList l`; allocate `ArrayList` instance -> `ArrayList reference`; - assign `ArrayList reference` to `l` + store `ArrayList reference` to `l` <2> declare `Map m`; allocate `HashMap` instance -> `HashMap reference`; implicit cast `HashMap reference` to `Map reference` -> `Map reference`; - assign `Map reference` to `m` + store `Map reference` to `m` + -* Assignment of one variable to another using reference types. +* Assignment of one variable to another using reference type values. + [source,Painless] ---- @@ -151,18 +155,19 @@ assignment: ID '=' expression <1> declare `List l`; allocate `ArrayList` instance -> `ArrayList reference`; implicit cast `ArrayList reference` to `List reference` -> `List reference`; - assign `List reference` to `l` + store `List reference` to `l` <2> declare `List k`; - access `l` -> `List reference`; - assign `List reference` to `k`; + load from `l` -> `List reference`; + store `List reference` to `k`; (note `l` and `k` refer to the same instance known as a shallow-copy) <3> declare `List m`; - assign default `null` to `m` -<4> access `k` -> `List reference`; - assign `List reference` to `m`; + store default `null` to `m` +<4> load from `k` -> `List reference`; + store `List reference` to `m`; (note `l`, `k`, and `m` refer to the same instance) + -* Assignment with an array type variable using the *new array operator*. +* Assignment with array type variables using the + <>. 
+ [source,Painless] ---- @@ -176,24 +181,24 @@ assignment: ID '=' expression ---- + <1> declare `int[] ia1`; - assign default `null` to `ia1` + store default `null` to `ia1` <2> allocate `1-d int array` instance with `length [2]` -> `1-d int array reference`; - assign `1-d int array reference` to `ia1` -<3> access `ia1` -> `1-d int array reference`; - assign `int 1` to `index [0]` of `1-d int array reference` + store `1-d int array reference` to `ia1` +<3> load from `ia1` -> `1-d int array reference`; + store `int 1` to `index [0]` of `1-d int array reference` <4> declare `int[] ib1`; - access `ia1` -> `1-d int array reference`; - assign `1-d int array reference` to `ib1`; + load from `ia1` -> `1-d int array reference`; + store `1-d int array reference` to `ib1`; (note `ia1` and `ib1` refer to the same instance known as a shallow copy) <5> declare `int[][] ic2`; allocate `2-d int array` instance with `length [2, 5]` -> `2-d int array reference`; - assign `2-d int array reference` to `ic2` -<6> access `ic2` -> `2-d int array reference`; - assign `int 2` to `index [1, 3]` of `2-d int array reference` -<7> access `ia1` -> `1-d int array reference`; - access `ic2` -> `2-d int array reference`; - assign `1-d int array reference` to + store `2-d int array reference` to `ic2` +<6> load from `ic2` -> `2-d int array reference`; + store `int 2` to `index [1, 3]` of `2-d int array reference` +<7> load from `ia1` -> `1-d int array reference`; + load from `ic2` -> `2-d int array reference`; + store `1-d int array reference` to `index [0]` of `2-d int array reference`; (note `ia1`, `ib1`, and `index [0]` of `ia2` refer to the same instance) diff --git a/x-pack/docs/en/commands/certgen.asciidoc b/docs/reference/commands/certgen.asciidoc similarity index 99% rename from x-pack/docs/en/commands/certgen.asciidoc rename to docs/reference/commands/certgen.asciidoc index c2a00f11b69..3a8b15fbd28 100644 --- a/x-pack/docs/en/commands/certgen.asciidoc +++ 
b/docs/reference/commands/certgen.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[certgen]] == certgen diff --git a/x-pack/docs/en/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc similarity index 99% rename from x-pack/docs/en/commands/certutil.asciidoc rename to docs/reference/commands/certutil.asciidoc index ad265c89f10..e0c6c701e31 100644 --- a/x-pack/docs/en/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[certutil]] == elasticsearch-certutil diff --git a/x-pack/docs/en/commands/index.asciidoc b/docs/reference/commands/index.asciidoc similarity index 100% rename from x-pack/docs/en/commands/index.asciidoc rename to docs/reference/commands/index.asciidoc diff --git a/x-pack/docs/en/commands/migrate-tool.asciidoc b/docs/reference/commands/migrate-tool.asciidoc similarity index 99% rename from x-pack/docs/en/commands/migrate-tool.asciidoc rename to docs/reference/commands/migrate-tool.asciidoc index 1d19452df80..a1903ac69da 100644 --- a/x-pack/docs/en/commands/migrate-tool.asciidoc +++ b/docs/reference/commands/migrate-tool.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[migrate-tool]] == elasticsearch-migrate diff --git a/x-pack/docs/en/commands/saml-metadata.asciidoc b/docs/reference/commands/saml-metadata.asciidoc similarity index 99% rename from x-pack/docs/en/commands/saml-metadata.asciidoc rename to docs/reference/commands/saml-metadata.asciidoc index 1cd283fd776..069c7135c01 100644 --- a/x-pack/docs/en/commands/saml-metadata.asciidoc +++ b/docs/reference/commands/saml-metadata.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[saml-metadata]] == saml-metadata diff --git a/x-pack/docs/en/commands/setup-passwords.asciidoc b/docs/reference/commands/setup-passwords.asciidoc similarity index 99% rename from x-pack/docs/en/commands/setup-passwords.asciidoc rename to docs/reference/commands/setup-passwords.asciidoc index 
b323dc8e5c1..a7dcd25d65e 100644 --- a/x-pack/docs/en/commands/setup-passwords.asciidoc +++ b/docs/reference/commands/setup-passwords.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[setup-passwords]] == elasticsearch-setup-passwords diff --git a/x-pack/docs/en/commands/syskeygen.asciidoc b/docs/reference/commands/syskeygen.asciidoc similarity index 98% rename from x-pack/docs/en/commands/syskeygen.asciidoc rename to docs/reference/commands/syskeygen.asciidoc index f4a198ff4bf..3ae7456448d 100644 --- a/x-pack/docs/en/commands/syskeygen.asciidoc +++ b/docs/reference/commands/syskeygen.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[syskeygen]] == elasticsearch-syskeygen diff --git a/x-pack/docs/en/commands/users-command.asciidoc b/docs/reference/commands/users-command.asciidoc similarity index 99% rename from x-pack/docs/en/commands/users-command.asciidoc rename to docs/reference/commands/users-command.asciidoc index ab1b89b149b..e53e0815c5d 100644 --- a/x-pack/docs/en/commands/users-command.asciidoc +++ b/docs/reference/commands/users-command.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="gold+"] [[users-command]] == Users Command ++++ diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index b38a554d681..adbdc01db1e 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -14,17 +14,17 @@ include::getting-started.asciidoc[] include::setup.asciidoc[] -include::{xes-repo-dir}/setup/setup-xes.asciidoc[] +include::setup/setup-xes.asciidoc[] include::{xes-repo-dir}/monitoring/configuring-monitoring.asciidoc[] include::{xes-repo-dir}/security/configuring-es.asciidoc[] -include::{xes-repo-dir}/setup/setup-xclient.asciidoc[] +include::setup/setup-xclient.asciidoc[] -include::{xes-repo-dir}/settings/configuring-xes.asciidoc[] +include::settings/configuring-xes.asciidoc[] -include::{xes-repo-dir}/setup/bootstrap-checks-xes.asciidoc[] +include::setup/bootstrap-checks-xes.asciidoc[] :edit_url: 
include::upgrade.asciidoc[] @@ -65,7 +65,7 @@ include::{xes-repo-dir}/rollup/index.asciidoc[] include::rest-api/index.asciidoc[] -include::{xes-repo-dir}/commands/index.asciidoc[] +include::commands/index.asciidoc[] :edit_url: include::how-to.asciidoc[] diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index 4a15436b804..bdb00916755 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -115,7 +115,7 @@ PUT my_index/_doc/1 [[match-unmatch]] ==== `match` and `unmatch` -The `match` parameter uses a pattern to match on the fieldname, while +The `match` parameter uses a pattern to match on the field name, while `unmatch` uses a pattern to exclude fields matched by `match`. The following example matches all `string` fields whose name starts with @@ -259,7 +259,7 @@ PUT my_index/_doc/1 -------------------------------------------------- // CONSOLE <1> The `english` field is mapped as a `string` field with the `english` analyzer. -<2> The `count` field is mapped as a `long` field with `doc_values` disabled +<2> The `count` field is mapped as a `long` field with `doc_values` disabled. [[template-examples]] ==== Template examples diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc index 19899c76d8a..ff959b98b6e 100644 --- a/docs/reference/mapping/params/fielddata.asciidoc +++ b/docs/reference/mapping/params/fielddata.asciidoc @@ -100,11 +100,6 @@ PUT my_index/_mapping/_doc <1> The mapping that you specify for `my_field` should consist of the existing mapping for that field, plus the `fielddata` parameter. -TIP: The `fielddata.*` parameter must have the same settings for fields of the -same name in the same index. Its value can be updated on existing fields -using the <>. 
- - [[field-data-filtering]] ==== `fielddata_frequency_filter` diff --git a/docs/reference/mapping/params/properties.asciidoc b/docs/reference/mapping/params/properties.asciidoc index fa74bffd9d3..e50c0b3ac77 100644 --- a/docs/reference/mapping/params/properties.asciidoc +++ b/docs/reference/mapping/params/properties.asciidoc @@ -78,7 +78,7 @@ GET my_index/_search { "query": { "match": { - "manager.name": "Alice White" <1> + "manager.name": "Alice White" } }, "aggs": { @@ -89,7 +89,7 @@ GET my_index/_search "aggs": { "Employee Ages": { "histogram": { - "field": "employees.age", <2> + "field": "employees.age", "interval": 5 } } diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index ecb2e8dace2..9b71d7e7404 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -42,6 +42,8 @@ string:: <> and <> <>:: Record numeric features to boost hits at query time. +<>:: Record numeric feature vectors to boost hits at query time. + [float] === Multi-fields @@ -90,4 +92,4 @@ include::types/parent-join.asciidoc[] include::types/feature.asciidoc[] - +include::types/feature-vector.asciidoc[] diff --git a/docs/reference/mapping/types/feature-vector.asciidoc b/docs/reference/mapping/types/feature-vector.asciidoc new file mode 100644 index 00000000000..2ce3c017fe3 --- /dev/null +++ b/docs/reference/mapping/types/feature-vector.asciidoc @@ -0,0 +1,64 @@ +[[feature-vector]] +=== Feature vector datatype + +A `feature_vector` field can index numeric feature vectors, so that they can +later be used to boost documents in queries with a +<> query. + +It is analogous to the <> datatype but is better suited +when the list of features is sparse so that it wouldn't be reasonable to add +one field to the mappings for each of them. 
+ +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "_doc": { + "properties": { + "topics": { + "type": "feature_vector" <1> + } + } + } + } +} + +PUT my_index/_doc/1 +{ + "topics": { <2> + "politics": 20, + "economics": 50.8 + } +} + +PUT my_index/_doc/2 +{ + "topics": { + "politics": 5.2, + "sports": 80.1 + } +} + +GET my_index/_search +{ + "query": { + "feature": { + "field": "topics.politics" + } + } +} +-------------------------------------------------- +// CONSOLE +<1> Feature vector fields must use the `feature_vector` field type +<2> Feature vector fields must be a hash with string keys and strictly positive numeric values + +NOTE: `feature_vector` fields only support single-valued features and strictly +positive values. Multi-valued fields and zero or negative values will be rejected. + +NOTE: `feature_vector` fields do not support sorting or aggregating and may +only be queried using <> queries. + +NOTE: `feature_vector` fields only preserve 9 significant bits for the +precision, which translates to a relative error of about 0.4%. + diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 804fb1c6508..238e26bf337 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -2,8 +2,8 @@ === Nested datatype The `nested` type is a specialised version of the <> datatype -that allows arrays of objects to be indexed and queried independently of each -other. +that allows arrays of objects to be indexed in a way that they can be queried +independently of each other. 
==== How arrays of objects are flattened diff --git a/x-pack/docs/en/node.asciidoc b/docs/reference/modules/ml-node.asciidoc similarity index 100% rename from x-pack/docs/en/node.asciidoc rename to docs/reference/modules/ml-node.asciidoc diff --git a/docs/reference/modules/node.asciidoc b/docs/reference/modules/node.asciidoc index cf053df1818..f772977e3f0 100644 --- a/docs/reference/modules/node.asciidoc +++ b/docs/reference/modules/node.asciidoc @@ -325,5 +325,5 @@ the <>, the <> and the <>. ifdef::include-xpack[] -include::{xes-repo-dir}/node.asciidoc[] +include::ml-node.asciidoc[] endif::include-xpack[] diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc index 19c29b1cf3a..387278a432a 100644 --- a/docs/reference/query-dsl/feature-query.asciidoc +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -2,9 +2,10 @@ === Feature Query The `feature` query is a specialized query that only works on -<> fields. Its goal is to boost the score of documents based -on the values of numeric features. It is typically put in a `should` clause of -a <> query so that its score is added to the score +<> fields and <> fields. +Its goal is to boost the score of documents based on the values of numeric +features. It is typically put in a `should` clause of a +<> query so that its score is added to the score of the query. Compared to using <> or other @@ -13,7 +14,16 @@ efficiently skip non-competitive hits when <> is set to `false`. Speedups may be spectacular. -Here is an example: +Here is an example that indexes various features: + - https://en.wikipedia.org/wiki/PageRank[`pagerank`], a measure of the + importance of a website, + - `url_length`, the length of the url, which typically correlates negatively + with relevance, + - `topics`, which associates a list of topics with every document alongside a + measure of how well the document is connected to this topic. 
+ +Then the example includes a query that searches for `"2016"` and boosts +based on `pagerank`, `url_length` and the `sports` topic. [source,js] -------------------------------------------------- @@ -28,6 +38,9 @@ PUT test "url_length": { "type": "feature", "positive_score_impact": false + }, + "topics": { + "type": "feature_vector" } } } @@ -36,32 +49,73 @@ PUT test PUT test/_doc/1 { - "pagerank": 10, - "url_length": 50 + "url": "http://en.wikipedia.org/wiki/2016_Summer_Olympics", + "content": "Rio 2016", + "pagerank": 50.3, + "url_length": 42, + "topics": { + "sports": 50, + "brazil": 30 + } } PUT test/_doc/2 { - "pagerank": 100, - "url_length": 20 + "url": "http://en.wikipedia.org/wiki/2016_Brazilian_Grand_Prix", + "content": "Formula One motor race held on 13 November 2016 at the Autódromo José Carlos Pace in São Paulo, Brazil", + "pagerank": 50.3, + "url_length": 47, + "topics": { + "sports": 35, + "formula one": 65, + "brazil": 20 + } +} + +PUT test/_doc/3 +{ + "url": "http://en.wikipedia.org/wiki/Deadpool_(film)", + "content": "Deadpool is a 2016 American superhero film", + "pagerank": 50.3, + "url_length": 37, + "topics": { + "movies": 60, + "super hero": 65 + } } POST test/_refresh -GET test/_search +GET test/_search { "query": { - "feature": { - "field": "pagerank" - } - } -} - -GET test/_search -{ - "query": { - "feature": { - "field": "url_length" + "bool": { + "must": [ + { + "match": { + "content": "2016" + } + } + ], + "should": [ + { + "feature": { + "field": "pagerank" + } + }, + { + "feature": { + "field": "url_length", + "boost": 0.1 + } + }, + { + "feature": { + "field": "topics.sports", + "boost": 0.4 + } + } + ] } } } diff --git a/docs/reference/query-dsl/span-multi-term-query.asciidoc b/docs/reference/query-dsl/span-multi-term-query.asciidoc index ff7af83451b..40bd1553298 100644 --- a/docs/reference/query-dsl/span-multi-term-query.asciidoc +++ b/docs/reference/query-dsl/span-multi-term-query.asciidoc @@ -37,10 +37,9 @@ GET /_search
-------------------------------------------------- // CONSOLE -WARNING: By default `span_multi queries are rewritten to a `span_or` query -containing **all** the expanded terms. This can be expensive if the number of expanded -terms is large. To avoid an unbounded expansion you can set the -<> of the multi term query to `top_terms_*` -rewrite. Or, if you use `span_multi` on `prefix` query only, you can -activate the <> field option of the `text` field instead. This will -rewrite any prefix query on the field to a a single term query that matches the indexed prefix. \ No newline at end of file +WARNING: `span_multi` queries will hit too many clauses failure if the number of terms that match the query exceeds the +boolean query limit (defaults to 1024).To avoid an unbounded expansion you can set the <> of the multi term query to `top_terms_*` rewrite. Or, if you use `span_multi` on `prefix` query only, +you can activate the <> field option of the `text` field instead. This will +rewrite any prefix query on the field to a a single term query that matches the indexed prefix. + diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index ac1c8388e83..2cee0f3a58c 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -288,8 +288,9 @@ GET /_search "pin.location" : [-70, 40], "order" : "asc", "unit" : "km", - "mode" : "min", - "distance_type" : "arc" + "mode" : "min", + "distance_type" : "arc", + "ignore_unmapped": true } } ], @@ -317,6 +318,12 @@ GET /_search The unit to use when computing sort values. The default is `m` (meters). + +`ignore_unmapped`:: + + Indicates if the unmapped field should be treated as a missing value. Setting it to `true` is equivalent to specifying + an `unmapped_type` in the field sort. The default is `false` (unmapped field are causing the search to fail). 
+ NOTE: geo distance sorting does not support configurable missing values: the distance will always be considered equal to +Infinity+ when a document does not have values for the field that is used for distance computation. diff --git a/x-pack/docs/en/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc similarity index 98% rename from x-pack/docs/en/settings/audit-settings.asciidoc rename to docs/reference/settings/audit-settings.asciidoc index 1e477083c90..5995c65a01c 100644 --- a/x-pack/docs/en/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -87,7 +87,7 @@ Controls how often to roll over to a new index: `hourly`, `daily`, `weekly`, or `xpack.security.audit.index.events.include`:: Specifies the audit events to be indexed. The default value is `anonymous_access_denied, authentication_failed, realm_authentication_failed, access_granted, access_denied, tampered_request, connection_granted, connection_denied, run_as_granted, run_as_denied`. -See {xpack-ref}/auditing.html#audit-event-types[Audit Entry Types] for the +See {xpack-ref}/audit-event-types.html[Audit Entry Types] for the complete list. 
`xpack.security.audit.index.events.exclude`:: diff --git a/x-pack/docs/en/settings/configuring-xes.asciidoc b/docs/reference/settings/configuring-xes.asciidoc similarity index 100% rename from x-pack/docs/en/settings/configuring-xes.asciidoc rename to docs/reference/settings/configuring-xes.asciidoc diff --git a/x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png b/docs/reference/settings/images/monitoring-es-cgroup-true.png similarity index 100% rename from x-pack/docs/en/settings/images/monitoring-es-cgroup-true.png rename to docs/reference/settings/images/monitoring-es-cgroup-true.png diff --git a/x-pack/docs/en/settings/license-settings.asciidoc b/docs/reference/settings/license-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/license-settings.asciidoc rename to docs/reference/settings/license-settings.asciidoc diff --git a/x-pack/docs/en/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/ml-settings.asciidoc rename to docs/reference/settings/ml-settings.asciidoc diff --git a/x-pack/docs/en/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/monitoring-settings.asciidoc rename to docs/reference/settings/monitoring-settings.asciidoc diff --git a/x-pack/docs/en/settings/notification-settings.asciidoc b/docs/reference/settings/notification-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/notification-settings.asciidoc rename to docs/reference/settings/notification-settings.asciidoc diff --git a/x-pack/docs/en/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/security-settings.asciidoc rename to docs/reference/settings/security-settings.asciidoc diff --git a/x-pack/docs/en/settings/sql-settings.asciidoc 
b/docs/reference/settings/sql-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/sql-settings.asciidoc rename to docs/reference/settings/sql-settings.asciidoc diff --git a/x-pack/docs/en/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc similarity index 100% rename from x-pack/docs/en/settings/ssl-settings.asciidoc rename to docs/reference/settings/ssl-settings.asciidoc diff --git a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc b/docs/reference/setup/bootstrap-checks-xes.asciidoc similarity index 100% rename from x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc rename to docs/reference/setup/bootstrap-checks-xes.asciidoc diff --git a/docs/reference/setup/install.asciidoc b/docs/reference/setup/install.asciidoc index 7668d45ee35..783cb804e7a 100644 --- a/docs/reference/setup/install.asciidoc +++ b/docs/reference/setup/install.asciidoc @@ -65,6 +65,5 @@ include::install/rpm.asciidoc[] include::install/windows.asciidoc[] -ifdef::include-xpack[] -include::{xes-repo-dir}/setup/docker.asciidoc[] -endif::include-xpack[] +include::install/docker.asciidoc[] + diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index af9d35f3f16..2abacf947c7 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -139,7 +139,7 @@ ifdef::include-xpack[] ==== Enable automatic creation of {xpack} indices {xpack} will try to automatically create a number of indices within Elasticsearch. 
-include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] +include::xpack-indices.asciidoc[] endif::include-xpack[] diff --git a/x-pack/docs/en/setup/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc similarity index 99% rename from x-pack/docs/en/setup/docker.asciidoc rename to docs/reference/setup/install/docker.asciidoc index 6ad6ef0fe6c..0e62fa207f6 100644 --- a/x-pack/docs/en/setup/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -1,4 +1,3 @@ -[role="xpack"] [[docker]] === Install {es} with Docker diff --git a/docs/reference/setup/install/next-steps.asciidoc b/docs/reference/setup/install/next-steps.asciidoc index 1e004eeee6b..e52cdfee077 100644 --- a/docs/reference/setup/install/next-steps.asciidoc +++ b/docs/reference/setup/install/next-steps.asciidoc @@ -1,11 +1,10 @@ [role="exclude"] ==== Next steps -You now have a test Elasticsearch environment set up. Before you start -serious development or go into production with Elasticsearch, you will need to -do some additional setup: +You now have a test {es} environment set up. Before you start +serious development or go into production with {es}, you must do some additional +setup: * Learn how to <>. * Configure <>. * Configure <>. - diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index a44b0b37d31..aad7cf5bf3b 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -126,7 +126,7 @@ ifdef::include-xpack[] ==== Enable automatic creation of {xpack} indices {xpack} will try to automatically create a number of indices within {es}. 
-include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] +include::xpack-indices.asciidoc[] endif::include-xpack[] diff --git a/docs/reference/setup/install/windows.asciidoc b/docs/reference/setup/install/windows.asciidoc index 861daa160e7..56bb953c18e 100644 --- a/docs/reference/setup/install/windows.asciidoc +++ b/docs/reference/setup/install/windows.asciidoc @@ -337,7 +337,7 @@ ifdef::include-xpack[] ==== Enable automatic creation of {xpack} indices {xpack} will try to automatically create a number of indices within {es}. -include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] +include::xpack-indices.asciidoc[] endif::include-xpack[] diff --git a/x-pack/docs/en/setup/xpack-indices.asciidoc b/docs/reference/setup/install/xpack-indices.asciidoc similarity index 100% rename from x-pack/docs/en/setup/xpack-indices.asciidoc rename to docs/reference/setup/install/xpack-indices.asciidoc diff --git a/docs/reference/setup/install/zip-targz.asciidoc b/docs/reference/setup/install/zip-targz.asciidoc index 94de390656b..f44742c648e 100644 --- a/docs/reference/setup/install/zip-targz.asciidoc +++ b/docs/reference/setup/install/zip-targz.asciidoc @@ -85,7 +85,7 @@ ifdef::include-xpack[] ==== Enable automatic creation of {xpack} indices {xpack} will try to automatically create a number of indices within {es}. -include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] +include::xpack-indices.asciidoc[] endif::include-xpack[] diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 18c1272d25d..cd86a626891 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -53,7 +53,7 @@ ifdef::include-xpack[] ==== Enable automatic creation of {xpack} indices {xpack} will try to automatically create a number of indices within {es}. 
-include::{xes-repo-dir}/setup/xpack-indices.asciidoc[] +include::xpack-indices.asciidoc[] endif::include-xpack[] diff --git a/x-pack/docs/en/setup/setup-xclient.asciidoc b/docs/reference/setup/setup-xclient.asciidoc similarity index 99% rename from x-pack/docs/en/setup/setup-xclient.asciidoc rename to docs/reference/setup/setup-xclient.asciidoc index b94d4fe9c10..4282264e395 100644 --- a/x-pack/docs/en/setup/setup-xclient.asciidoc +++ b/docs/reference/setup/setup-xclient.asciidoc @@ -1,4 +1,5 @@ [role="xpack"] +[testenv="basic"] [[setup-xpack-client]] == Configuring {xpack} Java Clients diff --git a/x-pack/docs/en/setup/setup-xes.asciidoc b/docs/reference/setup/setup-xes.asciidoc similarity index 100% rename from x-pack/docs/en/setup/setup-xes.asciidoc rename to docs/reference/setup/setup-xes.asciidoc diff --git a/libs/build.gradle b/libs/build.gradle index 7f24f69eedc..b0924aa1f54 100644 --- a/libs/build.gradle +++ b/libs/build.gradle @@ -33,12 +33,12 @@ subprojects { dependencies.all { Dependency dep -> Project depProject = dependencyToProject(dep) if (depProject != null - && false == depProject.path.equals(':libs:elasticsearch-core') + && false == depProject.path.equals(':libs:core') && false == isEclipse && depProject.path.startsWith(':libs')) { throw new InvalidUserDataException("projects in :libs " + "may not depend on other projects libs except " - + ":libs:elasticsearch-core but " + + ":libs:core but " + "${project.path} depends on ${depProject.path}") } } diff --git a/server/cli/build.gradle b/libs/cli/build.gradle similarity index 100% rename from server/cli/build.gradle rename to libs/cli/build.gradle diff --git a/server/cli/licenses/jopt-simple-5.0.2.jar.sha1 b/libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 similarity index 100% rename from server/cli/licenses/jopt-simple-5.0.2.jar.sha1 rename to libs/cli/licenses/jopt-simple-5.0.2.jar.sha1 diff --git a/server/cli/licenses/jopt-simple-LICENSE.txt b/libs/cli/licenses/jopt-simple-LICENSE.txt similarity 
index 100% rename from server/cli/licenses/jopt-simple-LICENSE.txt rename to libs/cli/licenses/jopt-simple-LICENSE.txt diff --git a/server/cli/licenses/jopt-simple-NOTICE.txt b/libs/cli/licenses/jopt-simple-NOTICE.txt similarity index 100% rename from server/cli/licenses/jopt-simple-NOTICE.txt rename to libs/cli/licenses/jopt-simple-NOTICE.txt diff --git a/server/cli/src/main/java/org/elasticsearch/cli/Command.java b/libs/cli/src/main/java/org/elasticsearch/cli/Command.java similarity index 100% rename from server/cli/src/main/java/org/elasticsearch/cli/Command.java rename to libs/cli/src/main/java/org/elasticsearch/cli/Command.java diff --git a/server/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java b/libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java similarity index 100% rename from server/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java rename to libs/cli/src/main/java/org/elasticsearch/cli/ExitCodes.java diff --git a/server/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java b/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java similarity index 99% rename from server/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java rename to libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java index 054a29e78a6..bcc75a2d1be 100644 --- a/server/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/MultiCommand.java @@ -19,17 +19,15 @@ package org.elasticsearch.cli; -import java.io.Closeable; +import joptsimple.NonOptionArgumentSpec; +import joptsimple.OptionSet; +import org.elasticsearch.core.internal.io.IOUtils; + import java.io.IOException; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; -import joptsimple.NonOptionArgumentSpec; -import joptsimple.OptionSet; - -import org.elasticsearch.core.internal.io.IOUtils; - /** * A cli tool which is made up of multiple subcommands. 
*/ diff --git a/server/cli/src/main/java/org/elasticsearch/cli/SuppressForbidden.java b/libs/cli/src/main/java/org/elasticsearch/cli/SuppressForbidden.java similarity index 100% rename from server/cli/src/main/java/org/elasticsearch/cli/SuppressForbidden.java rename to libs/cli/src/main/java/org/elasticsearch/cli/SuppressForbidden.java diff --git a/server/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java similarity index 100% rename from server/cli/src/main/java/org/elasticsearch/cli/Terminal.java rename to libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java diff --git a/server/cli/src/main/java/org/elasticsearch/cli/UserException.java b/libs/cli/src/main/java/org/elasticsearch/cli/UserException.java similarity index 100% rename from server/cli/src/main/java/org/elasticsearch/cli/UserException.java rename to libs/cli/src/main/java/org/elasticsearch/cli/UserException.java diff --git a/libs/elasticsearch-core/build.gradle b/libs/core/build.gradle similarity index 93% rename from libs/elasticsearch-core/build.gradle rename to libs/core/build.gradle index d374e7a8486..2017c2a418a 100644 --- a/libs/elasticsearch-core/build.gradle +++ b/libs/core/build.gradle @@ -81,7 +81,7 @@ dependencies { java9Compile sourceSets.main.output } - if (isEclipse == false || project.path == ":libs:elasticsearch-core-tests") { + if (isEclipse == false || project.path == ":libs:core-tests") { testCompile("org.elasticsearch.test:framework:${version}") { exclude group: 'org.elasticsearch', module: 'elasticsearch-core' } @@ -89,15 +89,15 @@ dependencies { } forbiddenApisMain { - // elasticsearch-core does not depend on server - // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + // :libs:core does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to server signaturesURLs = 
[PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:elasticsearch-core") { + if (project.path == ":libs:core") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { diff --git a/libs/elasticsearch-core/src/main/eclipse-build.gradle b/libs/core/src/main/eclipse-build.gradle similarity index 68% rename from libs/elasticsearch-core/src/main/eclipse-build.gradle rename to libs/core/src/main/eclipse-build.gradle index 9c84a4d6bd8..6bc7562f7fd 100644 --- a/libs/elasticsearch-core/src/main/eclipse-build.gradle +++ b/libs/core/src/main/eclipse-build.gradle @@ -1,2 +1,2 @@ -// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +// this is just shell gradle file for eclipse to have separate projects for core src and tests apply from: '../../build.gradle' diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JarHell.java rename to libs/core/src/main/java/org/elasticsearch/bootstrap/JarHell.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java b/libs/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java rename to libs/core/src/main/java/org/elasticsearch/bootstrap/JavaVersion.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java b/libs/core/src/main/java/org/elasticsearch/common/Booleans.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java rename to 
libs/core/src/main/java/org/elasticsearch/common/Booleans.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java b/libs/core/src/main/java/org/elasticsearch/common/CheckedFunction.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java rename to libs/core/src/main/java/org/elasticsearch/common/CheckedFunction.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java b/libs/core/src/main/java/org/elasticsearch/common/Glob.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java rename to libs/core/src/main/java/org/elasticsearch/common/Glob.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java b/libs/core/src/main/java/org/elasticsearch/common/Nullable.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java rename to libs/core/src/main/java/org/elasticsearch/common/Nullable.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java b/libs/core/src/main/java/org/elasticsearch/common/SuppressForbidden.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/SuppressForbidden.java rename to libs/core/src/main/java/org/elasticsearch/common/SuppressForbidden.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/collect/Tuple.java b/libs/core/src/main/java/org/elasticsearch/common/collect/Tuple.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/collect/Tuple.java rename to libs/core/src/main/java/org/elasticsearch/common/collect/Tuple.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/concurrent/CompletableContext.java 
b/libs/core/src/main/java/org/elasticsearch/common/concurrent/CompletableContext.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/concurrent/CompletableContext.java rename to libs/core/src/main/java/org/elasticsearch/common/concurrent/CompletableContext.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java b/libs/core/src/main/java/org/elasticsearch/common/io/PathUtils.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/io/PathUtils.java rename to libs/core/src/main/java/org/elasticsearch/common/io/PathUtils.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/libs/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java rename to libs/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java b/libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java rename to libs/core/src/main/java/org/elasticsearch/core/internal/io/IOUtils.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/Streams.java b/libs/core/src/main/java/org/elasticsearch/core/internal/io/Streams.java similarity index 100% rename from libs/elasticsearch-core/src/main/java/org/elasticsearch/core/internal/io/Streams.java rename to libs/core/src/main/java/org/elasticsearch/core/internal/io/Streams.java diff --git a/libs/elasticsearch-core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java b/libs/core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java similarity index 100% rename from 
libs/elasticsearch-core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java rename to libs/core/src/main/java9/org/elasticsearch/core/internal/io/Streams.java diff --git a/libs/elasticsearch-core/src/test/eclipse-build.gradle b/libs/core/src/test/eclipse-build.gradle similarity index 54% rename from libs/elasticsearch-core/src/test/eclipse-build.gradle rename to libs/core/src/test/eclipse-build.gradle index f43f019941b..b5fe0417428 100644 --- a/libs/elasticsearch-core/src/test/eclipse-build.gradle +++ b/libs/core/src/test/eclipse-build.gradle @@ -1,6 +1,6 @@ -// this is just shell gradle file for eclipse to have separate projects for elasticsearch-core src and tests +// this is just shell gradle file for eclipse to have separate projects for core src and tests apply from: '../../build.gradle' dependencies { - testCompile project(':libs:elasticsearch-core') + testCompile project(':libs:core') } diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java b/libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java similarity index 100% rename from libs/elasticsearch-core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java rename to libs/core/src/test/java/org/elasticsearch/bootstrap/JarHellTests.java diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/collect/TupleTests.java b/libs/core/src/test/java/org/elasticsearch/common/collect/TupleTests.java similarity index 100% rename from libs/elasticsearch-core/src/test/java/org/elasticsearch/common/collect/TupleTests.java rename to libs/core/src/test/java/org/elasticsearch/common/collect/TupleTests.java diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java similarity index 100% rename from libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java rename to 
libs/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java b/libs/core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java similarity index 100% rename from libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java rename to libs/core/src/test/java/org/elasticsearch/core/internal/io/IOUtilsTests.java diff --git a/libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java b/libs/core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java similarity index 100% rename from libs/elasticsearch-core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java rename to libs/core/src/test/java/org/elasticsearch/core/internal/io/StreamsTests.java diff --git a/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar b/libs/core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar similarity index 100% rename from libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar rename to libs/core/src/test/resources/org/elasticsearch/bootstrap/duplicate-classes.jar diff --git a/libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar b/libs/core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar similarity index 100% rename from libs/elasticsearch-core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar rename to libs/core/src/test/resources/org/elasticsearch/bootstrap/duplicate-xmlbeans-classes.jar diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptingSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptingSelector.java deleted file mode 100644 index da64020daa8..00000000000 --- 
a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptingSelector.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.stream.Collectors; - -/** - * Selector implementation that handles {@link NioServerSocketChannel}. It's main piece of functionality is - * accepting new channels. 
- */ -public class AcceptingSelector extends ESSelector { - - private final AcceptorEventHandler eventHandler; - private final ConcurrentLinkedQueue newChannels = new ConcurrentLinkedQueue<>(); - - public AcceptingSelector(AcceptorEventHandler eventHandler) throws IOException { - super(eventHandler); - this.eventHandler = eventHandler; - } - - public AcceptingSelector(AcceptorEventHandler eventHandler, Selector selector) throws IOException { - super(eventHandler, selector); - this.eventHandler = eventHandler; - } - - @Override - void processKey(SelectionKey selectionKey) { - ServerChannelContext channelContext = (ServerChannelContext) selectionKey.attachment(); - if (selectionKey.isAcceptable()) { - try { - eventHandler.acceptChannel(channelContext); - } catch (IOException e) { - eventHandler.acceptException(channelContext, e); - } - } - } - - @Override - void preSelect() { - setUpNewServerChannels(); - } - - @Override - void cleanup() { - channelsToClose.addAll(newChannels.stream().map(NioServerSocketChannel::getContext).collect(Collectors.toList())); - } - - /** - * Schedules a NioServerSocketChannel to be registered with this selector. The channel will by queued and - * eventually registered next time through the event loop. 
- * - * @param serverSocketChannel the channel to register - */ - public void scheduleForRegistration(NioServerSocketChannel serverSocketChannel) { - newChannels.add(serverSocketChannel); - ensureSelectorOpenForEnqueuing(newChannels, serverSocketChannel); - wakeup(); - } - - private void setUpNewServerChannels() { - NioServerSocketChannel newChannel; - while ((newChannel = this.newChannels.poll()) != null) { - ServerChannelContext context = newChannel.getContext(); - assert context.getSelector() == this : "The channel must be registered with the selector with which it was created"; - try { - if (context.isOpen()) { - eventHandler.handleRegistration(context); - } else { - eventHandler.registrationException(context, new ClosedChannelException()); - } - } catch (Exception e) { - eventHandler.registrationException(context, e); - } - } - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java deleted file mode 100644 index f3aab9c9be1..00000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/AcceptorEventHandler.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.channels.SelectionKey; -import java.util.function.Consumer; -import java.util.function.Supplier; - -/** - * Event handler designed to handle events from server sockets - */ -public class AcceptorEventHandler extends EventHandler { - - private final Supplier selectorSupplier; - - public AcceptorEventHandler(Supplier selectorSupplier, Consumer exceptionHandler) { - super(exceptionHandler); - this.selectorSupplier = selectorSupplier; - } - - /** - * This method is called when a NioServerSocketChannel is being registered with the selector. It should - * only be called once per channel. - * - * @param context that was registered - */ - protected void handleRegistration(ServerChannelContext context) throws IOException { - context.register(); - SelectionKey selectionKey = context.getSelectionKey(); - selectionKey.attach(context); - SelectionKeyUtils.setAcceptInterested(selectionKey); - } - - /** - * This method is called when an attempt to register a server channel throws an exception. - * - * @param context that was registered - * @param exception that occurred - */ - protected void registrationException(ServerChannelContext context, Exception exception) { - context.handleException(exception); - } - - /** - * This method is called when a server channel signals it is ready to accept a connection. All of the - * accept logic should occur in this call. - * - * @param context that can accept a connection - */ - protected void acceptChannel(ServerChannelContext context) throws IOException { - context.acceptChannels(selectorSupplier); - } - - /** - * This method is called when an attempt to accept a connection throws an exception. 
- * - * @param context that accepting a connection - * @param exception that occurred - */ - protected void acceptException(ServerChannelContext context, Exception exception) { - context.handleException(exception); - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java deleted file mode 100644 index c6cf97d10d3..00000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ESSelector.java +++ /dev/null @@ -1,248 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import java.io.Closeable; -import java.io.IOException; -import java.nio.channels.CancelledKeyException; -import java.nio.channels.ClosedSelectorException; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.util.Iterator; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.Future; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReentrantLock; -import java.util.stream.Collectors; - -/** - * This is a basic selector abstraction. This selector wraps a raw nio {@link Selector}. When you call - * {@link #runLoop()}, the selector will run until {@link #close()} is called. This instance handles closing - * of channels. Users should call {@link #queueChannelClose(NioChannel)} to schedule a channel for close by - * this selector. - *

- * Children of this class should implement the specific {@link #processKey(SelectionKey)}, - * {@link #preSelect()}, and {@link #cleanup()} functionality. - */ -public abstract class ESSelector implements Closeable { - - final Selector selector; - final ConcurrentLinkedQueue> channelsToClose = new ConcurrentLinkedQueue<>(); - - private final EventHandler eventHandler; - private final ReentrantLock runLock = new ReentrantLock(); - private final CountDownLatch exitedLoop = new CountDownLatch(1); - private final AtomicBoolean isClosed = new AtomicBoolean(false); - private final CompletableFuture isRunningFuture = new CompletableFuture<>(); - private volatile Thread thread; - - ESSelector(EventHandler eventHandler) throws IOException { - this(eventHandler, Selector.open()); - } - - ESSelector(EventHandler eventHandler, Selector selector) { - this.eventHandler = eventHandler; - this.selector = selector; - } - - /** - * Starts this selector. The selector will run until {@link #close()} is called. 
- */ - public void runLoop() { - if (runLock.tryLock()) { - isRunningFuture.complete(null); - try { - setThread(); - while (isOpen()) { - singleLoop(); - } - } finally { - try { - cleanupAndCloseChannels(); - } finally { - try { - selector.close(); - } catch (IOException e) { - eventHandler.selectorException(e); - } finally { - runLock.unlock(); - exitedLoop.countDown(); - } - } - } - } else { - throw new IllegalStateException("selector is already running"); - } - } - - void singleLoop() { - try { - closePendingChannels(); - preSelect(); - - int ready = selector.select(300); - if (ready > 0) { - Set selectionKeys = selector.selectedKeys(); - Iterator keyIterator = selectionKeys.iterator(); - while (keyIterator.hasNext()) { - SelectionKey sk = keyIterator.next(); - keyIterator.remove(); - if (sk.isValid()) { - try { - processKey(sk); - } catch (CancelledKeyException cke) { - eventHandler.genericChannelException((ChannelContext) sk.attachment(), cke); - } - } else { - eventHandler.genericChannelException((ChannelContext) sk.attachment(), new CancelledKeyException()); - } - } - } - } catch (ClosedSelectorException e) { - if (isOpen()) { - throw e; - } - } catch (IOException e) { - eventHandler.selectorException(e); - } catch (Exception e) { - eventHandler.uncaughtException(e); - } - } - - void cleanupAndCloseChannels() { - cleanup(); - channelsToClose.addAll(selector.keys().stream().map(sk -> (ChannelContext) sk.attachment()).collect(Collectors.toList())); - closePendingChannels(); - } - - /** - * Called by the base {@link ESSelector} class when there is a {@link SelectionKey} to be handled. - * - * @param selectionKey the key to be handled - * @throws CancelledKeyException thrown when the key has already been cancelled - */ - abstract void processKey(SelectionKey selectionKey) throws CancelledKeyException; - - /** - * Called immediately prior to a raw {@link Selector#select()} call. 
Should be used to implement - * channel registration, handling queued writes, and other work that is not specifically processing - * a selection key. - */ - abstract void preSelect(); - - /** - * Called once as the selector is being closed. - */ - abstract void cleanup(); - - void setThread() { - thread = Thread.currentThread(); - } - - public boolean isOnCurrentThread() { - return Thread.currentThread() == thread; - } - - public void assertOnSelectorThread() { - assert isOnCurrentThread() : "Must be on selector thread to perform this operation. Currently on thread [" - + Thread.currentThread().getName() + "]."; - } - - void wakeup() { - // TODO: Do we need the wakeup optimizations that some other libraries use? - selector.wakeup(); - } - - @Override - public void close() throws IOException { - if (isClosed.compareAndSet(false, true)) { - wakeup(); - if (isRunning()) { - try { - exitedLoop.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IllegalStateException("Thread was interrupted while waiting for selector to close", e); - } - } else if (selector.isOpen()) { - selector.close(); - } - } - } - - public void queueChannelClose(NioChannel channel) { - ChannelContext context = channel.getContext(); - assert context.getSelector() == this : "Must schedule a channel for closure with its selector"; - channelsToClose.offer(context); - ensureSelectorOpenForEnqueuing(channelsToClose, context); - wakeup(); - } - - public Selector rawSelector() { - return selector; - } - - public boolean isOpen() { - return isClosed.get() == false; - } - - public boolean isRunning() { - return runLock.isLocked(); - } - - public Future isRunningFuture() { - return isRunningFuture; - } - - /** - * This is a convenience method to be called after some object (normally channels) are enqueued with this - * selector. This method will check if the selector is still open. If it is open, normal operation can - * proceed. 
- * - * If the selector is closed, then we attempt to remove the object from the queue. If the removal - * succeeds then we throw an {@link IllegalStateException} indicating that normal operation failed. If - * the object cannot be removed from the queue, then the object has already been handled by the selector - * and operation can proceed normally. - * - * If this method is called from the selector thread, we will not throw an exception as the selector - * thread can manipulate its queues internally even if it is no longer open. - * - * @param queue the queue to which the object was added - * @param objectAdded the objected added - * @param the object type - */ - void ensureSelectorOpenForEnqueuing(ConcurrentLinkedQueue queue, O objectAdded) { - if (isOpen() == false && isOnCurrentThread() == false) { - if (queue.remove(objectAdded)) { - throw new IllegalStateException("selector is already closed"); - } - } - } - - private void closePendingChannels() { - ChannelContext channelContext; - while ((channelContext = channelsToClose.poll()) != null) { - eventHandler.handleClose(channelContext); - } - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java deleted file mode 100644 index cb4d43af4fd..00000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/EventHandler.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.channels.Selector; -import java.util.function.Consumer; - -public abstract class EventHandler { - - protected final Consumer exceptionHandler; - - protected EventHandler(Consumer exceptionHandler) { - this.exceptionHandler = exceptionHandler; - } - - /** - * This method handles an IOException that was thrown during a call to {@link Selector#select(long)} or - * {@link Selector#close()}. - * - * @param exception the exception - */ - protected void selectorException(IOException exception) { - exceptionHandler.accept(exception); - } - - /** - * This method handles an exception that was uncaught during a select loop. - * - * @param exception that was uncaught - */ - protected void uncaughtException(Exception exception) { - Thread thread = Thread.currentThread(); - thread.getUncaughtExceptionHandler().uncaughtException(thread, exception); - } - - /** - * This method handles the closing of an NioChannel - * - * @param context that should be closed - */ - protected void handleClose(ChannelContext context) { - try { - context.closeFromSelector(); - } catch (IOException e) { - closeException(context, e); - } - assert context.isOpen() == false : "Should always be done as we are on the selector thread"; - } - - /** - * This method is called when an attempt to close a channel throws an exception. 
- * - * @param channel that was being closed - * @param exception that occurred - */ - protected void closeException(ChannelContext channel, Exception exception) { - channel.handleException(exception); - } - - /** - * This method is called when handling an event from a channel fails due to an unexpected exception. - * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw - * {@link java.nio.channels.CancelledKeyException}. - * - * @param channel that caused the exception - * @param exception that was thrown - */ - protected void genericChannelException(ChannelContext channel, Exception exception) { - channel.handleException(exception); - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java deleted file mode 100644 index 3f2fd44259c..00000000000 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioGroup.java +++ /dev/null @@ -1,146 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.nio.utils.ExceptionsHelper; - -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -/** - * The NioGroup is a group of selectors for interfacing with java nio. When it is started it will create the - * configured number of socket and acceptor selectors. Each selector will be running in a dedicated thread. - * Server connections can be bound using the {@link #bindServerChannel(InetSocketAddress, ChannelFactory)} - * method. Client connections can be opened using the {@link #openChannel(InetSocketAddress, ChannelFactory)} - * method. - *

- * The logic specific to a particular channel is provided by the {@link ChannelFactory} passed to the method - * when the channel is created. This is what allows an NioGroup to support different channel types. - */ -public class NioGroup implements AutoCloseable { - - - private final ArrayList acceptors; - private final RoundRobinSupplier acceptorSupplier; - - private final ArrayList socketSelectors; - private final RoundRobinSupplier socketSelectorSupplier; - - private final AtomicBoolean isOpen = new AtomicBoolean(true); - - public NioGroup(ThreadFactory acceptorThreadFactory, int acceptorCount, - Function, AcceptorEventHandler> acceptorEventHandlerFunction, - ThreadFactory socketSelectorThreadFactory, int socketSelectorCount, - Supplier socketEventHandlerFunction) throws IOException { - acceptors = new ArrayList<>(acceptorCount); - socketSelectors = new ArrayList<>(socketSelectorCount); - - try { - for (int i = 0; i < socketSelectorCount; ++i) { - SocketSelector selector = new SocketSelector(socketEventHandlerFunction.get()); - socketSelectors.add(selector); - } - startSelectors(socketSelectors, socketSelectorThreadFactory); - - for (int i = 0; i < acceptorCount; ++i) { - SocketSelector[] childSelectors = this.socketSelectors.toArray(new SocketSelector[this.socketSelectors.size()]); - Supplier selectorSupplier = new RoundRobinSupplier<>(childSelectors); - AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(selectorSupplier)); - acceptors.add(acceptor); - } - startSelectors(acceptors, acceptorThreadFactory); - } catch (Exception e) { - try { - close(); - } catch (Exception e1) { - e.addSuppressed(e1); - } - throw e; - } - - socketSelectorSupplier = new RoundRobinSupplier<>(socketSelectors.toArray(new SocketSelector[socketSelectors.size()])); - acceptorSupplier = new RoundRobinSupplier<>(acceptors.toArray(new AcceptingSelector[acceptors.size()])); - } - - public S bindServerChannel(InetSocketAddress address, ChannelFactory 
factory) - throws IOException { - ensureOpen(); - if (acceptors.isEmpty()) { - throw new IllegalArgumentException("There are no acceptors configured. Without acceptors, server channels are not supported."); - } - return factory.openNioServerSocketChannel(address, acceptorSupplier); - } - - public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { - ensureOpen(); - return factory.openNioChannel(address, socketSelectorSupplier); - } - - @Override - public void close() throws IOException { - if (isOpen.compareAndSet(true, false)) { - List toClose = Stream.concat(acceptors.stream(), socketSelectors.stream()).collect(Collectors.toList()); - List closingExceptions = new ArrayList<>(); - for (ESSelector selector : toClose) { - try { - selector.close(); - } catch (IOException e) { - closingExceptions.add(e); - } - } - ExceptionsHelper.rethrowAndSuppress(closingExceptions); - } - } - - private static void startSelectors(Iterable selectors, ThreadFactory threadFactory) { - for (ESSelector acceptor : selectors) { - if (acceptor.isRunning() == false) { - threadFactory.newThread(acceptor::runLoop).start(); - try { - acceptor.isRunningFuture().get(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new IllegalStateException("Interrupted while waiting for selector to start.", e); - } catch (ExecutionException e) { - if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else { - throw new RuntimeException("Exception during selector start.", e); - } - } - } - } - } - - private void ensureOpen() { - if (isOpen.get() == false) { - throw new IllegalStateException("NioGroup is closed."); - } - } -} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java deleted file mode 100644 index 30ef7b317a3..00000000000 --- 
a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketSelector.java +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import java.io.IOException; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.ClosedSelectorException; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.function.BiConsumer; - -/** - * Selector implementation that handles {@link NioSocketChannel}. It's main piece of functionality is - * handling connect, read, and write events. 
- */ -public class SocketSelector extends ESSelector { - - private final ConcurrentLinkedQueue newChannels = new ConcurrentLinkedQueue<>(); - private final ConcurrentLinkedQueue queuedWrites = new ConcurrentLinkedQueue<>(); - private final SocketEventHandler eventHandler; - - public SocketSelector(SocketEventHandler eventHandler) throws IOException { - super(eventHandler); - this.eventHandler = eventHandler; - } - - public SocketSelector(SocketEventHandler eventHandler, Selector selector) throws IOException { - super(eventHandler, selector); - this.eventHandler = eventHandler; - } - - @Override - void processKey(SelectionKey selectionKey) { - SocketChannelContext channelContext = (SocketChannelContext) selectionKey.attachment(); - int ops = selectionKey.readyOps(); - if ((ops & SelectionKey.OP_CONNECT) != 0) { - attemptConnect(channelContext, true); - } - - if (channelContext.isConnectComplete()) { - if ((ops & SelectionKey.OP_WRITE) != 0) { - handleWrite(channelContext); - } - - if ((ops & SelectionKey.OP_READ) != 0) { - handleRead(channelContext); - } - } - - eventHandler.postHandling(channelContext); - } - - @Override - void preSelect() { - setUpNewChannels(); - handleQueuedWrites(); - } - - @Override - void cleanup() { - WriteOperation op; - while ((op = queuedWrites.poll()) != null) { - executeFailedListener(op.getListener(), new ClosedSelectorException()); - } - channelsToClose.addAll(newChannels); - } - - /** - * Schedules a NioSocketChannel to be registered by this selector. The channel will by queued and eventually - * registered next time through the event loop. - * @param nioSocketChannel the channel to register - */ - public void scheduleForRegistration(NioSocketChannel nioSocketChannel) { - SocketChannelContext channelContext = nioSocketChannel.getContext(); - newChannels.offer(channelContext); - ensureSelectorOpenForEnqueuing(newChannels, channelContext); - wakeup(); - } - - - /** - * Queues a write operation to be handled by the event loop. 
This can be called by any thread and is the - * api available for non-selector threads to schedule writes. - * - * @param writeOperation to be queued - */ - public void queueWrite(WriteOperation writeOperation) { - queuedWrites.offer(writeOperation); - if (isOpen() == false) { - boolean wasRemoved = queuedWrites.remove(writeOperation); - if (wasRemoved) { - writeOperation.getListener().accept(null, new ClosedSelectorException()); - } - } else { - wakeup(); - } - } - - /** - * Queues a write operation directly in a channel's buffer. Channel buffers are only safe to be accessed - * by the selector thread. As a result, this method should only be called by the selector thread. - * - * @param writeOperation to be queued in a channel's buffer - */ - public void queueWriteInChannelBuffer(WriteOperation writeOperation) { - assertOnSelectorThread(); - SocketChannelContext context = writeOperation.getChannel(); - try { - SelectionKeyUtils.setWriteInterested(context.getSelectionKey()); - context.queueWriteOperation(writeOperation); - } catch (Exception e) { - executeFailedListener(writeOperation.getListener(), e); - } - } - - /** - * Executes a success listener with consistent exception handling. This can only be called from current - * selector thread. - * - * @param listener to be executed - * @param value to provide to listener - */ - public void executeListener(BiConsumer listener, V value) { - assertOnSelectorThread(); - try { - listener.accept(value, null); - } catch (Exception e) { - eventHandler.listenerException(e); - } - } - - /** - * Executes a failed listener with consistent exception handling. This can only be called from current - * selector thread. 
- * - * @param listener to be executed - * @param exception to provide to listener - */ - public void executeFailedListener(BiConsumer listener, Exception exception) { - assertOnSelectorThread(); - try { - listener.accept(null, exception); - } catch (Exception e) { - eventHandler.listenerException(e); - } - } - - private void handleWrite(SocketChannelContext context) { - try { - eventHandler.handleWrite(context); - } catch (Exception e) { - eventHandler.writeException(context, e); - } - } - - private void handleRead(SocketChannelContext context) { - try { - eventHandler.handleRead(context); - } catch (Exception e) { - eventHandler.readException(context, e); - } - } - - private void handleQueuedWrites() { - WriteOperation writeOperation; - while ((writeOperation = queuedWrites.poll()) != null) { - if (writeOperation.getChannel().isOpen()) { - queueWriteInChannelBuffer(writeOperation); - } else { - executeFailedListener(writeOperation.getListener(), new ClosedChannelException()); - } - } - } - - private void setUpNewChannels() { - SocketChannelContext channelContext; - while ((channelContext = this.newChannels.poll()) != null) { - setupChannel(channelContext); - } - } - - private void setupChannel(SocketChannelContext context) { - assert context.getSelector() == this : "The channel must be registered with the selector with which it was created"; - try { - if (context.isOpen()) { - eventHandler.handleRegistration(context); - attemptConnect(context, false); - } else { - eventHandler.registrationException(context, new ClosedChannelException()); - } - } catch (Exception e) { - eventHandler.registrationException(context, e); - } - } - - private void attemptConnect(SocketChannelContext context, boolean connectEvent) { - try { - eventHandler.handleConnect(context); - if (connectEvent && context.isConnectComplete() == false) { - eventHandler.connectException(context, new IOException("Received OP_CONNECT but connect failed")); - } - } catch (Exception e) { - 
eventHandler.connectException(context, e); - } - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptingSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptingSelectorTests.java deleted file mode 100644 index 7536ad9d1e1..00000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptingSelectorTests.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; -import java.security.PrivilegedActionException; -import java.util.Collections; -import java.util.HashSet; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class AcceptingSelectorTests extends ESTestCase { - - private AcceptingSelector selector; - private NioServerSocketChannel serverChannel; - private AcceptorEventHandler eventHandler; - private TestSelectionKey selectionKey; - private Selector rawSelector; - private ServerChannelContext context; - - @Before - public void setUp() throws Exception { - super.setUp(); - - eventHandler = mock(AcceptorEventHandler.class); - serverChannel = mock(NioServerSocketChannel.class); - - rawSelector = mock(Selector.class); - selector = new AcceptingSelector(eventHandler, rawSelector); - this.selector.setThread(); - - context = mock(ServerChannelContext.class); - selectionKey = new TestSelectionKey(0); - selectionKey.attach(context); - when(context.getSelectionKey()).thenReturn(selectionKey); - when(context.getSelector()).thenReturn(selector); - when(context.isOpen()).thenReturn(true); - when(serverChannel.getContext()).thenReturn(context); - } - - public void testRegisteredChannel() throws IOException { - selector.scheduleForRegistration(serverChannel); - - selector.preSelect(); - - verify(eventHandler).handleRegistration(context); - } - - public void testClosedChannelWillNotBeRegistered() { - when(context.isOpen()).thenReturn(false); - selector.scheduleForRegistration(serverChannel); - - selector.preSelect(); - - 
verify(eventHandler).registrationException(same(context), any(ClosedChannelException.class)); - } - - public void testRegisterChannelFailsDueToException() throws Exception { - selector.scheduleForRegistration(serverChannel); - - ClosedChannelException closedChannelException = new ClosedChannelException(); - doThrow(closedChannelException).when(eventHandler).handleRegistration(context); - - selector.preSelect(); - - verify(eventHandler).registrationException(context, closedChannelException); - } - - public void testAcceptEvent() throws IOException { - selectionKey.setReadyOps(SelectionKey.OP_ACCEPT); - - selector.processKey(selectionKey); - - verify(eventHandler).acceptChannel(context); - } - - public void testAcceptException() throws IOException { - selectionKey.setReadyOps(SelectionKey.OP_ACCEPT); - IOException ioException = new IOException(); - - doThrow(ioException).when(eventHandler).acceptChannel(context); - - selector.processKey(selectionKey); - - verify(eventHandler).acceptException(context, ioException); - } - - public void testCleanup() throws IOException { - selector.scheduleForRegistration(serverChannel); - - selector.preSelect(); - - TestSelectionKey key = new TestSelectionKey(0); - key.attach(context); - when(rawSelector.keys()).thenReturn(new HashSet<>(Collections.singletonList(key))); - - selector.cleanupAndCloseChannels(); - - verify(eventHandler).handleClose(context); - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java deleted file mode 100644 index a162a8e234c..00000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/AcceptorEventHandlerTests.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.nio; - -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.channels.SelectionKey; -import java.nio.channels.ServerSocketChannel; -import java.nio.channels.SocketChannel; -import java.util.ArrayList; -import java.util.function.Consumer; - -import static org.mockito.Matchers.same; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class AcceptorEventHandlerTests extends ESTestCase { - - private AcceptorEventHandler handler; - private ChannelFactory channelFactory; - private NioServerSocketChannel channel; - private DoNotRegisterContext context; - private RoundRobinSupplier selectorSupplier; - - @Before - @SuppressWarnings("unchecked") - public void setUpHandler() throws IOException { - channelFactory = mock(ChannelFactory.class); - ArrayList selectors = new ArrayList<>(); - selectors.add(mock(SocketSelector.class)); - selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()])); - handler = new AcceptorEventHandler(selectorSupplier, mock(Consumer.class)); - - channel = new NioServerSocketChannel(mock(ServerSocketChannel.class)); - context = new DoNotRegisterContext(channel, mock(AcceptingSelector.class), 
mock(Consumer.class)); - channel.setContext(context); - } - - public void testHandleRegisterSetsOP_ACCEPTInterest() throws IOException { - assertNull(context.getSelectionKey()); - - handler.handleRegistration(context); - - assertEquals(SelectionKey.OP_ACCEPT, channel.getContext().getSelectionKey().interestOps()); - } - - public void testRegisterAddsAttachment() throws IOException { - assertNull(context.getSelectionKey()); - - handler.handleRegistration(context); - - assertEquals(context, context.getSelectionKey().attachment()); - } - - public void testHandleAcceptCallsChannelFactory() throws IOException { - NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class)); - NioSocketChannel nullChannel = null; - when(channelFactory.acceptNioChannel(same(context), same(selectorSupplier))).thenReturn(childChannel, nullChannel); - - handler.acceptChannel(context); - - verify(channelFactory, times(2)).acceptNioChannel(same(context), same(selectorSupplier)); - } - - @SuppressWarnings("unchecked") - public void testHandleAcceptCallsServerAcceptCallback() throws IOException { - NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class)); - SocketChannelContext childContext = mock(SocketChannelContext.class); - childChannel.setContext(childContext); - ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); - channel = new NioServerSocketChannel(mock(ServerSocketChannel.class)); - channel.setContext(serverChannelContext); - when(serverChannelContext.getChannel()).thenReturn(channel); - when(channelFactory.acceptNioChannel(same(context), same(selectorSupplier))).thenReturn(childChannel); - - handler.acceptChannel(serverChannelContext); - - verify(serverChannelContext).acceptChannels(selectorSupplier); - } - - public void testAcceptExceptionCallsExceptionHandler() throws IOException { - ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); - IOException exception = new IOException(); - 
handler.acceptException(serverChannelContext, exception); - - verify(serverChannelContext).handleException(exception); - } - - private class DoNotRegisterContext extends ServerChannelContext { - - - @SuppressWarnings("unchecked") - DoNotRegisterContext(NioServerSocketChannel channel, AcceptingSelector selector, Consumer acceptor) { - super(channel, channelFactory, selector, acceptor, mock(Consumer.class)); - } - - @Override - public void register() { - setSelectionKey(new TestSelectionKey(0)); - } - } -} diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java deleted file mode 100644 index 05b84345f45..00000000000 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ESSelectorTests.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.nio; - -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.io.IOException; -import java.nio.channels.CancelledKeyException; -import java.nio.channels.ClosedSelectorException; -import java.nio.channels.SelectionKey; -import java.nio.channels.Selector; - -import static org.mockito.Matchers.anyInt; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -public class ESSelectorTests extends ESTestCase { - - private ESSelector selector; - private EventHandler handler; - private Selector rawSelector; - - @Before - public void setUp() throws Exception { - super.setUp(); - handler = mock(EventHandler.class); - rawSelector = mock(Selector.class); - selector = new TestSelector(handler, rawSelector); - } - - @SuppressWarnings({"unchecked", "rawtypes"}) - public void testQueueChannelForClosed() throws IOException { - NioChannel channel = mock(NioChannel.class); - ChannelContext context = mock(ChannelContext.class); - when(channel.getContext()).thenReturn(context); - when(context.getSelector()).thenReturn(selector); - - selector.queueChannelClose(channel); - - selector.singleLoop(); - - verify(handler).handleClose(context); - } - - public void testSelectorClosedExceptionIsNotCaughtWhileRunning() throws IOException { - boolean closedSelectorExceptionCaught = false; - when(rawSelector.select(anyInt())).thenThrow(new ClosedSelectorException()); - try { - this.selector.singleLoop(); - } catch (ClosedSelectorException e) { - closedSelectorExceptionCaught = true; - } - - assertTrue(closedSelectorExceptionCaught); - } - - public void testIOExceptionWhileSelect() throws IOException { - IOException ioException = new IOException(); - - when(rawSelector.select(anyInt())).thenThrow(ioException); - - this.selector.singleLoop(); - - verify(handler).selectorException(ioException); - } - - public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws 
IOException { - when(rawSelector.isOpen()).thenReturn(true); - selector.close(); - verify(rawSelector).close(); - } - - private static class TestSelector extends ESSelector { - - TestSelector(EventHandler eventHandler, Selector selector) throws IOException { - super(eventHandler, selector); - } - - @Override - void processKey(SelectionKey selectionKey) throws CancelledKeyException { - - } - - @Override - void preSelect() { - - } - - @Override - void cleanup() { - - } - } - -} diff --git a/libs/elasticsearch-nio/build.gradle b/libs/nio/build.gradle similarity index 89% rename from libs/elasticsearch-nio/build.gradle rename to libs/nio/build.gradle index f8b0b8fba13..43c9a133a3f 100644 --- a/libs/elasticsearch-nio/build.gradle +++ b/libs/nio/build.gradle @@ -39,7 +39,7 @@ dependencies { testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" - if (isEclipse == false || project.path == ":libs:elasticsearch-nio-tests") { + if (isEclipse == false || project.path == ":libs:nio-tests") { testCompile("org.elasticsearch.test:framework:${version}") { exclude group: 'org.elasticsearch', module: 'elasticsearch-nio' } @@ -49,7 +49,7 @@ dependencies { if (isEclipse) { // in eclipse the project is under a fake root, we need to change around the source sets sourceSets { - if (project.path == ":libs:elasticsearch-nio") { + if (project.path == ":libs:nio") { main.java.srcDirs = ['java'] main.resources.srcDirs = ['resources'] } else { @@ -60,7 +60,7 @@ if (isEclipse) { } forbiddenApisMain { - // elasticsearch-nio does not depend on core, so only jdk signatures should be checked + // nio does not depend on core, so only jdk signatures should be checked // es-all is not checked as we connect and accept sockets signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } diff --git a/libs/elasticsearch-nio/src/main/eclipse-build.gradle b/libs/nio/src/main/eclipse-build.gradle similarity index 69% rename from 
libs/elasticsearch-nio/src/main/eclipse-build.gradle rename to libs/nio/src/main/eclipse-build.gradle index 5aa089de19e..ae3ab2b13a1 100644 --- a/libs/elasticsearch-nio/src/main/eclipse-build.gradle +++ b/libs/nio/src/main/eclipse-build.gradle @@ -1,3 +1,3 @@ -// this is just shell gradle file for eclipse to have separate projects for elasticsearch-nio src and tests +// this is just shell gradle file for eclipse to have separate projects for nio src and tests apply from: '../../build.gradle' diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java similarity index 96% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java rename to libs/nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java index ef1e188a22e..a82d381951b 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -24,7 +24,7 @@ import java.util.function.Consumer; public class BytesChannelContext extends SocketChannelContext { - public BytesChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + public BytesChannelContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, ReadWriteHandler handler, InboundChannelBuffer channelBuffer) { super(channel, selector, exceptionHandler, handler, channelBuffer); } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java rename to libs/nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java 
b/libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java similarity index 98% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java rename to libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java index 93930bbabf0..e3702c2880a 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/ChannelContext.java @@ -105,7 +105,7 @@ public abstract class ChannelContext supplier) throws IOException { + public Socket openNioChannel(InetSocketAddress remoteAddress, Supplier supplier) throws IOException { SocketChannel rawChannel = rawChannelFactory.openNioChannel(remoteAddress); - SocketSelector selector = supplier.get(); + NioSelector selector = supplier.get(); Socket channel = internalCreateChannel(selector, rawChannel); scheduleChannel(channel, selector); return channel; } - public Socket acceptNioChannel(ServerChannelContext serverContext, Supplier supplier) throws IOException { + public Socket acceptNioChannel(ServerChannelContext serverContext, Supplier supplier) throws IOException { SocketChannel rawChannel = rawChannelFactory.acceptNioChannel(serverContext); // Null is returned if there are no pending sockets to accept if (rawChannel == null) { return null; } else { - SocketSelector selector = supplier.get(); + NioSelector selector = supplier.get(); Socket channel = internalCreateChannel(selector, rawChannel); scheduleChannel(channel, selector); return channel; } } - public ServerSocket openNioServerSocketChannel(InetSocketAddress address, Supplier supplier) throws IOException { + public ServerSocket openNioServerSocketChannel(InetSocketAddress address, Supplier supplier) throws IOException { ServerSocketChannel rawChannel = rawChannelFactory.openNioServerSocketChannel(address); - AcceptingSelector selector = supplier.get(); + NioSelector selector = supplier.get(); ServerSocket serverChannel = 
internalCreateServerChannel(selector, rawChannel); scheduleServerChannel(serverChannel, selector); return serverChannel; @@ -81,7 +81,7 @@ public abstract class ChannelFactory exceptionHandler) { - super(exceptionHandler); + protected final Consumer exceptionHandler; + private final Supplier selectorSupplier; + + public EventHandler(Consumer exceptionHandler, Supplier selectorSupplier) { + this.exceptionHandler = exceptionHandler; + this.selectorSupplier = selectorSupplier; } /** - * This method is called when a NioSocketChannel is successfully registered. It should only be called - * once per channel. + * This method is called when a server channel signals it is ready to accept a connection. All of the + * accept logic should occur in this call. + * + * @param context that can accept a connection + */ + protected void acceptChannel(ServerChannelContext context) throws IOException { + context.acceptChannels(selectorSupplier); + } + + /** + * This method is called when an attempt to accept a connection throws an exception. + * + * @param context that accepting a connection + * @param exception that occurred + */ + protected void acceptException(ServerChannelContext context, Exception exception) { + context.handleException(exception); + } + + /** + * This method is called when a NioChannel is being registered with the selector. It should + * only be called once per channel. 
* * @param context that was registered */ - protected void handleRegistration(SocketChannelContext context) throws IOException { + protected void handleRegistration(ChannelContext context) throws IOException { context.register(); SelectionKey selectionKey = context.getSelectionKey(); selectionKey.attach(context); - if (context.readyForFlush()) { - SelectionKeyUtils.setConnectReadAndWriteInterested(selectionKey); + if (context instanceof SocketChannelContext) { + if (((SocketChannelContext) context).readyForFlush()) { + SelectionKeyUtils.setConnectReadAndWriteInterested(context.getSelectionKey()); + } else { + SelectionKeyUtils.setConnectAndReadInterested(context.getSelectionKey()); + } } else { - SelectionKeyUtils.setConnectAndReadInterested(selectionKey); + assert context instanceof ServerChannelContext : "If not SocketChannelContext the context must be a ServerChannelContext"; + SelectionKeyUtils.setAcceptInterested(context.getSelectionKey()); } } @@ -55,7 +83,7 @@ public class SocketEventHandler extends EventHandler { * @param context that was registered * @param exception that occurred */ - protected void registrationException(SocketChannelContext context, Exception exception) { + protected void registrationException(ChannelContext context, Exception exception) { context.handleException(exception); } @@ -131,6 +159,9 @@ public class SocketEventHandler extends EventHandler { } /** + * This method is called after ready events (READ, ACCEPT, WRITE, CONNECT) have been handled for a + * channel. + * * @param context that was handled */ protected void postHandling(SocketChannelContext context) { @@ -147,4 +178,60 @@ public class SocketEventHandler extends EventHandler { } } } + + /** + * This method handles an IOException that was thrown during a call to {@link Selector#select(long)} or + * {@link Selector#close()}. 
+ * + * @param exception the exception + */ + protected void selectorException(IOException exception) { + exceptionHandler.accept(exception); + } + + /** + * This method handles an exception that was uncaught during a select loop. + * + * @param exception that was uncaught + */ + protected void uncaughtException(Exception exception) { + Thread thread = Thread.currentThread(); + thread.getUncaughtExceptionHandler().uncaughtException(thread, exception); + } + + /** + * This method handles the closing of an NioChannel + * + * @param context that should be closed + */ + protected void handleClose(ChannelContext context) { + try { + context.closeFromSelector(); + } catch (IOException e) { + closeException(context, e); + } + assert context.isOpen() == false : "Should always be done as we are on the selector thread"; + } + + /** + * This method is called when an attempt to close a channel throws an exception. + * + * @param channel that was being closed + * @param exception that occurred + */ + protected void closeException(ChannelContext channel, Exception exception) { + channel.handleException(exception); + } + + /** + * This method is called when handling an event from a channel fails due to an unexpected exception. + * An example would be if checking ready ops on a {@link java.nio.channels.SelectionKey} threw + * {@link java.nio.channels.CancelledKeyException}. 
+ * + * @param channel that caused the exception + * @param exception that was thrown + */ + protected void genericChannelException(ChannelContext channel, Exception exception) { + channel.handleException(exception); + } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java b/libs/nio/src/main/java/org/elasticsearch/nio/FlushOperation.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java rename to libs/nio/src/main/java/org/elasticsearch/nio/FlushOperation.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java b/libs/nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java rename to libs/nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java b/libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java rename to libs/nio/src/main/java/org/elasticsearch/nio/InboundChannelBuffer.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioChannel.java similarity index 95% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java rename to libs/nio/src/main/java/org/elasticsearch/nio/NioChannel.java index ea633bd3276..8262d9c87e3 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioChannel.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioChannel.java @@ -22,10 +22,11 @@ package org.elasticsearch.nio; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.channels.NetworkChannel; +import 
java.nio.channels.SocketChannel; import java.util.function.BiConsumer; /** - * This is a basic channel abstraction used by the {@link ESSelector}. + * This is a basic channel abstraction used by the {@link NioSelector}. *

* A channel is open once it is constructed. The channel remains open and {@link #isOpen()} will return * true until the channel is explicitly closed. diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java new file mode 100644 index 00000000000..fe1bc1cf404 --- /dev/null +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioGroup.java @@ -0,0 +1,179 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import org.elasticsearch.nio.utils.ExceptionsHelper; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * The NioGroup is a group of selectors for interfacing with java nio. When it is started it will create the + * configured number of selectors. Each selector will be running in a dedicated thread. 
Server connections + * can be bound using the {@link #bindServerChannel(InetSocketAddress, ChannelFactory)} method. Client + * connections can be opened using the {@link #openChannel(InetSocketAddress, ChannelFactory)} method. + *

+ * The logic specific to a particular channel is provided by the {@link ChannelFactory} passed to the method + * when the channel is created. This is what allows an NioGroup to support different channel types. + */ +public class NioGroup implements AutoCloseable { + + + private final List dedicatedAcceptors; + private final RoundRobinSupplier acceptorSupplier; + + private final List selectors; + private final RoundRobinSupplier selectorSupplier; + + private final AtomicBoolean isOpen = new AtomicBoolean(true); + + /** + * This will create an NioGroup with no dedicated acceptors. All server channels will be handled by the + * same selectors that are handling child channels. + * + * @param threadFactory factory to create selector threads + * @param selectorCount the number of selectors to be created + * @param eventHandlerFunction function for creating event handlers + * @throws IOException occurs if there is a problem while opening a java.nio.Selector + */ + public NioGroup(ThreadFactory threadFactory, int selectorCount, Function, EventHandler> eventHandlerFunction) + throws IOException { + this(null, 0, threadFactory, selectorCount, eventHandlerFunction); + } + + /** + * This will create an NioGroup with dedicated acceptors. All server channels will be handled by a group + * of selectors dedicated to accepting channels. These accepted channels will be handed off the + * non-server selectors. 
+ * + * @param acceptorThreadFactory factory to create acceptor selector threads + * @param dedicatedAcceptorCount the number of dedicated acceptor selectors to be created + * @param selectorThreadFactory factory to create non-acceptor selector threads + * @param selectorCount the number of non-acceptor selectors to be created + * @param eventHandlerFunction function for creating event handlers + * @throws IOException occurs if there is a problem while opening a java.nio.Selector + */ + public NioGroup(ThreadFactory acceptorThreadFactory, int dedicatedAcceptorCount, ThreadFactory selectorThreadFactory, int selectorCount, + Function, EventHandler> eventHandlerFunction) throws IOException { + dedicatedAcceptors = new ArrayList<>(dedicatedAcceptorCount); + selectors = new ArrayList<>(selectorCount); + + try { + List> suppliersToSet = new ArrayList<>(selectorCount); + for (int i = 0; i < selectorCount; ++i) { + RoundRobinSupplier supplier = new RoundRobinSupplier<>(); + suppliersToSet.add(supplier); + NioSelector selector = new NioSelector(eventHandlerFunction.apply(supplier)); + selectors.add(selector); + } + for (RoundRobinSupplier supplierToSet : suppliersToSet) { + supplierToSet.setSelectors(selectors.toArray(new NioSelector[0])); + assert supplierToSet.count() == selectors.size() : "Supplier should have same count as selector list."; + } + + for (int i = 0; i < dedicatedAcceptorCount; ++i) { + RoundRobinSupplier supplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + NioSelector acceptor = new NioSelector(eventHandlerFunction.apply(supplier)); + dedicatedAcceptors.add(acceptor); + } + + if (dedicatedAcceptorCount != 0) { + acceptorSupplier = new RoundRobinSupplier<>(dedicatedAcceptors.toArray(new NioSelector[0])); + } else { + acceptorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + } + selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + assert selectorCount == selectors.size() : 
"We need to have created all the selectors at this point."; + assert dedicatedAcceptorCount == dedicatedAcceptors.size() : "We need to have created all the acceptors at this point."; + + startSelectors(selectors, selectorThreadFactory); + startSelectors(dedicatedAcceptors, acceptorThreadFactory); + } catch (Exception e) { + try { + close(); + } catch (Exception e1) { + e.addSuppressed(e1); + } + throw e; + } + } + + public S bindServerChannel(InetSocketAddress address, ChannelFactory factory) + throws IOException { + ensureOpen(); + return factory.openNioServerSocketChannel(address, acceptorSupplier); + } + + public S openChannel(InetSocketAddress address, ChannelFactory factory) throws IOException { + ensureOpen(); + return factory.openNioChannel(address, selectorSupplier); + } + + @Override + public void close() throws IOException { + if (isOpen.compareAndSet(true, false)) { + List toClose = Stream.concat(dedicatedAcceptors.stream(), selectors.stream()).collect(Collectors.toList()); + List closingExceptions = new ArrayList<>(); + for (NioSelector selector : toClose) { + try { + selector.close(); + } catch (IOException e) { + closingExceptions.add(e); + } + } + ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + } + + private static void startSelectors(Iterable selectors, ThreadFactory threadFactory) { + for (NioSelector selector : selectors) { + if (selector.isRunning() == false) { + threadFactory.newThread(selector::runLoop).start(); + try { + selector.isRunningFuture().get(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Interrupted while waiting for selector to start.", e); + } catch (ExecutionException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new RuntimeException("Exception during selector start.", e); + } + } + } + } + } + + private void ensureOpen() { + if (isOpen.get() == false) { + throw new 
IllegalStateException("NioGroup is closed."); + } + } +} diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java new file mode 100644 index 00000000000..ab6709bcc5b --- /dev/null +++ b/libs/nio/src/main/java/org/elasticsearch/nio/NioSelector.java @@ -0,0 +1,428 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.Closeable; +import java.io.IOException; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ClosedSelectorException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; + +/** + * This is a nio selector implementation. 
This selector wraps a raw nio {@link Selector}. When you call + * {@link #runLoop()}, the selector will run until {@link #close()} is called. This instance handles closing + * of channels. Users should call {@link #queueChannelClose(NioChannel)} to schedule a channel for close by + * this selector. + *

+ * Children of this class should implement the specific {@link #processKey(SelectionKey)}, + * {@link #preSelect()}, and {@link #cleanup()} functionality. + */ +public class NioSelector implements Closeable { + + private final ConcurrentLinkedQueue queuedWrites = new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue> channelsToClose = new ConcurrentLinkedQueue<>(); + private final ConcurrentLinkedQueue> channelsToRegister = new ConcurrentLinkedQueue<>(); + private final EventHandler eventHandler; + private final Selector selector; + + private final ReentrantLock runLock = new ReentrantLock(); + private final CountDownLatch exitedLoop = new CountDownLatch(1); + private final AtomicBoolean isClosed = new AtomicBoolean(false); + private final CompletableFuture isRunningFuture = new CompletableFuture<>(); + private final AtomicReference thread = new AtomicReference<>(null); + + public NioSelector(EventHandler eventHandler) throws IOException { + this(eventHandler, Selector.open()); + } + + public NioSelector(EventHandler eventHandler, Selector selector) throws IOException { + this.selector = selector; + this.eventHandler = eventHandler; + } + + public Selector rawSelector() { + return selector; + } + + public boolean isOpen() { + return isClosed.get() == false; + } + + public boolean isRunning() { + return runLock.isLocked(); + } + + Future isRunningFuture() { + return isRunningFuture; + } + + void setThread() { + boolean wasSet = thread.compareAndSet(null, Thread.currentThread()); + assert wasSet : "Failed to set thread as it was already set. Should only set once."; + } + + public boolean isOnCurrentThread() { + return Thread.currentThread() == thread.get(); + } + + public void assertOnSelectorThread() { + assert isOnCurrentThread() : "Must be on selector thread [" + thread.get().getName() + "} to perform this operation. " + + "Currently on thread [" + Thread.currentThread().getName() + "]."; + } + + /** + * Starts this selector. 
The selector will run until {@link #close()} is called. + */ + public void runLoop() { + if (runLock.tryLock()) { + isRunningFuture.complete(null); + try { + setThread(); + while (isOpen()) { + singleLoop(); + } + } finally { + try { + cleanupAndCloseChannels(); + } finally { + try { + selector.close(); + } catch (IOException e) { + eventHandler.selectorException(e); + } finally { + runLock.unlock(); + exitedLoop.countDown(); + } + } + } + } else { + throw new IllegalStateException("selector is already running"); + } + } + + void singleLoop() { + try { + closePendingChannels(); + preSelect(); + + int ready = selector.select(300); + if (ready > 0) { + Set selectionKeys = selector.selectedKeys(); + Iterator keyIterator = selectionKeys.iterator(); + while (keyIterator.hasNext()) { + SelectionKey sk = keyIterator.next(); + keyIterator.remove(); + if (sk.isValid()) { + try { + processKey(sk); + } catch (CancelledKeyException cke) { + eventHandler.genericChannelException((ChannelContext) sk.attachment(), cke); + } + } else { + eventHandler.genericChannelException((ChannelContext) sk.attachment(), new CancelledKeyException()); + } + } + } + } catch (ClosedSelectorException e) { + if (isOpen()) { + throw e; + } + } catch (IOException e) { + eventHandler.selectorException(e); + } catch (Exception e) { + eventHandler.uncaughtException(e); + } + } + + void cleanupAndCloseChannels() { + cleanup(); + channelsToClose.addAll(channelsToRegister); + channelsToRegister.clear(); + channelsToClose.addAll(selector.keys().stream().map(sk -> (ChannelContext) sk.attachment()).collect(Collectors.toList())); + closePendingChannels(); + } + + @Override + public void close() throws IOException { + if (isClosed.compareAndSet(false, true)) { + wakeup(); + if (isRunning()) { + try { + exitedLoop.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new IllegalStateException("Thread was interrupted while waiting for selector to close", e); + } + } else if 
(selector.isOpen()) { + selector.close(); + } + } + } + + void processKey(SelectionKey selectionKey) { + ChannelContext context = (ChannelContext) selectionKey.attachment(); + if (selectionKey.isAcceptable()) { + assert context instanceof ServerChannelContext : "Only server channels can receive accept events"; + ServerChannelContext serverChannelContext = (ServerChannelContext) context; + int ops = selectionKey.readyOps(); + if ((ops & SelectionKey.OP_ACCEPT) != 0) { + try { + eventHandler.acceptChannel(serverChannelContext); + } catch (IOException e) { + eventHandler.acceptException(serverChannelContext, e); + } + } + } else { + assert context instanceof SocketChannelContext : "Only sockets channels can receive non-accept events"; + SocketChannelContext channelContext = (SocketChannelContext) context; + int ops = selectionKey.readyOps(); + if ((ops & SelectionKey.OP_CONNECT) != 0) { + attemptConnect(channelContext, true); + } + + if (channelContext.isConnectComplete()) { + if ((ops & SelectionKey.OP_WRITE) != 0) { + handleWrite(channelContext); + } + + if ((ops & SelectionKey.OP_READ) != 0) { + handleRead(channelContext); + } + } + eventHandler.postHandling(channelContext); + } + + } + + /** + * Called immediately prior to a raw {@link Selector#select()} call. Should be used to implement + * channel registration, handling queued writes, and other work that is not specifically processing + * a selection key. + */ + void preSelect() { + setUpNewChannels(); + handleQueuedWrites(); + } + + /** + * Called once as the selector is being closed. + */ + void cleanup() { + WriteOperation op; + while ((op = queuedWrites.poll()) != null) { + executeFailedListener(op.getListener(), new ClosedSelectorException()); + } + } + + /** + * Queues a write operation to be handled by the event loop. This can be called by any thread and is the + * api available for non-selector threads to schedule writes. 
+ * + * @param writeOperation to be queued + */ + public void queueWrite(WriteOperation writeOperation) { + queuedWrites.offer(writeOperation); + if (isOpen() == false) { + boolean wasRemoved = queuedWrites.remove(writeOperation); + if (wasRemoved) { + writeOperation.getListener().accept(null, new ClosedSelectorException()); + } + } else { + wakeup(); + } + } + + public void queueChannelClose(NioChannel channel) { + ChannelContext context = channel.getContext(); + assert context.getSelector() == this : "Must schedule a channel for closure with its selector"; + channelsToClose.offer(context); + ensureSelectorOpenForEnqueuing(channelsToClose, context); + wakeup(); + } + + /** + * Schedules a NioChannel to be registered with this selector. The channel will be queued and + * eventually registered next time through the event loop. + * + * @param channel to register + */ + public void scheduleForRegistration(NioChannel channel) { + ChannelContext context = channel.getContext(); + channelsToRegister.add(context); + ensureSelectorOpenForEnqueuing(channelsToRegister, context); + wakeup(); + } + + /** + * Queues a write operation directly in a channel's buffer. Channel buffers are only safe to be accessed + * by the selector thread. As a result, this method should only be called by the selector thread. + * + * @param writeOperation to be queued in a channel's buffer + */ + public void queueWriteInChannelBuffer(WriteOperation writeOperation) { + assertOnSelectorThread(); + SocketChannelContext context = writeOperation.getChannel(); + try { + SelectionKeyUtils.setWriteInterested(context.getSelectionKey()); + context.queueWriteOperation(writeOperation); + } catch (Exception e) { + executeFailedListener(writeOperation.getListener(), e); + } + } + + /** + * Executes a success listener with consistent exception handling. This can only be called from current + * selector thread. 
+ * + * @param listener to be executed + * @param value to provide to listener + */ + public void executeListener(BiConsumer listener, V value) { + assertOnSelectorThread(); + try { + listener.accept(value, null); + } catch (Exception e) { + eventHandler.listenerException(e); + } + } + + /** + * Executes a failed listener with consistent exception handling. This can only be called from current + * selector thread. + * + * @param listener to be executed + * @param exception to provide to listener + */ + public void executeFailedListener(BiConsumer listener, Exception exception) { + assertOnSelectorThread(); + try { + listener.accept(null, exception); + } catch (Exception e) { + eventHandler.listenerException(e); + } + } + + private void wakeup() { + // TODO: Do we need the wakeup optimizations that some other libraries use? + selector.wakeup(); + } + + private void handleWrite(SocketChannelContext context) { + try { + eventHandler.handleWrite(context); + } catch (Exception e) { + eventHandler.writeException(context, e); + } + } + + private void handleRead(SocketChannelContext context) { + try { + eventHandler.handleRead(context); + } catch (Exception e) { + eventHandler.readException(context, e); + } + } + + private void attemptConnect(SocketChannelContext context, boolean connectEvent) { + try { + eventHandler.handleConnect(context); + if (connectEvent && context.isConnectComplete() == false) { + eventHandler.connectException(context, new IOException("Received OP_CONNECT but connect failed")); + } + } catch (Exception e) { + eventHandler.connectException(context, e); + } + } + + private void setUpNewChannels() { + ChannelContext newChannel; + while ((newChannel = this.channelsToRegister.poll()) != null) { + assert newChannel.getSelector() == this : "The channel must be registered with the selector with which it was created"; + try { + if (newChannel.isOpen()) { + eventHandler.handleRegistration(newChannel); + if (newChannel instanceof SocketChannelContext) { + 
attemptConnect((SocketChannelContext) newChannel, false); + } + } else { + eventHandler.registrationException(newChannel, new ClosedChannelException()); + } + } catch (Exception e) { + eventHandler.registrationException(newChannel, e); + } + } + } + + private void closePendingChannels() { + ChannelContext channelContext; + while ((channelContext = channelsToClose.poll()) != null) { + eventHandler.handleClose(channelContext); + } + } + + private void handleQueuedWrites() { + WriteOperation writeOperation; + while ((writeOperation = queuedWrites.poll()) != null) { + if (writeOperation.getChannel().isOpen()) { + queueWriteInChannelBuffer(writeOperation); + } else { + executeFailedListener(writeOperation.getListener(), new ClosedChannelException()); + } + } + } + + /** + * This is a convenience method to be called after some objects (normally channels) are enqueued with this + * selector. This method will check if the selector is still open. If it is open, normal operation can + * proceed. + * + * If the selector is closed, then we attempt to remove the object from the queue. If the removal + * succeeds then we throw an {@link IllegalStateException} indicating that normal operation failed. If + * the object cannot be removed from the queue, then the object has already been handled by the selector + * and operation can proceed normally. + * + * If this method is called from the selector thread, we will not allow the queuing to occur as the + * selector thread can manipulate its queues internally even if it is no longer open. 
+ * + * @param queue the queue to which the object was added + * @param objectAdded the object added + * @param the object type + */ + private void ensureSelectorOpenForEnqueuing(ConcurrentLinkedQueue queue, O objectAdded) { + if (isOpen() == false && isOnCurrentThread() == false) { + if (queue.remove(objectAdded)) { + throw new IllegalStateException("selector is already closed"); + } + } + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java rename to libs/nio/src/main/java/org/elasticsearch/nio/NioServerSocketChannel.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java b/libs/nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java rename to libs/nio/src/main/java/org/elasticsearch/nio/NioSocketChannel.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java b/libs/nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java rename to libs/nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java diff --git a/libs/nio/src/main/java/org/elasticsearch/nio/RoundRobinSupplier.java b/libs/nio/src/main/java/org/elasticsearch/nio/RoundRobinSupplier.java new file mode 100644 index 00000000000..c35dca1c959 --- /dev/null +++ b/libs/nio/src/main/java/org/elasticsearch/nio/RoundRobinSupplier.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +final class RoundRobinSupplier implements Supplier { + + private final AtomicBoolean selectorsSet = new AtomicBoolean(false); + private volatile S[] selectors; + private AtomicInteger counter = new AtomicInteger(0); + + RoundRobinSupplier() { + this.selectors = null; + } + + RoundRobinSupplier(S[] selectors) { + this.selectors = selectors; + this.selectorsSet.set(true); + } + + @Override + public S get() { + S[] selectors = this.selectors; + return selectors[counter.getAndIncrement() % selectors.length]; + } + + void setSelectors(S[] selectors) { + if (selectorsSet.compareAndSet(false, true)) { + this.selectors = selectors; + } else { + throw new AssertionError("Selectors already set. 
Should only be set once."); + } + } + + int count() { + return selectors.length; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java b/libs/nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java rename to libs/nio/src/main/java/org/elasticsearch/nio/SelectionKeyUtils.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java similarity index 89% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java rename to libs/nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java index 4b47ce063f9..9e1af3e9973 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/ServerChannelContext.java @@ -28,12 +28,12 @@ import java.util.function.Supplier; public class ServerChannelContext extends ChannelContext { private final NioServerSocketChannel channel; - private final AcceptingSelector selector; + private final NioSelector selector; private final Consumer acceptor; private final AtomicBoolean isClosing = new AtomicBoolean(false); private final ChannelFactory channelFactory; - public ServerChannelContext(NioServerSocketChannel channel, ChannelFactory channelFactory, AcceptingSelector selector, + public ServerChannelContext(NioServerSocketChannel channel, ChannelFactory channelFactory, NioSelector selector, Consumer acceptor, Consumer exceptionHandler) { super(channel.getRawChannel(), exceptionHandler); this.channel = channel; @@ -42,7 +42,7 @@ public class ServerChannelContext extends ChannelContext { this.acceptor = acceptor; } - public void acceptChannels(Supplier selectorSupplier) throws IOException { + public void acceptChannels(Supplier 
selectorSupplier) throws IOException { NioSocketChannel acceptedChannel; while ((acceptedChannel = channelFactory.acceptNioChannel(this, selectorSupplier)) != null) { acceptor.accept(acceptedChannel); @@ -57,7 +57,7 @@ public class ServerChannelContext extends ChannelContext { } @Override - public AcceptingSelector getSelector() { + public NioSelector getSelector() { return selector; } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java similarity index 97% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java rename to libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index 6a769b4d173..53be0e7f89f 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -47,14 +47,14 @@ public abstract class SocketChannelContext extends ChannelContext protected final InboundChannelBuffer channelBuffer; protected final AtomicBoolean isClosing = new AtomicBoolean(false); private final ReadWriteHandler readWriteHandler; - private final SocketSelector selector; + private final NioSelector selector; private final CompletableContext connectContext = new CompletableContext<>(); private final LinkedList pendingFlushes = new LinkedList<>(); private boolean ioException; private boolean peerClosed; private Exception connectException; - protected SocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + protected SocketChannelContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { super(channel.getRawChannel(), exceptionHandler); this.selector = selector; @@ -64,7 +64,7 @@ public abstract class SocketChannelContext extends ChannelContext } @Override - 
public SocketSelector getSelector() { + public NioSelector getSelector() { return selector; } @@ -129,7 +129,7 @@ public abstract class SocketChannelContext extends ChannelContext WriteOperation writeOperation = readWriteHandler.createWriteOperation(this, message, listener); - SocketSelector selector = getSelector(); + NioSelector selector = getSelector(); if (selector.isOnCurrentThread() == false) { selector.queueWrite(writeOperation); return; diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/nio/src/main/java/org/elasticsearch/nio/WriteOperation.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java rename to libs/nio/src/main/java/org/elasticsearch/nio/WriteOperation.java diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/utils/ExceptionsHelper.java b/libs/nio/src/main/java/org/elasticsearch/nio/utils/ExceptionsHelper.java similarity index 100% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/utils/ExceptionsHelper.java rename to libs/nio/src/main/java/org/elasticsearch/nio/utils/ExceptionsHelper.java diff --git a/libs/elasticsearch-nio/src/test/eclipse-build.gradle b/libs/nio/src/test/eclipse-build.gradle similarity index 55% rename from libs/elasticsearch-nio/src/test/eclipse-build.gradle rename to libs/nio/src/test/eclipse-build.gradle index 6b5d202dddd..e30e76b0da5 100644 --- a/libs/elasticsearch-nio/src/test/eclipse-build.gradle +++ b/libs/nio/src/test/eclipse-build.gradle @@ -1,7 +1,7 @@ -// this is just shell gradle file for eclipse to have separate projects for elasticsearch-nio src and tests +// this is just shell gradle file for eclipse to have separate projects for nio src and tests apply from: '../../build.gradle' dependencies { - testCompile project(':libs:elasticsearch-nio') + testCompile project(':libs:nio') } diff --git 
a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java similarity index 99% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java index e5c236e48a8..2ab20522db6 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -44,7 +44,7 @@ public class BytesChannelContextTests extends ESTestCase { private SocketChannel rawChannel; private BytesChannelContext context; private InboundChannelBuffer channelBuffer; - private SocketSelector selector; + private NioSelector selector; private BiConsumer listener; private int messageLength; @@ -54,7 +54,7 @@ public class BytesChannelContextTests extends ESTestCase { readConsumer = mock(CheckedFunction.class); messageLength = randomInt(96) + 20; - selector = mock(SocketSelector.class); + selector = mock(NioSelector.class); listener = mock(BiConsumer.class); channel = mock(NioSocketChannel.class); rawChannel = mock(SocketChannel.class); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/ChannelContextTests.java similarity index 99% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelContextTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/ChannelContextTests.java index 586dae83d08..13372055668 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/ChannelContextTests.java @@ -115,7 +115,7 @@ public class ChannelContextTests extends ESTestCase { } @Override - public ESSelector getSelector() { + public NioSelector getSelector() { throw new 
UnsupportedOperationException("not implemented"); } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java similarity index 91% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java index 858f547f8a6..8ff0cfcd0c8 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/ChannelFactoryTests.java @@ -43,18 +43,18 @@ public class ChannelFactoryTests extends ESTestCase { private ChannelFactory.RawChannelFactory rawChannelFactory; private SocketChannel rawChannel; private ServerSocketChannel rawServerChannel; - private SocketSelector socketSelector; - private Supplier socketSelectorSupplier; - private Supplier acceptingSelectorSupplier; - private AcceptingSelector acceptingSelector; + private NioSelector socketSelector; + private Supplier socketSelectorSupplier; + private Supplier acceptingSelectorSupplier; + private NioSelector acceptingSelector; @Before @SuppressWarnings("unchecked") public void setupFactory() throws IOException { rawChannelFactory = mock(ChannelFactory.RawChannelFactory.class); channelFactory = new TestChannelFactory(rawChannelFactory); - socketSelector = mock(SocketSelector.class); - acceptingSelector = mock(AcceptingSelector.class); + socketSelector = mock(NioSelector.class); + acceptingSelector = mock(NioSelector.class); socketSelectorSupplier = mock(Supplier.class); acceptingSelectorSupplier = mock(Supplier.class); rawChannel = SocketChannel.open(); @@ -139,14 +139,14 @@ public class ChannelFactoryTests extends ESTestCase { @SuppressWarnings("unchecked") @Override - public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + public NioSocketChannel createChannel(NioSelector 
selector, SocketChannel channel) throws IOException { NioSocketChannel nioSocketChannel = new NioSocketChannel(channel); nioSocketChannel.setContext(mock(SocketChannelContext.class)); return nioSocketChannel; } @Override - public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + public NioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { return new NioServerSocketChannel(channel); } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java similarity index 66% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java index c85d9c0c5a8..a9e1836199e 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/EventHandlerTests.java @@ -25,25 +25,29 @@ import org.junit.Before; import java.io.IOException; import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectionKey; +import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.util.ArrayList; import java.util.Collections; import java.util.function.Consumer; +import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class SocketEventHandlerTests extends ESTestCase { +public class EventHandlerTests extends ESTestCase { private Consumer channelExceptionHandler; private Consumer genericExceptionHandler; private ReadWriteHandler readWriteHandler; - private SocketEventHandler handler; - private NioSocketChannel channel; - private SocketChannel rawChannel; - 
private DoNotRegisterContext context; + private EventHandler handler; + private DoNotRegisterSocketContext context; + private DoNotRegisterServerContext serverContext; + private ChannelFactory channelFactory; + private RoundRobinSupplier selectorSupplier; @Before @SuppressWarnings("unchecked") @@ -51,16 +55,24 @@ public class SocketEventHandlerTests extends ESTestCase { channelExceptionHandler = mock(Consumer.class); genericExceptionHandler = mock(Consumer.class); readWriteHandler = mock(ReadWriteHandler.class); - SocketSelector selector = mock(SocketSelector.class); - handler = new SocketEventHandler(genericExceptionHandler); - rawChannel = mock(SocketChannel.class); - channel = new NioSocketChannel(rawChannel); - when(rawChannel.finishConnect()).thenReturn(true); + channelFactory = mock(ChannelFactory.class); + NioSelector selector = mock(NioSelector.class); + ArrayList selectors = new ArrayList<>(); + selectors.add(selector); + selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new NioSelector[0])); + handler = new EventHandler(genericExceptionHandler, selectorSupplier); - context = new DoNotRegisterContext(channel, selector, channelExceptionHandler, new TestSelectionKey(0), readWriteHandler); + SocketChannel rawChannel = mock(SocketChannel.class); + when(rawChannel.finishConnect()).thenReturn(true); + NioSocketChannel channel = new NioSocketChannel(rawChannel); + context = new DoNotRegisterSocketContext(channel, selector, channelExceptionHandler, readWriteHandler); channel.setContext(context); handler.handleRegistration(context); + NioServerSocketChannel serverChannel = new NioServerSocketChannel(mock(ServerSocketChannel.class)); + serverContext = new DoNotRegisterServerContext(serverChannel, mock(NioSelector.class), mock(Consumer.class)); + serverChannel.setContext(serverContext); + when(selector.isOnCurrentThread()).thenReturn(true); } @@ -73,7 +85,7 @@ public class SocketEventHandlerTests extends ESTestCase { verify(channelContext).register(); } - 
public void testRegisterAddsOP_CONNECTAndOP_READInterest() throws IOException { + public void testRegisterNonServerAddsOP_CONNECTAndOP_READInterest() throws IOException { SocketChannelContext context = mock(SocketChannelContext.class); when(context.getSelectionKey()).thenReturn(new TestSelectionKey(0)); handler.handleRegistration(context); @@ -81,16 +93,55 @@ public class SocketEventHandlerTests extends ESTestCase { } public void testRegisterAddsAttachment() throws IOException { - SocketChannelContext context = mock(SocketChannelContext.class); + ChannelContext context = randomBoolean() ? mock(SocketChannelContext.class) : mock(ServerChannelContext.class); when(context.getSelectionKey()).thenReturn(new TestSelectionKey(0)); handler.handleRegistration(context); assertEquals(context, context.getSelectionKey().attachment()); } + public void testHandleServerRegisterSetsOP_ACCEPTInterest() throws IOException { + assertNull(serverContext.getSelectionKey()); + + handler.handleRegistration(serverContext); + + assertEquals(SelectionKey.OP_ACCEPT, serverContext.getSelectionKey().interestOps()); + } + + public void testHandleAcceptCallsChannelFactory() throws IOException { + NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class)); + NioSocketChannel nullChannel = null; + when(channelFactory.acceptNioChannel(same(serverContext), same(selectorSupplier))).thenReturn(childChannel, nullChannel); + + handler.acceptChannel(serverContext); + + verify(channelFactory, times(2)).acceptNioChannel(same(serverContext), same(selectorSupplier)); + } + + @SuppressWarnings("unchecked") + public void testHandleAcceptCallsServerAcceptCallback() throws IOException { + NioSocketChannel childChannel = new NioSocketChannel(mock(SocketChannel.class)); + SocketChannelContext childContext = mock(SocketChannelContext.class); + childChannel.setContext(childContext); + ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); + 
when(channelFactory.acceptNioChannel(same(serverContext), same(selectorSupplier))).thenReturn(childChannel); + + handler.acceptChannel(serverChannelContext); + + verify(serverChannelContext).acceptChannels(selectorSupplier); + } + + public void testAcceptExceptionCallsExceptionHandler() throws IOException { + ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); + IOException exception = new IOException(); + handler.acceptException(serverChannelContext, exception); + + verify(serverChannelContext).handleException(exception); + } + public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException { FlushReadyWrite flushReadyWrite = mock(FlushReadyWrite.class); when(readWriteHandler.writeToBytes(flushReadyWrite)).thenReturn(Collections.singletonList(flushReadyWrite)); - channel.getContext().queueWriteOperation(flushReadyWrite); + context.queueWriteOperation(flushReadyWrite); handler.handleRegistration(context); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, context.getSelectionKey().interestOps()); } @@ -120,11 +171,7 @@ public class SocketEventHandlerTests extends ESTestCase { } public void testHandleReadDelegatesToContext() throws IOException { - NioSocketChannel channel = new NioSocketChannel(rawChannel); SocketChannelContext context = mock(SocketChannelContext.class); - channel.setContext(context); - - when(context.read()).thenReturn(1); handler.handleRead(context); verify(context).read(); } @@ -200,19 +247,31 @@ public class SocketEventHandlerTests extends ESTestCase { verify(genericExceptionHandler).accept(listenerException); } - private class DoNotRegisterContext extends BytesChannelContext { + private class DoNotRegisterSocketContext extends BytesChannelContext { - private final TestSelectionKey selectionKey; - DoNotRegisterContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, - TestSelectionKey selectionKey, ReadWriteHandler 
handler) { + DoNotRegisterSocketContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, + ReadWriteHandler handler) { super(channel, selector, exceptionHandler, handler, InboundChannelBuffer.allocatingInstance()); - this.selectionKey = selectionKey; } @Override public void register() { - setSelectionKey(selectionKey); + setSelectionKey(new TestSelectionKey(0)); + } + } + + private class DoNotRegisterServerContext extends ServerChannelContext { + + + @SuppressWarnings("unchecked") + DoNotRegisterServerContext(NioServerSocketChannel channel, NioSelector selector, Consumer acceptor) { + super(channel, channelFactory, selector, acceptor, mock(Consumer.class)); + } + + @Override + public void register() { + setSelectionKey(new TestSelectionKey(0)); } } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java similarity index 100% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java similarity index 100% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/InboundChannelBufferTests.java diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java similarity index 88% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java index 13ce2c13654..027f1255a59 100644 --- 
a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioGroupTests.java @@ -38,9 +38,8 @@ public class NioGroupTests extends ESTestCase { @SuppressWarnings("unchecked") public void setUp() throws Exception { super.setUp(); - nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, - (s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"), 1, - () -> new SocketEventHandler(mock(Consumer.class))); + nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, daemonThreadFactory(Settings.EMPTY, "selector"), 1, + (s) -> new EventHandler(mock(Consumer.class), s)); } @Override @@ -76,8 +75,8 @@ public class NioGroupTests extends ESTestCase { public void testExceptionAtStartIsHandled() throws IOException { RuntimeException ex = new RuntimeException(); CheckedRunnable ctor = () -> new NioGroup(r -> {throw ex;}, 1, - (s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"), - 1, () -> new SocketEventHandler(mock(Consumer.class))); + daemonThreadFactory(Settings.EMPTY, "selector"), + 1, (s) -> new EventHandler(mock(Consumer.class), s)); RuntimeException runtimeException = expectThrows(RuntimeException.class, ctor::run); assertSame(ex, runtimeException); // ctor starts threads. So we are testing that a failure to construct will stop threads. 
Our thread diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java similarity index 61% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java index f8775d03b42..dd3fea8bf50 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/NioSelectorTests.java @@ -43,13 +43,15 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class SocketSelectorTests extends ESTestCase { +public class NioSelectorTests extends ESTestCase { - private SocketSelector socketSelector; - private SocketEventHandler eventHandler; + private NioSelector selector; + private EventHandler eventHandler; private NioSocketChannel channel; + private NioServerSocketChannel serverChannel; private TestSelectionKey selectionKey; private SocketChannelContext channelContext; + private ServerChannelContext serverChannelContext; private BiConsumer listener; private ByteBuffer[] buffers = {ByteBuffer.allocate(1)}; private Selector rawSelector; @@ -59,75 +61,172 @@ public class SocketSelectorTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); rawSelector = mock(Selector.class); - eventHandler = mock(SocketEventHandler.class); + eventHandler = mock(EventHandler.class); channel = mock(NioSocketChannel.class); channelContext = mock(SocketChannelContext.class); + serverChannel = mock(NioServerSocketChannel.class); + serverChannelContext = mock(ServerChannelContext.class); listener = mock(BiConsumer.class); selectionKey = new TestSelectionKey(0); - selectionKey.attach(channelContext); - this.socketSelector = new SocketSelector(eventHandler, rawSelector); - this.socketSelector.setThread(); + 
this.selector = new NioSelector(eventHandler, rawSelector); + this.selector.setThread(); when(channel.getContext()).thenReturn(channelContext); when(channelContext.isOpen()).thenReturn(true); - when(channelContext.getSelector()).thenReturn(socketSelector); + when(channelContext.getSelector()).thenReturn(selector); when(channelContext.getSelectionKey()).thenReturn(selectionKey); when(channelContext.isConnectComplete()).thenReturn(true); + + when(serverChannel.getContext()).thenReturn(serverChannelContext); + when(serverChannelContext.isOpen()).thenReturn(true); + when(serverChannelContext.getSelector()).thenReturn(selector); + when(serverChannelContext.getSelectionKey()).thenReturn(selectionKey); } - public void testRegisterChannel() throws Exception { - socketSelector.scheduleForRegistration(channel); + @SuppressWarnings({"unchecked", "rawtypes"}) + public void testQueueChannelForClosed() throws IOException { + NioChannel channel = mock(NioChannel.class); + ChannelContext context = mock(ChannelContext.class); + when(channel.getContext()).thenReturn(context); + when(context.getSelector()).thenReturn(selector); - socketSelector.preSelect(); + selector.queueChannelClose(channel); - verify(eventHandler).handleRegistration(channelContext); + selector.singleLoop(); + + verify(eventHandler).handleClose(context); } - public void testClosedChannelWillNotBeRegistered() throws Exception { + public void testSelectorClosedExceptionIsNotCaughtWhileRunning() throws IOException { + boolean closedSelectorExceptionCaught = false; + when(rawSelector.select(anyInt())).thenThrow(new ClosedSelectorException()); + try { + this.selector.singleLoop(); + } catch (ClosedSelectorException e) { + closedSelectorExceptionCaught = true; + } + + assertTrue(closedSelectorExceptionCaught); + } + + public void testIOExceptionWhileSelect() throws IOException { + IOException ioException = new IOException(); + + when(rawSelector.select(anyInt())).thenThrow(ioException); + + this.selector.singleLoop(); + 
+ verify(eventHandler).selectorException(ioException); + } + + public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws IOException { + when(rawSelector.isOpen()).thenReturn(true); + selector.close(); + verify(rawSelector).close(); + } + + public void testRegisteredChannel() throws IOException { + selector.scheduleForRegistration(serverChannel); + + selector.preSelect(); + + verify(eventHandler).handleRegistration(serverChannelContext); + } + + public void testClosedServerChannelWillNotBeRegistered() { + when(serverChannelContext.isOpen()).thenReturn(false); + selector.scheduleForRegistration(serverChannel); + + selector.preSelect(); + + verify(eventHandler).registrationException(same(serverChannelContext), any(ClosedChannelException.class)); + } + + public void testRegisterServerChannelFailsDueToException() throws Exception { + selector.scheduleForRegistration(serverChannel); + + ClosedChannelException closedChannelException = new ClosedChannelException(); + doThrow(closedChannelException).when(eventHandler).handleRegistration(serverChannelContext); + + selector.preSelect(); + + verify(eventHandler).registrationException(serverChannelContext, closedChannelException); + } + + public void testClosedSocketChannelWillNotBeRegistered() throws Exception { when(channelContext.isOpen()).thenReturn(false); - socketSelector.scheduleForRegistration(channel); + selector.scheduleForRegistration(channel); - socketSelector.preSelect(); + selector.preSelect(); verify(eventHandler).registrationException(same(channelContext), any(ClosedChannelException.class)); verify(eventHandler, times(0)).handleConnect(channelContext); } - public void testRegisterChannelFailsDueToException() throws Exception { - socketSelector.scheduleForRegistration(channel); + public void testRegisterSocketChannelFailsDueToException() throws Exception { + selector.scheduleForRegistration(channel); ClosedChannelException closedChannelException = new ClosedChannelException(); 
doThrow(closedChannelException).when(eventHandler).handleRegistration(channelContext); - socketSelector.preSelect(); + selector.preSelect(); verify(eventHandler).registrationException(channelContext, closedChannelException); verify(eventHandler, times(0)).handleConnect(channelContext); } - public void testSuccessfullyRegisterChannelWillAttemptConnect() throws Exception { - socketSelector.scheduleForRegistration(channel); + public void testAcceptEvent() throws IOException { + selectionKey.setReadyOps(SelectionKey.OP_ACCEPT); - socketSelector.preSelect(); + selectionKey.attach(serverChannelContext); + selector.processKey(selectionKey); + + verify(eventHandler).acceptChannel(serverChannelContext); + } + + public void testAcceptException() throws IOException { + selectionKey.setReadyOps(SelectionKey.OP_ACCEPT); + IOException ioException = new IOException(); + + doThrow(ioException).when(eventHandler).acceptChannel(serverChannelContext); + + selectionKey.attach(serverChannelContext); + selector.processKey(selectionKey); + + verify(eventHandler).acceptException(serverChannelContext, ioException); + } + + public void testRegisterChannel() throws Exception { + selector.scheduleForRegistration(channel); + + selector.preSelect(); + + verify(eventHandler).handleRegistration(channelContext); + } + + public void testSuccessfullyRegisterChannelWillAttemptConnect() throws Exception { + selector.scheduleForRegistration(channel); + + selector.preSelect(); verify(eventHandler).handleConnect(channelContext); } public void testQueueWriteWhenNotRunning() throws Exception { - socketSelector.close(); + selector.close(); - socketSelector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); + selector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } public void testQueueWriteChannelIsClosed() throws Exception { WriteOperation writeOperation = new 
FlushReadyWrite(channelContext, buffers, listener); - socketSelector.queueWrite(writeOperation); + selector.queueWrite(writeOperation); when(channelContext.isOpen()).thenReturn(false); - socketSelector.preSelect(); + selector.preSelect(); verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); @@ -138,11 +237,11 @@ public class SocketSelectorTests extends ESTestCase { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); - socketSelector.queueWrite(writeOperation); + selector.queueWrite(writeOperation); when(channelContext.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); - socketSelector.preSelect(); + selector.preSelect(); verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); @@ -150,11 +249,11 @@ public class SocketSelectorTests extends ESTestCase { public void testQueueWriteSuccessful() throws Exception { WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); - socketSelector.queueWrite(writeOperation); + selector.queueWrite(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - socketSelector.preSelect(); + selector.preSelect(); verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); @@ -165,7 +264,7 @@ public class SocketSelectorTests extends ESTestCase { assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); - socketSelector.queueWriteInChannelBuffer(writeOperation); + selector.queueWriteInChannelBuffer(writeOperation); verify(channelContext).queueWriteOperation(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) != 0); @@ -179,7 +278,7 
@@ public class SocketSelectorTests extends ESTestCase { when(channelContext.getSelectionKey()).thenReturn(selectionKey); when(selectionKey.interestOps(anyInt())).thenThrow(cancelledKeyException); - socketSelector.queueWriteInChannelBuffer(writeOperation); + selector.queueWriteInChannelBuffer(writeOperation); verify(channelContext, times(0)).queueWriteOperation(writeOperation); verify(listener).accept(null, cancelledKeyException); @@ -188,7 +287,8 @@ public class SocketSelectorTests extends ESTestCase { public void testConnectEvent() throws Exception { selectionKey.setReadyOps(SelectionKey.OP_CONNECT); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).handleConnect(channelContext); } @@ -199,7 +299,8 @@ public class SocketSelectorTests extends ESTestCase { selectionKey.setReadyOps(SelectionKey.OP_CONNECT); doThrow(ioException).when(eventHandler).handleConnect(channelContext); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).connectException(channelContext, ioException); } @@ -212,7 +313,8 @@ public class SocketSelectorTests extends ESTestCase { doThrow(ioException).when(eventHandler).handleWrite(channelContext); when(channelContext.isConnectComplete()).thenReturn(false); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler, times(0)).handleWrite(channelContext); verify(eventHandler, times(0)).handleRead(channelContext); @@ -221,7 +323,8 @@ public class SocketSelectorTests extends ESTestCase { public void testSuccessfulWriteEvent() throws Exception { selectionKey.setReadyOps(SelectionKey.OP_WRITE); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).handleWrite(channelContext); } @@ -229,11 +332,13 @@ public class 
SocketSelectorTests extends ESTestCase { public void testWriteEventWithException() throws Exception { IOException ioException = new IOException(); + selectionKey.attach(channelContext); selectionKey.setReadyOps(SelectionKey.OP_WRITE); doThrow(ioException).when(eventHandler).handleWrite(channelContext); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).writeException(channelContext, ioException); } @@ -241,7 +346,8 @@ public class SocketSelectorTests extends ESTestCase { public void testSuccessfulReadEvent() throws Exception { selectionKey.setReadyOps(SelectionKey.OP_READ); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).handleRead(channelContext); } @@ -253,7 +359,8 @@ public class SocketSelectorTests extends ESTestCase { doThrow(ioException).when(eventHandler).handleRead(channelContext); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).readException(channelContext, ioException); } @@ -261,7 +368,8 @@ public class SocketSelectorTests extends ESTestCase { public void testWillCallPostHandleAfterChannelHandling() throws Exception { selectionKey.setReadyOps(SelectionKey.OP_WRITE | SelectionKey.OP_READ); - socketSelector.processKey(selectionKey); + selectionKey.attach(channelContext); + selector.processKey(selectionKey); verify(eventHandler).handleWrite(channelContext); verify(eventHandler).handleRead(channelContext); @@ -273,18 +381,18 @@ public class SocketSelectorTests extends ESTestCase { SocketChannelContext unregisteredContext = mock(SocketChannelContext.class); when(unregisteredChannel.getContext()).thenReturn(unregisteredContext); - socketSelector.scheduleForRegistration(channel); + selector.scheduleForRegistration(channel); - socketSelector.preSelect(); + selector.preSelect(); - 
socketSelector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); - socketSelector.scheduleForRegistration(unregisteredChannel); + selector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); + selector.scheduleForRegistration(unregisteredChannel); TestSelectionKey testSelectionKey = new TestSelectionKey(0); testSelectionKey.attach(channelContext); when(rawSelector.keys()).thenReturn(new HashSet<>(Collections.singletonList(testSelectionKey))); - socketSelector.cleanupAndCloseChannels(); + selector.cleanupAndCloseChannels(); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); verify(eventHandler).handleClose(channelContext); @@ -295,7 +403,7 @@ public class SocketSelectorTests extends ESTestCase { RuntimeException exception = new RuntimeException(); doThrow(exception).when(listener).accept(null, null); - socketSelector.executeListener(listener, null); + selector.executeListener(listener, null); verify(eventHandler).listenerException(exception); } @@ -305,7 +413,7 @@ public class SocketSelectorTests extends ESTestCase { RuntimeException exception = new RuntimeException(); doThrow(exception).when(listener).accept(null, ioException); - socketSelector.executeFailedListener(listener, ioException); + selector.executeFailedListener(listener, ioException); verify(eventHandler).listenerException(exception); } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java similarity index 98% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java rename to libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index f27052ac5d5..fdb4a77b922 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -51,7 
+51,7 @@ public class SocketChannelContextTests extends ESTestCase { private Consumer exceptionHandler; private NioSocketChannel channel; private BiConsumer listener; - private SocketSelector selector; + private NioSelector selector; private ReadWriteHandler readWriteHandler; @SuppressWarnings("unchecked") @@ -64,7 +64,7 @@ public class SocketChannelContextTests extends ESTestCase { listener = mock(BiConsumer.class); when(channel.getRawChannel()).thenReturn(rawChannel); exceptionHandler = mock(Consumer.class); - selector = mock(SocketSelector.class); + selector = mock(NioSelector.class); readWriteHandler = mock(ReadWriteHandler.class); InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); @@ -275,7 +275,7 @@ public class SocketChannelContextTests extends ESTestCase { private static class TestSocketChannelContext extends SocketChannelContext { - private TestSocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + private TestSocketChannelContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { super(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/TestSelectionKey.java b/libs/nio/src/test/java/org/elasticsearch/nio/TestSelectionKey.java similarity index 100% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/TestSelectionKey.java rename to libs/nio/src/test/java/org/elasticsearch/nio/TestSelectionKey.java diff --git a/libs/elasticsearch-nio/src/test/resources/testks.jks b/libs/nio/src/test/resources/testks.jks similarity index 100% rename from libs/elasticsearch-nio/src/test/resources/testks.jks rename to libs/nio/src/test/resources/testks.jks diff --git 
a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index 69c8afb3e2f..433bef902c1 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -19,6 +19,7 @@ package org.elasticsearch.analysis.common; +import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.StopFilter; @@ -79,7 +80,9 @@ import org.apache.lucene.analysis.util.ElisionFilter; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; @@ -87,6 +90,7 @@ import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; +import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.tartarus.snowball.ext.DutchStemmer; @@ -103,6 +107,15 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { private static final DeprecationLogger DEPRECATION_LOGGER = new 
DeprecationLogger(Loggers.getLogger(CommonAnalysisPlugin.class)); + @Override + public Map>> getAnalyzers() { + Map>> analyzers = new TreeMap<>(); + analyzers.put("fingerprint", FingerprintAnalyzerProvider::new); + analyzers.put("standard_html_strip", StandardHtmlStripAnalyzerProvider::new); + analyzers.put("pattern", PatternAnalyzerProvider::new); + return analyzers; + } + @Override public Map> getTokenFilters() { Map> filters = new TreeMap<>(); @@ -197,6 +210,16 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin { return tokenizers; } + @Override + public List getPreBuiltAnalyzerProviderFactories() { + List analyzers = new ArrayList<>(); + analyzers.add(new PreBuiltAnalyzerProviderFactory("standard_html_strip", CachingStrategy.LUCENE, + version -> new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET))); + analyzers.add(new PreBuiltAnalyzerProviderFactory("pattern", CachingStrategy.ELASTICSEARCH, version -> + new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET))); + return analyzers; + } + @Override public List getPreConfiguredCharFilters() { List filters = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzer.java similarity index 94% rename from server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzer.java index 0a550f19aa7..d37239304cd 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzer.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzer.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; @@ -35,7 +35,7 @@ public final class FingerprintAnalyzer extends Analyzer { private final int maxOutputSize; private final CharArraySet stopWords; - public FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) { + FingerprintAnalyzer(CharArraySet stopWords, char separator, int maxOutputSize) { this.separator = separator; this.maxOutputSize = maxOutputSize; this.stopWords = stopWords; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzerProvider.java similarity index 90% rename from server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java rename to modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzerProvider.java index 6a777e7c931..f54b04bf309 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/FingerprintAnalyzerProvider.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/FingerprintAnalyzerProvider.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CharArraySet; @@ -25,6 +25,8 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.Analysis; /** @@ -42,7 +44,7 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider { private final StandardHtmlStripAnalyzer analyzer; - public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { + StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, name, settings); final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET; CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FingerprintAnalyzerTests.java similarity index 98% rename from server/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FingerprintAnalyzerTests.java index c5e854879e9..0933f3bf13a 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/FingerprintAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/FingerprintAnalyzerTests.java @@ -1,4 +1,4 @@ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; /* * Licensed to Elasticsearch under one or more contributor diff --git 
a/server/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTests.java rename to modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java index d80cbf66c34..d2d226d6250 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PatternAnalyzerTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PatternAnalyzerTests.java @@ -1,4 +1,4 @@ -package org.elasticsearch.index.analysis; +package org.elasticsearch.analysis.common; /* * Licensed to Elasticsearch under one or more contributor diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml index 6ff3b8c8027..d38f63f5429 100644 --- a/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml +++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/analysis-common/20_analyzers.yml @@ -37,3 +37,35 @@ analyzer: bengali - length: { tokens: 1 } - match: { tokens.0.token: বার } + +--- +"fingerprint": + - do: + indices.analyze: + body: + text: A1 B2 A1 D4 C3 + analyzer: fingerprint + - length: { tokens: 1 } + - match: { tokens.0.token: a1 b2 c3 d4 } + +--- +"standard_html_strip": + - do: + indices.analyze: + body: + text: + analyzer: standard_html_strip + - length: { tokens: 2 } + - match: { tokens.0.token: bold } + - match: { tokens.1.token: italic } + +--- +"pattern": + - do: + indices.analyze: + body: + text: foo bar + analyzer: pattern + - length: { tokens: 2 } + - match: { tokens.0.token: foo } + - match: { tokens.1.token: bar } diff --git 
a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java index 30e18ae6d68..70ecf06e58a 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureFieldMapper.java @@ -165,8 +165,7 @@ public class FeatureFieldMapper extends FieldMapper { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { - failIfNoDocValues(); - return new DocValuesIndexFieldData.Builder(); + throw new UnsupportedOperationException("[feature] fields do not support sorting, scripting or aggregating"); } @Override @@ -229,10 +228,6 @@ public class FeatureFieldMapper extends FieldMapper { protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || fieldType().nullValue() != null) { - builder.field("null_value", fieldType().nullValue()); - } - if (includeDefaults || fieldType().positiveScoreImpact() == false) { builder.field("positive_score_impact", fieldType().positiveScoreImpact()); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java new file mode 100644 index 00000000000..7b4b9e62fa3 --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapper.java @@ -0,0 +1,182 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +/** + * A {@link FieldMapper} that exposes Lucene's {@link FeatureField} as a sparse + * vector of features. 
+ */ +public class FeatureVectorFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "feature_vector"; + + public static class Defaults { + public static final MappedFieldType FIELD_TYPE = new FeatureVectorFieldType(); + + static { + FIELD_TYPE.setTokenized(false); + FIELD_TYPE.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE.setHasDocValues(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.freeze(); + } + } + + public static class Builder extends FieldMapper.Builder { + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + builder = this; + } + + @Override + public FeatureVectorFieldType fieldType() { + return (FeatureVectorFieldType) super.fieldType(); + } + + @Override + public FeatureVectorFieldMapper build(BuilderContext context) { + setupFieldType(context); + return new FeatureVectorFieldMapper( + name, fieldType, defaultFieldType, + context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); + } + } + + public static class TypeParser implements Mapper.TypeParser { + @Override + public Mapper.Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { + FeatureVectorFieldMapper.Builder builder = new FeatureVectorFieldMapper.Builder(name); + return builder; + } + } + + public static final class FeatureVectorFieldType extends MappedFieldType { + + public FeatureVectorFieldType() { + setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); + setSearchAnalyzer(Lucene.KEYWORD_ANALYZER); + } + + protected FeatureVectorFieldType(FeatureVectorFieldType ref) { + super(ref); + } + + public FeatureVectorFieldType clone() { + return new FeatureVectorFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException("[feature_vector] fields do not support [exists] queries"); + } + + @Override + public IndexFieldData.Builder 
fielddataBuilder(String fullyQualifiedIndexName) { + throw new UnsupportedOperationException("[feature_vector] fields do not support sorting, scripting or aggregating"); + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + throw new UnsupportedOperationException("Queries on [feature_vector] fields are not supported"); + } + } + + private FeatureVectorFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, + Settings indexSettings, MultiFields multiFields, CopyTo copyTo) { + super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); + assert fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) <= 0; + } + + @Override + protected FeatureVectorFieldMapper clone() { + return (FeatureVectorFieldMapper) super.clone(); + } + + @Override + public FeatureVectorFieldType fieldType() { + return (FeatureVectorFieldType) super.fieldType(); + } + + @Override + public FieldMapper parse(ParseContext context) throws IOException { + if (context.externalValueSet()) { + throw new IllegalArgumentException("[feature_vector] fields can't be used in multi-fields"); + } + + if (context.parser().currentToken() != Token.START_OBJECT) { + throw new IllegalArgumentException("[feature_vector] fields must be json objects, expected a START_OBJECT but got: " + + context.parser().currentToken()); + } + + String feature = null; + for (Token token = context.parser().nextToken(); token != Token.END_OBJECT; token = context.parser().nextToken()) { + if (token == Token.FIELD_NAME) { + feature = context.parser().currentName(); + } else if (token == Token.VALUE_NULL) { + // ignore feature, this is consistent with numeric fields + } else if (token == Token.VALUE_NUMBER || token == Token.VALUE_STRING) { + final String key = name() + "." 
+ feature; + float value = context.parser().floatValue(true); + if (context.doc().getByKey(key) != null) { + throw new IllegalArgumentException("[feature_vector] fields do not support indexing multiple values for the same " + + "feature [" + key + "] in the same document"); + } + context.doc().addWithKey(key, new FeatureField(name(), feature, value)); + } else { + throw new IllegalArgumentException("[feature_vector] fields take hashes that map a feature to a strictly positive " + + "float, but got unexpected token " + token); + } + } + return null; // no mapping update + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + throw new AssertionError("parse is implemented directly"); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index 4a9aea21a8a..513f052212f 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -38,6 +38,7 @@ public class MapperExtrasPlugin extends Plugin implements MapperPlugin, SearchPl mappers.put(ScaledFloatFieldMapper.CONTENT_TYPE, new ScaledFloatFieldMapper.TypeParser()); mappers.put(TokenCountFieldMapper.CONTENT_TYPE, new TokenCountFieldMapper.TypeParser()); mappers.put(FeatureFieldMapper.CONTENT_TYPE, new FeatureFieldMapper.TypeParser()); + mappers.put(FeatureVectorFieldMapper.CONTENT_TYPE, new FeatureVectorFieldMapper.TypeParser()); return Collections.unmodifiableMap(mappers); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java index 761de46731d..3b7fb97eab1 100644 
--- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/query/FeatureQueryBuilder.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.FeatureFieldMapper.FeatureFieldType; +import org.elasticsearch.index.mapper.FeatureMetaFieldMapper; +import org.elasticsearch.index.mapper.FeatureVectorFieldMapper.FeatureVectorFieldType; import org.elasticsearch.index.mapper.MappedFieldType; import java.io.IOException; @@ -48,7 +50,7 @@ public final class FeatureQueryBuilder extends AbstractQueryBuilder mapper.parse(SourceToParse.source("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() .startObject() @@ -151,7 +152,7 @@ public class FeatureFieldMapperTests extends ESSingleNodeTestCase { .endObject()), XContentType.JSON))); assertEquals("[feature] fields do not support indexing multiple values for the same field [field] in the same document", - e.getCause().getMessage());*/ + e.getCause().getMessage()); e = expectThrows(MapperParsingException.class, () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapperTests.java new file mode 100644 index 00000000000..fccb62b1a34 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldMapperTests.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +public class FeatureVectorFieldMapperTests extends ESSingleNodeTestCase { + + IndexService indexService; + DocumentMapperParser parser; + + @Before + public void setup() { + indexService = createIndex("test"); + parser = indexService.mapperService().documentMapperParser(); + } + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + public void testDefaults() throws Exception { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature_vector").endObject().endObject() + .endObject().endObject()); + + DocumentMapper mapper 
= parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("field") + .field("foo", 10) + .field("bar", 20) + .endObject() + .endObject()), + XContentType.JSON)); + + IndexableField[] fields = doc1.rootDoc().getFields("field"); + assertEquals(2, fields.length); + assertThat(fields[0], Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = (FeatureField) fields[0]; + assertThat(featureField1.stringValue(), Matchers.equalTo("foo")); + FeatureField featureField2 = (FeatureField) fields[1]; + assertThat(featureField2.stringValue(), Matchers.equalTo("bar")); + + int freq1 = FeatureFieldMapperTests.getFrequency(featureField1.tokenStream(null, null)); + int freq2 = FeatureFieldMapperTests.getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 < freq2); + } + + public void testRejectMultiValuedFields() throws MapperParsingException, IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field").field("type", "feature_vector").endObject().startObject("foo") + .startObject("properties").startObject("field").field("type", "feature_vector").endObject().endObject() + .endObject().endObject().endObject().endObject()); + + DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); + + assertEquals(mapping, mapper.mappingSource().toString()); + + MapperParsingException e = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .startObject("field") + .field("foo", Arrays.asList(10, 20)) + .endObject() + .endObject()), + XContentType.JSON))); + assertEquals("[feature_vector] fields 
take hashes that map a feature to a strictly positive float, but got unexpected token " + + "START_ARRAY", e.getCause().getMessage()); + + e = expectThrows(MapperParsingException.class, + () -> mapper.parse(SourceToParse.source("test", "type", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .startArray("foo") + .startObject() + .startObject("field") + .field("bar", 10) + .endObject() + .endObject() + .startObject() + .startObject("field") + .field("bar", 20) + .endObject() + .endObject() + .endArray() + .endObject()), + XContentType.JSON))); + assertEquals("[feature_vector] fields do not support indexing multiple values for the same feature [foo.field.bar] in the same " + + "document", e.getCause().getMessage()); + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/RoundRobinSupplier.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldTypeTests.java similarity index 61% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/RoundRobinSupplier.java rename to modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldTypeTests.java index 311403a4885..e8d84ce7a02 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/RoundRobinSupplier.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/FeatureVectorFieldTypeTests.java @@ -17,23 +17,13 @@ * under the License. 
*/ -package org.elasticsearch.nio; +package org.elasticsearch.index.mapper; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Supplier; +public class FeatureVectorFieldTypeTests extends FieldTypeTestCase { -public class RoundRobinSupplier implements Supplier { - - private final S[] selectors; - private final int count; - private AtomicInteger counter = new AtomicInteger(0); - - RoundRobinSupplier(S[] selectors) { - this.count = selectors.length; - this.selectors = selectors; + @Override + protected MappedFieldType createDefaultFieldType() { + return new FeatureVectorFieldMapper.FeatureVectorFieldType(); } - public S get() { - return selectors[counter.getAndIncrement() % count]; - } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java index f9101447ef7..dd7b42487fd 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/query/FeatureQueryBuilderTests.java @@ -33,8 +33,10 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.List; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.either; @@ -46,7 +48,8 @@ public class FeatureQueryBuilderTests extends AbstractQueryTestCase fields = new ArrayList<>(); + fields.add("my_feature_field"); + fields.add("unmapped_field"); + fields.add("my_feature_vector_field.feature"); + if (mayUseNegativeField) { + fields.add("my_negative_feature_field"); + } + + final String field = randomFrom(fields); + return new FeatureQueryBuilder(field, function); } @Override @@ -103,7 +117,7 @@ public class 
FeatureQueryBuilderTests extends AbstractQueryTestCase parseQuery(query).toQuery(createShardContext())); - assertEquals("[feature] query only works on [feature] fields, not [text]", e.getMessage()); + assertEquals("[feature] query only works on [feature] fields and features of [feature_vector] fields, not [text]", e.getMessage()); } public void testIllegalCombination() throws IOException { diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature_vector/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature_vector/10_basic.yml new file mode 100644 index 00000000000..9cb8fa9c18d --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/feature_vector/10_basic.yml @@ -0,0 +1,103 @@ +setup: + - skip: + version: " - 6.99.99" + reason: "The feature_vector field was introduced in 7.0.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + _doc: + properties: + tags: + type: feature_vector + + - do: + index: + index: test + type: _doc + id: 1 + body: + tags: + foo: 3 + bar: 5 + + - do: + index: + index: test + type: _doc + id: 2 + body: + tags: + bar: 6 + quux: 10 + + - do: + indices.refresh: {} + +--- +"Log": + + - do: + search: + body: + query: + feature: + field: tags.bar + log: + scaling_factor: 3 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Saturation": + + - do: + search: + body: + query: + feature: + field: tags.bar + saturation: + pivot: 20 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" + +--- +"Sigmoid": + + - do: + search: + body: + query: + feature: + field: tags.bar + sigmoid: + pivot: 20 + exponent: 0.6 + + - match: + hits.total: 2 + + - match: + hits.hits.0._id: "2" + + - match: + hits.hits.1._id: "1" diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java 
b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 1be1acb1317..f77951dd58b 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -131,22 +131,19 @@ public class RatedRequestsTests extends ESTestCase { } } - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31104") public void testXContentParsingIsNotLenient() throws IOException { RatedRequest testItem = createTestItem(randomBoolean()); XContentType xContentType = randomFrom(XContentType.values()); BytesReference originalBytes = toShuffledXContent(testItem, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); BytesReference withRandomFields = insertRandomFields(xContentType, originalBytes, null, random()); try (XContentParser parser = createParser(xContentType.xContent(), withRandomFields)) { - Exception exception = expectThrows(Exception.class, () -> RatedRequest.fromXContent(parser)); - if (exception instanceof XContentParseException) { - XContentParseException xcpe = (XContentParseException) exception; - assertThat(xcpe.getCause().getMessage(), containsString("unknown field")); - assertThat(xcpe.getCause().getMessage(), containsString("parser not found")); - } - if (exception instanceof XContentParseException) { + Throwable exception = expectThrows(XContentParseException.class, () -> RatedRequest.fromXContent(parser)); + if (exception.getCause() != null) { assertThat(exception.getMessage(), containsString("[request] failed to parse field")); + exception = exception.getCause(); } + assertThat(exception.getMessage(), containsString("unknown field")); + assertThat(exception.getMessage(), containsString("parser not found")); } } diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java 
b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java index 04be1d0a751..fb20b73b61c 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobContainer.java @@ -82,11 +82,6 @@ public class URLBlobContainer extends AbstractBlobContainer { throw new UnsupportedOperationException("URL repository doesn't support this operation"); } - @Override - public void move(String from, String to) throws IOException { - throw new UnsupportedOperationException("URL repository doesn't support this operation"); - } - /** * This operation is not supported by URLBlobContainer */ diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 31c1214f034..61825ad4d27 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -202,15 +202,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); - this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), - Math.toIntExact(maxChunkSize.getBytes()), - Math.toIntExact(maxHeaderSize.getBytes()), - Math.toIntExact(maxInitialLineLength.getBytes()), - SETTING_HTTP_RESET_COOKIES.get(settings), - SETTING_HTTP_COMPRESSION.get(settings), - SETTING_HTTP_COMPRESSION_LEVEL.get(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), - pipeliningMaxEvents); + 
this.httpHandlingSettings = HttpHandlingSettings.fromSettings(settings); this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings); @@ -446,7 +438,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport { if (handlingSettings.isCompression()) { ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } - if (SETTING_CORS_ENABLED.get(transport.settings())) { + if (handlingSettings.isCorsEnabled()) { ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig())); } ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents)); diff --git a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java index 584428f9a45..b1450b79fab 100644 --- a/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java +++ b/plugins/repository-azure/qa/microsoft-azure-storage/src/test/java/org/elasticsearch/repositories/azure/AzureStorageTestServer.java @@ -33,7 +33,6 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicLong; import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -159,44 +158,13 @@ public class AzureStorageTestServer { objectsPaths("PUT " + endpoint + "/{container}").forEach(path -> handlers.insert(path, (params, headers, body, requestId) -> { final String destContainerName = params.get("container"); + final String destBlobName = objectName(params); final Container destContainer 
=containers.get(destContainerName); if (destContainer == null) { return newContainerNotFoundError(requestId); } - - final String destBlobName = objectName(params); - - // Request is a copy request - List headerCopySource = headers.getOrDefault("x-ms-copy-source", emptyList()); - if (headerCopySource.isEmpty() == false) { - String srcBlobName = headerCopySource.get(0); - - Container srcContainer = null; - for (Container container : containers.values()) { - String prefix = endpoint + "/" + container.name + "/"; - if (srcBlobName.startsWith(prefix)) { - srcBlobName = srcBlobName.replaceFirst(prefix, ""); - srcContainer = container; - break; - } - } - - if (srcContainer == null || srcContainer.objects.containsKey(srcBlobName) == false) { - return newBlobNotFoundError(requestId); - } - - byte[] bytes = srcContainer.objects.get(srcBlobName); - if (bytes != null) { - destContainer.objects.put(destBlobName, bytes); - return new Response(RestStatus.ACCEPTED, singletonMap("x-ms-copy-status", "success"), "text/plain", EMPTY_BYTE); - } else { - return newBlobNotFoundError(requestId); - } - } else { - destContainer.objects.put(destBlobName, body); - } - + destContainer.objects.put(destBlobName, body); return new Response(RestStatus.CREATED, emptyMap(), "text/plain", EMPTY_BYTE); }) ); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 8f7671697db..dd85bfc2181 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -127,22 +127,6 @@ public class AzureBlobContainer extends AbstractBlobContainer { } } - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - logger.trace("move({}, {})", sourceBlobName, 
targetBlobName); - try { - String source = keyPath + sourceBlobName; - String target = keyPath + targetBlobName; - - logger.debug("moving blob [{}] to [{}] in container {{}}", source, target, blobStore); - - blobStore.moveBlob(source, target); - } catch (URISyntaxException | StorageException e) { - logger.warn("can not move blob [{}] to [{}] in container {{}}: {}", sourceBlobName, targetBlobName, blobStore, e.getMessage()); - throw new IOException(e); - } - } - @Override public Map listBlobs() throws IOException { logger.trace("listBlobs()"); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 7e8987ae945..fede20bfb76 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -97,31 +97,23 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { return this.client.doesContainerExist(this.clientName, this.locMode, container); } - public boolean blobExists(String blob) throws URISyntaxException, StorageException - { + public boolean blobExists(String blob) throws URISyntaxException, StorageException { return this.client.blobExists(this.clientName, this.locMode, container, blob); } - public void deleteBlob(String blob) throws URISyntaxException, StorageException - { + public void deleteBlob(String blob) throws URISyntaxException, StorageException { this.client.deleteBlob(this.clientName, this.locMode, container, blob); } - public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException - { + public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException { return this.client.getInputStream(this.clientName, this.locMode, container, blob); } - public Map 
listBlobsByPrefix(String keyPath, String prefix) + public Map listBlobsByPrefix(String keyPath, String prefix) throws URISyntaxException, StorageException { return this.client.listBlobsByPrefix(this.clientName, this.locMode, container, keyPath, prefix); } - public void moveBlob(String sourceBlob, String targetBlob) throws URISyntaxException, StorageException - { - this.client.moveBlob(this.clientName, this.locMode, container, sourceBlob, targetBlob); - } - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException { this.client.writeBlob(this.clientName, this.locMode, container, blobName, inputStream, blobSize); } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 3337c07e6ee..8cd3835e3e6 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -57,9 +57,6 @@ public interface AzureStorageService { Map listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) throws URISyntaxException, StorageException; - void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) - throws URISyntaxException, StorageException; - void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java index 6f4f8cfea96..339aec43c25 100644 --- 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java @@ -287,24 +287,6 @@ public class AzureStorageServiceImpl extends AbstractComponent implements AzureS return blobsBuilder.immutableMap(); } - @Override - public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) - throws URISyntaxException, StorageException { - logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}]", container, sourceBlob, targetBlob); - - CloudBlobClient client = this.getSelectedClient(account, mode); - CloudBlobContainer blobContainer = client.getContainerReference(container); - CloudBlockBlob blobSource = blobContainer.getBlockBlobReference(sourceBlob); - if (SocketAccess.doPrivilegedException(() -> blobSource.exists(null, null, generateOperationContext(account)))) { - CloudBlockBlob blobTarget = blobContainer.getBlockBlobReference(targetBlob); - SocketAccess.doPrivilegedVoidException(() -> { - blobTarget.startCopy(blobSource, null, null, null, generateOperationContext(account)); - blobSource.delete(DeleteSnapshotsOption.NONE, null, null, generateOperationContext(account)); - }); - logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, sourceBlob, targetBlob); - } - } - @Override public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException { diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 80035d8f788..0e0f73446ba 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -106,18 +106,6 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS return blobsBuilder.immutableMap(); } - @Override - public void moveBlob(String account, LocationMode mode, String container, String sourceBlob, String targetBlob) - throws URISyntaxException, StorageException { - for (String blobName : blobs.keySet()) { - if (endsWithIgnoreCase(blobName, sourceBlob)) { - ByteArrayOutputStream outputStream = blobs.get(blobName); - blobs.put(blobName.replace(sourceBlob, targetBlob), outputStream); - blobs.remove(blobName); - } - } - } - @Override public void writeBlob(String account, LocationMode mode, String container, String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException { @@ -137,7 +125,7 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS * @param prefix the prefix to look for * @see java.lang.String#startsWith */ - public static boolean startsWithIgnoreCase(String str, String prefix) { + private static boolean startsWithIgnoreCase(String str, String prefix) { if (str == null || prefix == null) { return false; } @@ -152,29 +140,6 @@ public class AzureStorageServiceMock extends AbstractComponent implements AzureS return lcStr.equals(lcPrefix); } - /** - * Test if the given String ends with the specified suffix, - * ignoring upper/lower case. 
- * - * @param str the String to check - * @param suffix the suffix to look for - * @see java.lang.String#startsWith - */ - public static boolean endsWithIgnoreCase(String str, String suffix) { - if (str == null || suffix == null) { - return false; - } - if (str.endsWith(suffix)) { - return true; - } - if (str.length() < suffix.length()) { - return false; - } - String lcStr = str.substring(0, suffix.length()).toLowerCase(Locale.ROOT); - String lcPrefix = suffix.toLowerCase(Locale.ROOT); - return lcStr.equals(lcPrefix); - } - private static class PermissionRequiringInputStream extends ByteArrayInputStream { private PermissionRequiringInputStream(byte[] buf) { diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java index a9832ae318d..fd09b46c73f 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java @@ -367,47 +367,6 @@ public class GoogleCloudStorageTestServer { return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectId, body)); }); - // Rewrite or Copy Object - // - // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite - // https://cloud.google.com/storage/docs/json_api/v1/objects/copy - handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/{action}/b/{destBucket}/o/{dest}", - (params, headers, body) -> { - final String action = params.get("action"); - if ((action.equals("rewriteTo") == false) && (action.equals("copyTo") == false)) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "Action not implemented. 
None of \"rewriteTo\" or \"copyTo\"."); - } - final String source = params.get("src"); - if (Strings.hasText(source) == false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); - } - final Bucket srcBucket = buckets.get(params.get("srcBucket")); - if (srcBucket == null) { - return newError(RestStatus.NOT_FOUND, "source bucket not found"); - } - final String dest = params.get("dest"); - if (Strings.hasText(dest) == false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); - } - final Bucket destBucket = buckets.get(params.get("destBucket")); - if (destBucket == null) { - return newError(RestStatus.NOT_FOUND, "destination bucket not found"); - } - final byte[] sourceBytes = srcBucket.objects.get(source); - if (sourceBytes == null) { - return newError(RestStatus.NOT_FOUND, "source object not found"); - } - destBucket.objects.put(dest, sourceBytes); - if (action.equals("rewriteTo")) { - final XContentBuilder respBuilder = jsonBuilder(); - buildRewriteResponse(respBuilder, destBucket.name, dest, sourceBytes.length); - return newResponse(RestStatus.OK, emptyMap(), respBuilder); - } else { - assert action.equals("copyTo"); - return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(destBucket.name, dest, sourceBytes)); - } - }); - // List Objects // // https://cloud.google.com/storage/docs/json_api/v1/objects/list @@ -701,28 +660,4 @@ public class GoogleCloudStorageTestServer { .field("size", String.valueOf(bytes.length)) .endObject(); } - - /** - * Builds the rewrite response as defined by - * https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite - */ - private static XContentBuilder buildRewriteResponse(final XContentBuilder builder, - final String destBucket, - final String dest, - final int byteSize) throws IOException { - builder.startObject() - .field("kind", "storage#rewriteResponse") - .field("totalBytesRewritten", String.valueOf(byteSize)) - .field("objectSize", 
String.valueOf(byteSize)) - .field("done", true) - .startObject("resource") - .field("kind", "storage#object") - .field("id", String.join("/", destBucket, dest)) - .field("name", dest) - .field("bucket", destBucket) - .field("size", String.valueOf(byteSize)) - .endObject() - .endObject(); - return builder; - } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java index 83353990510..54bbd316f76 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobContainer.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import java.io.IOException; import java.io.InputStream; -import java.nio.file.FileAlreadyExistsException; import java.util.Map; class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { @@ -74,11 +73,6 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer { blobStore.deleteBlob(buildKey(blobName)); } - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - blobStore.moveBlob(buildKey(sourceBlobName), buildKey(targetBlobName)); - } - protected String buildKey(String blobName) { assert blobName != null; return path + blobName; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 83aafdde2b1..78fd9461ad5 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ 
-27,7 +27,6 @@ import com.google.cloud.storage.BlobInfo; import com.google.cloud.storage.Bucket; import com.google.cloud.storage.Storage; import com.google.cloud.storage.Storage.BlobListOption; -import com.google.cloud.storage.Storage.CopyRequest; import com.google.cloud.storage.StorageException; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobContainer; @@ -314,29 +313,6 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore } } - /** - * Moves a blob within the same bucket - * - * @param sourceBlobName name of the blob to move - * @param targetBlobName new name of the blob in the same bucket - */ - void moveBlob(String sourceBlobName, String targetBlobName) throws IOException { - final BlobId sourceBlobId = BlobId.of(bucket, sourceBlobName); - final BlobId targetBlobId = BlobId.of(bucket, targetBlobName); - final CopyRequest request = CopyRequest.newBuilder() - .setSource(sourceBlobId) - .setTarget(targetBlobId) - .build(); - SocketAccess.doPrivilegedVoidIOException(() -> { - // There's no atomic "move" in GCS so we need to copy and delete - storage.copy(request).getResult(); - final boolean deleted = storage.delete(sourceBlobId); - if (deleted == false) { - throw new IOException("Failed to move source [" + sourceBlobName + "] to target [" + targetBlobName + "]"); - } - }); - } - private static String buildKey(String keyPath, String s) { assert s != null; return keyPath + s; diff --git a/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java index f2b8a0571ad..a08ae2f9f8a 100644 --- a/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java +++ b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java @@ -21,8 +21,6 @@ package com.google.cloud.storage; import 
com.google.cloud.storage.spi.v1.StorageRpc; -import static org.mockito.Mockito.mock; - /** * Utility class that exposed Google SDK package protected methods to * create specific StorageRpc objects in unit tests. @@ -42,13 +40,4 @@ public class StorageRpcOptionUtils { } return null; } - - public static CopyWriter createCopyWriter(final Blob result) { - return new CopyWriter(mock(StorageOptions.class), mock(StorageRpc.RewriteResponse.class)) { - @Override - public Blob getResult() { - return result; - } - }; - } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index 1b31b3018e4..605d1798ee8 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -38,7 +38,6 @@ import com.google.cloud.storage.StorageException; import com.google.cloud.storage.StorageOptions; import com.google.cloud.storage.StorageRpcOptionUtils; import com.google.cloud.storage.StorageTestUtils; - import org.elasticsearch.core.internal.io.IOUtils; import java.io.ByteArrayInputStream; @@ -125,24 +124,6 @@ class MockStorage implements Storage { return get(BlobId.of(blobInfo.getBucket(), blobInfo.getName())); } - @Override - public CopyWriter copy(CopyRequest copyRequest) { - if (bucketName.equals(copyRequest.getSource().getBucket()) == false) { - throw new StorageException(404, "Source bucket not found"); - } - if (bucketName.equals(copyRequest.getTarget().getBucket()) == false) { - throw new StorageException(404, "Target bucket not found"); - } - - final byte[] bytes = blobs.get(copyRequest.getSource().getName()); - if (bytes == null) { - throw new StorageException(404, "Source blob does not exist"); - } - blobs.put(copyRequest.getTarget().getName(), bytes); - return StorageRpcOptionUtils - 
.createCopyWriter(get(BlobId.of(copyRequest.getTarget().getBucket(), copyRequest.getTarget().getName()))); - } - @Override public Page list(String bucket, BlobListOption... options) { if (bucketName.equals(bucket) == false) { @@ -269,6 +250,11 @@ class MockStorage implements Storage { // Everything below this line is not implemented. + @Override + public CopyWriter copy(CopyRequest copyRequest) { + return null; + } + @Override public Bucket create(BucketInfo bucketInfo, BucketTargetOption... options) { return null; diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java index 926cf0b2ad4..23557ae6cf8 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java @@ -23,7 +23,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Options.CreateOpts; import org.apache.hadoop.fs.Path; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; @@ -31,14 +30,12 @@ import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.repositories.hdfs.HdfsBlobStore.Operation; +import java.io.FileNotFoundException; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; -import java.security.AccessController; -import java.security.PrivilegedActionException; -import java.security.PrivilegedExceptionAction; import java.util.Collections; import 
java.util.EnumSet; import java.util.LinkedHashMap; @@ -69,33 +66,28 @@ final class HdfsBlobContainer extends AbstractBlobContainer { @Override public void deleteBlob(String blobName) throws IOException { - if (!blobExists(blobName)) { - throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); + try { + if (store.execute(fileContext -> fileContext.delete(new Path(path, blobName), true)) == false) { + throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); + } + } catch (FileNotFoundException fnfe) { + throw new NoSuchFileException("[" + blobName + "] blob not found"); } - - store.execute(fileContext -> fileContext.delete(new Path(path, blobName), true)); - } - - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - store.execute((Operation) fileContext -> { - fileContext.rename(new Path(path, sourceBlobName), new Path(path, targetBlobName)); - return null; - }); } @Override public InputStream readBlob(String blobName) throws IOException { - if (!blobExists(blobName)) { - throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); - } // FSDataInputStream does buffering internally // FSDataInputStream can open connections on read() or skip() so we wrap in // HDFSPrivilegedInputSteam which will ensure that underlying methods will // be called with the proper privileges. 
- return store.execute(fileContext -> - new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext) - ); + try { + return store.execute(fileContext -> + new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext) + ); + } catch (FileNotFoundException fnfe) { + throw new NoSuchFileException("[" + blobName + "] blob not found"); + } } @Override diff --git a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java index a3ea287b7f8..029b28320d2 100644 --- a/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java +++ b/plugins/repository-s3/qa/amazon-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java @@ -162,10 +162,9 @@ public class AmazonS3TestServer { }) ); - // PUT Object & PUT Object Copy + // PUT Object // // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html objectsPaths("PUT " + endpoint + "/{bucket}").forEach(path -> handlers.insert(path, (params, headers, body, id) -> { final String destBucketName = params.get("bucket"); @@ -177,65 +176,38 @@ public class AmazonS3TestServer { final String destObjectName = objectName(params); - // Request is a copy request - List headerCopySource = headers.getOrDefault("x-amz-copy-source", emptyList()); - if (headerCopySource.isEmpty() == false) { - String srcObjectName = headerCopySource.get(0); + // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" + // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. 
+ // + // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + // + List headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList()); + if (headerDecodedContentLength.size() == 1) { + int contentLength = Integer.valueOf(headerDecodedContentLength.get(0)); - Bucket srcBucket = null; - for (Bucket bucket : buckets.values()) { - String prefix = "/" + bucket.name + "/"; - if (srcObjectName.startsWith(prefix)) { - srcObjectName = srcObjectName.replaceFirst(prefix, ""); - srcBucket = bucket; - break; - } - } - - if (srcBucket == null || srcBucket.objects.containsKey(srcObjectName) == false) { - return newObjectNotFoundError(id, srcObjectName); - } - - byte[] bytes = srcBucket.objects.get(srcObjectName); - if (bytes != null) { - destBucket.objects.put(destObjectName, bytes); - return newCopyResultResponse(id); - } else { - return newObjectNotFoundError(id, srcObjectName); - } - } else { - // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" - // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. + // Chunked requests have a payload like this: // - // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8 + // ... bytes of data .... + // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4 // - List headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList()); - if (headerDecodedContentLength.size() == 1) { - int contentLength = Integer.valueOf(headerDecodedContentLength.get(0)); - - // Chunked requests have a payload like this: - // - // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8 - // ... bytes of data .... 
- // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4 - // - try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) { - int b; - // Moves to the end of the first signature line - while ((b = inputStream.read()) != -1) { - if (b == '\n') { - break; - } + try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) { + int b; + // Moves to the end of the first signature line + while ((b = inputStream.read()) != -1) { + if (b == '\n') { + break; } - - final byte[] bytes = new byte[contentLength]; - inputStream.read(bytes, 0, contentLength); - - destBucket.objects.put(destObjectName, bytes); - return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); } + + final byte[] bytes = new byte[contentLength]; + inputStream.read(bytes, 0, contentLength); + + destBucket.objects.put(destObjectName, bytes); + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); } } + return newInternalError(id, "Something is wrong with this PUT request"); }) ); @@ -466,20 +438,6 @@ public class AmazonS3TestServer { return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); } - /** - * S3 Copy Result Response - */ - private static Response newCopyResultResponse(final long requestId) { - final String id = Long.toString(requestId); - final StringBuilder response = new StringBuilder(); - response.append(""); - response.append(""); - response.append("").append(DateUtils.formatISO8601Date(new Date())).append(""); - response.append("").append(requestId).append(""); - response.append(""); - return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); - } - /** * S3 DeleteResult Response */ diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java 
b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 401ef0933a8..173609b7cfe 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -24,7 +24,6 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.CopyObjectRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.ObjectMetadata; @@ -96,10 +95,6 @@ class S3BlobContainer extends AbstractBlobContainer { @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - if (blobExists(blobName)) { - throw new FileAlreadyExistsException("Blob [" + blobName + "] already exists, cannot overwrite"); - } - SocketAccess.doPrivilegedIOException(() -> { if (blobSize <= blobStore.bufferSizeInBytes()) { executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize); @@ -156,28 +151,6 @@ class S3BlobContainer extends AbstractBlobContainer { }); } - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - try { - CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName), - blobStore.bucket(), buildKey(targetBlobName)); - - if (blobStore.serverSideEncryption()) { - ObjectMetadata objectMetadata = new ObjectMetadata(); - objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); - request.setNewObjectMetadata(objectMetadata); - } - - SocketAccess.doPrivilegedVoid(() -> { - blobStore.client().copyObject(request); - 
blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName)); - }); - - } catch (AmazonS3Exception e) { - throw new IOException(e); - } - } - @Override public Map listBlobs() throws IOException { return listBlobsByPrefix(null); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java index caa1c0b467e..33d5d5fbc20 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/MockAmazonS3.java @@ -23,8 +23,6 @@ import com.amazonaws.AmazonClientException; import com.amazonaws.SdkClientException; import com.amazonaws.services.s3.AbstractAmazonS3; import com.amazonaws.services.s3.model.AmazonS3Exception; -import com.amazonaws.services.s3.model.CopyObjectRequest; -import com.amazonaws.services.s3.model.CopyObjectResult; import com.amazonaws.services.s3.model.DeleteObjectRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.DeleteObjectsResult; @@ -148,24 +146,6 @@ class MockAmazonS3 extends AbstractAmazonS3 { return listing; } - @Override - public CopyObjectResult copyObject(final CopyObjectRequest request) throws AmazonClientException { - assertThat(request.getSourceBucketName(), equalTo(bucket)); - assertThat(request.getDestinationBucketName(), equalTo(bucket)); - - final String sourceBlobName = request.getSourceKey(); - - final byte[] content = blobs.get(sourceBlobName); - if (content == null) { - AmazonS3Exception exception = new AmazonS3Exception("[" + sourceBlobName + "] does not exist."); - exception.setStatusCode(404); - throw exception; - } - - blobs.put(request.getDestinationKey(), content); - return new CopyObjectResult(); - } - @Override public void deleteObject(final DeleteObjectRequest request) throws AmazonClientException { 
assertThat(request.getBucketName(), equalTo(bucket)); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java index 453ef3213f0..c760e86d135 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreContainerTests.java @@ -64,6 +64,11 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase { return randomMockS3BlobStore(); } + @Override + public void testVerifyOverwriteFails() { + assumeFalse("not implemented because of S3's weak consistency model", true); + } + public void testExecuteSingleUploadBlobSizeTooLarge() { final long blobSize = ByteSizeUnit.GB.toBytes(randomIntBetween(6, 10)); final S3BlobStore blobStore = mock(S3BlobStore.class); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java index ce0ed83aad4..92289350bba 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -47,10 +47,9 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpStats; import org.elasticsearch.http.nio.cors.NioCorsConfig; import org.elasticsearch.http.nio.cors.NioCorsConfigBuilder; -import org.elasticsearch.nio.AcceptingSelector; -import org.elasticsearch.nio.AcceptorEventHandler; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioChannel; import 
org.elasticsearch.nio.NioGroup; @@ -58,8 +57,7 @@ import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; -import org.elasticsearch.nio.SocketEventHandler; -import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.nio.NioSelector; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -110,9 +108,6 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "http.nio.worker_count"), Setting.Property.NodeScope); - private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "http_nio_transport_worker"; - private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = "http_nio_transport_acceptor"; - private final BigArrays bigArrays; private final ThreadPool threadPool; private final NamedXContentRegistry xContentRegistry; @@ -142,15 +137,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); int pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); - this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), - Math.toIntExact(maxChunkSize.getBytes()), - Math.toIntExact(maxHeaderSize.getBytes()), - Math.toIntExact(maxInitialLineLength.getBytes()), - SETTING_HTTP_RESET_COOKIES.get(settings), - SETTING_HTTP_COMPRESSION.get(settings), - SETTING_HTTP_COMPRESSION_LEVEL.get(settings), - SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), - pipeliningMaxEvents); + this.httpHandlingSettings = HttpHandlingSettings.fromSettings(settings);; this.corsConfig = buildCorsConfig(settings); this.tcpNoDelay = 
SETTING_HTTP_TCP_NO_DELAY.get(settings); @@ -179,10 +166,9 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { try { int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings); int workerCount = NIO_HTTP_WORKER_COUNT.get(settings); - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - (s) -> new AcceptorEventHandler(s, this::nonChannelExceptionCaught), - daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), workerCount, - () -> new SocketEventHandler(this::nonChannelExceptionCaught)); + nioGroup = new NioGroup(daemonThreadFactory(this.settings, HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, + daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX), workerCount, + (s) -> new EventHandler(this::nonChannelExceptionCaught, s)); channelFactory = new HttpChannelFactory(); this.boundAddress = createBoundHttpAddress(); @@ -360,7 +346,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { } @Override - public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + public NioSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { NioSocketChannel nioChannel = new NioSocketChannel(channel); HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(nioChannel,NioHttpServerTransport.this, httpHandlingSettings, xContentRegistry, corsConfig, threadPool.getThreadContext()); @@ -372,7 +358,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport { } @Override - public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + public NioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel); Consumer exceptionHandler = (e) 
-> logger.error(() -> new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java index 2ef49d77912..b85d707dcd9 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java @@ -31,16 +31,14 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.nio.AcceptingSelector; -import org.elasticsearch.nio.AcceptorEventHandler; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ServerChannelContext; -import org.elasticsearch.nio.SocketEventHandler; -import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; @@ -55,23 +53,18 @@ import java.util.concurrent.ConcurrentMap; import java.util.function.Consumer; import java.util.function.Supplier; -import static org.elasticsearch.common.settings.Setting.intSetting; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; public class NioTransport extends TcpTransport { private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = 
Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; - private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX; public static final Setting NIO_WORKER_COUNT = new Setting<>("transport.nio.worker_count", (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.nio.worker_count"), Setting.Property.NodeScope); - public static final Setting NIO_ACCEPTOR_COUNT = - intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); - protected final PageCacheRecycler pageCacheRecycler; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); private volatile NioGroup nioGroup; @@ -101,20 +94,13 @@ public class NioTransport extends TcpTransport { protected void doStart() { boolean success = false; try { - int acceptorCount = 0; - boolean useNetworkServer = NetworkService.NETWORK_SERVER.get(settings); - if (useNetworkServer) { - acceptorCount = NioTransport.NIO_ACCEPTOR_COUNT.get(settings); - } - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - (s) -> new AcceptorEventHandler(s, this::onNonChannelException), - daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), NioTransport.NIO_WORKER_COUNT.get(settings), - () -> new SocketEventHandler(this::onNonChannelException)); + nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), + NioTransport.NIO_WORKER_COUNT.get(settings), (s) -> new EventHandler(this::onNonChannelException, s)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = channelFactory(clientProfileSettings, true); - if (useNetworkServer) { + if (NetworkService.NETWORK_SERVER.get(settings)) { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { String profileName = 
profileSettings.profileName; @@ -178,7 +164,7 @@ public class NioTransport extends TcpTransport { } @Override - public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + public TcpNioSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { TcpNioSocketChannel nioChannel = new TcpNioSocketChannel(profileName, channel); Supplier pageSupplier = () -> { Recycler.V bytes = pageCacheRecycler.bytePage(false); @@ -193,7 +179,7 @@ public class NioTransport extends TcpTransport { } @Override - public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + public TcpNioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel); Consumer exceptionHandler = (e) -> logger.error(() -> new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java index 422e3e9b833..1cc94f18dd3 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java @@ -50,8 +50,7 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin { return Arrays.asList( NioHttpServerTransport.NIO_HTTP_ACCEPTOR_COUNT, NioHttpServerTransport.NIO_HTTP_WORKER_COUNT, - NioTransport.NIO_WORKER_COUNT, - NioTransport.NIO_ACCEPTOR_COUNT + NioTransport.NIO_WORKER_COUNT ); } diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java 
b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java index c63acc9f4de..946563225c6 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioServerSocketChannel.java @@ -21,8 +21,6 @@ package org.elasticsearch.transport.nio; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.nio.AcceptingSelector; -import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.transport.TcpChannel; diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java index 44ab17457e8..ef2bc875aa9 100644 --- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpNioSocketChannel.java @@ -22,7 +22,6 @@ package org.elasticsearch.transport.nio; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.transport.TcpChannel; import java.io.IOException; diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java index 56cbab5295a..dbd247b9671 100644 --- a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java @@ -56,6 +56,7 @@ import java.nio.ByteBuffer; import java.util.List; import 
java.util.function.BiConsumer; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -94,7 +95,8 @@ public class HttpReadWriteHandlerTests extends ESTestCase { SETTING_HTTP_COMPRESSION.getDefault(settings), SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings), SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings), - SETTING_PIPELINING_MAX_EVENTS.getDefault(settings)); + SETTING_PIPELINING_MAX_EVENTS.getDefault(settings), + SETTING_CORS_ENABLED.getDefault(settings)); ThreadContext threadContext = new ThreadContext(settings); nioSocketChannel = mock(NioSocketChannel.class); handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index f7b87905b24..73df782c920 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -36,6 +36,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -133,19 +134,20 @@ public class CrossClusterSearchUnavailableClusterIT 
extends ESRestTestCase { updateRemoteClusterSettings(Collections.singletonMap("seeds", remoteNode.getAddress().toString())); for (int i = 0; i < 10; i++) { - restHighLevelClient.index(new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value")); + restHighLevelClient.index( + new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value"), RequestOptions.DEFAULT); } Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index"), RequestOptions.DEFAULT); assertSame(SearchResponse.Clusters.EMPTY, response.getClusters()); assertEquals(10, response.getHits().totalHits); assertEquals(10, response.getHits().getHits().length); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(2, response.getClusters().getSuccessful()); assertEquals(0, response.getClusters().getSkipped()); @@ -153,7 +155,7 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { assertEquals(10, response.getHits().getHits().length); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); assertEquals(1, response.getClusters().getTotal()); assertEquals(1, response.getClusters().getSuccessful()); assertEquals(0, response.getClusters().getSkipped()); @@ -161,14 +163,15 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { } { - SearchResponse response = 
restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m"), + RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(2, response.getClusters().getSuccessful()); assertEquals(0, response.getClusters().getSkipped()); assertEquals(10, response.getHits().totalHits); assertEquals(10, response.getHits().getHits().length); String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId)); + SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); assertEquals(10, scrollResponse.getHits().totalHits); assertEquals(0, scrollResponse.getHits().getHits().length); @@ -179,7 +182,7 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { updateRemoteClusterSettings(Collections.singletonMap("skip_unavailable", true)); { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(1, response.getClusters().getSuccessful()); assertEquals(1, response.getClusters().getSkipped()); @@ -187,7 +190,7 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { assertEquals(10, response.getHits().getHits().length); } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT); assertEquals(1, response.getClusters().getTotal()); assertEquals(0, response.getClusters().getSuccessful()); 
assertEquals(1, response.getClusters().getSkipped()); @@ -195,14 +198,15 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { } { - SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m")); + SearchResponse response = restHighLevelClient.search(new SearchRequest("index", "remote1:index").scroll("1m"), + RequestOptions.DEFAULT); assertEquals(2, response.getClusters().getTotal()); assertEquals(1, response.getClusters().getSuccessful()); assertEquals(1, response.getClusters().getSkipped()); assertEquals(10, response.getHits().totalHits); assertEquals(10, response.getHits().getHits().length); String scrollId = response.getScrollId(); - SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId)); + SearchResponse scrollResponse = restHighLevelClient.searchScroll(new SearchScrollRequest(scrollId), RequestOptions.DEFAULT); assertSame(SearchResponse.Clusters.EMPTY, scrollResponse.getClusters()); assertEquals(10, scrollResponse.getHits().totalHits); assertEquals(0, scrollResponse.getHits().getHits().length); @@ -266,19 +270,19 @@ public class CrossClusterSearchUnavailableClusterIT extends ESRestTestCase { private static void assertSearchConnectFailure() { { ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"))); + () -> restHighLevelClient.search(new SearchRequest("index", "remote1:index"), RequestOptions.DEFAULT)); ElasticsearchException rootCause = (ElasticsearchException)exception.getRootCause(); assertThat(rootCause.getMessage(), containsString("connect_exception")); } { ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index"))); + () -> restHighLevelClient.search(new SearchRequest("remote1:index"), RequestOptions.DEFAULT)); ElasticsearchException rootCause = 
(ElasticsearchException)exception.getRootCause(); assertThat(rootCause.getMessage(), containsString("connect_exception")); } { ElasticsearchException exception = expectThrows(ElasticsearchException.class, - () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"))); + () -> restHighLevelClient.search(new SearchRequest("remote1:index").scroll("1m"), RequestOptions.DEFAULT)); ElasticsearchException rootCause = (ElasticsearchException)exception.getRootCause(); assertThat(rootCause.getMessage(), containsString("connect_exception")); } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 6c4004074fa..bfd37863cc2 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -83,13 +83,15 @@ for (Version version : bwcVersions.wireCompatible) { * just stopped's data directory. */ dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'node.name', "upgraded-node-${stopNode}" } } Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) - configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, - 0, { oldClusterTest.nodes.get(1).transportUri() }) + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oldClusterTest.nodes.get(1).transportUri() + ',' + oldClusterTest.nodes.get(2).transportUri() }) Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") oneThirdUpgradedTestRunner.configure { @@ -100,8 +102,9 @@ for (Version version : bwcVersions.wireCompatible) { Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) - configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, - 1, { 
oneThirdUpgradedTest.nodes.get(0).transportUri() }) + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oldClusterTest.nodes.get(2).transportUri() + ',' + oneThirdUpgradedTest.nodes.get(0).transportUri() }) Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") twoThirdsUpgradedTestRunner.configure { @@ -112,8 +115,9 @@ for (Version version : bwcVersions.wireCompatible) { Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, - 2, { twoThirdsUpgradedTest.nodes.get(0).transportUri() }) + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oneThirdUpgradedTest.nodes.get(0).transportUri() + ',' + twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index f1e01d24acf..3898746e5c3 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -30,6 +30,10 @@ import java.nio.charset.StandardCharsets; * Basic test that indexed documents survive the rolling restart. See * {@link RecoveryIT} for much more in depth testing of the mechanism * by which they survive. + *

+ * This test is an almost exact copy of IndexingIT in the + * xpack rolling restart tests. We should work on a way to remove this + * duplication but for now we have no real way to share code. */ public class IndexingIT extends AbstractRollingTestCase { public void testIndexing() throws IOException { diff --git a/qa/smoke-test-http/build.gradle b/qa/smoke-test-http/build.gradle index 03440912d04..ef1a97fc7ab 100644 --- a/qa/smoke-test-http/build.gradle +++ b/qa/smoke-test-http/build.gradle @@ -23,6 +23,7 @@ apply plugin: 'elasticsearch.test-with-dependencies' dependencies { testCompile project(path: ':modules:transport-netty4', configuration: 'runtime') // for http + testCompile project(path: ':plugins:transport-nio', configuration: 'runtime') // for http } integTestRunner { diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java index 316acb02a75..bac5423e751 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -25,6 +25,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.transport.MockTcpTransportPlugin; import org.elasticsearch.transport.Netty4Plugin; import org.elasticsearch.transport.nio.MockNioTransportPlugin; +import org.elasticsearch.transport.nio.NioTransportPlugin; import org.junit.BeforeClass; import java.util.Arrays; @@ -39,9 +40,9 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { @SuppressWarnings("unchecked") @BeforeClass public static void setUpTransport() { - nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class)); - nodeHttpTypeKey = getTypeKey(Netty4Plugin.class); - clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class)); + nodeTransportTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), 
Netty4Plugin.class, NioTransportPlugin.class)); + nodeHttpTypeKey = getHttpTypeKey(randomFrom(Netty4Plugin.class, NioTransportPlugin.class)); + clientTypeKey = getTypeKey(randomFrom(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class)); } private static String getTypeKey(Class clazz) { @@ -49,12 +50,23 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { return MockTcpTransportPlugin.MOCK_TCP_TRANSPORT_NAME; } else if (clazz.equals(MockNioTransportPlugin.class)) { return MockNioTransportPlugin.MOCK_NIO_TRANSPORT_NAME; + } else if (clazz.equals(NioTransportPlugin.class)) { + return NioTransportPlugin.NIO_TRANSPORT_NAME; } else { assert clazz.equals(Netty4Plugin.class); return Netty4Plugin.NETTY_TRANSPORT_NAME; } } + private static String getHttpTypeKey(Class clazz) { + if (clazz.equals(NioTransportPlugin.class)) { + return NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME; + } else { + assert clazz.equals(Netty4Plugin.class); + return Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME; + } + } + @Override protected boolean addMockHttpTransport() { return false; // enable http @@ -70,12 +82,12 @@ public abstract class HttpSmokeTestCase extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class); + return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); } @Override protected Collection> transportClientPlugins() { - return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class); + return Arrays.asList(getTestTransportPlugin(), Netty4Plugin.class, NioTransportPlugin.class); } @Override diff --git a/qa/vagrant/build.gradle b/qa/vagrant/build.gradle index a8dfe89b678..23d171f3125 100644 --- a/qa/vagrant/build.gradle +++ b/qa/vagrant/build.gradle @@ -31,7 +31,7 @@ dependencies { compile "org.hamcrest:hamcrest-core:${versions.hamcrest}" compile "org.hamcrest:hamcrest-library:${versions.hamcrest}" - compile 
project(':libs:elasticsearch-core') + compile project(':libs:core') // pulls in the jar built by this project and its dependencies packagingTest project(path: project.path, configuration: 'runtime') diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml index 905635e1d10..8b51a6c2d39 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/30_limits.yml @@ -122,3 +122,25 @@ setup: ]))|\\[([^\\[\\]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*\\>(?:(?:\\r\\n)?[ \\t])*)(?:,\\s*( | \".\\[\\]]))|\"(?:[^\\\"\\r\\\\]|\\\\.|(?:(?:\\r\\n)?[\\t]))*\"(?:(?:\\r\\n)?[ \\t])*)(?:\\.(?:( | \\[\"()<>@,;:\\\\\".\\[\\]]))|\"(?:[^\\\"\\r\\\\]|\\\\.|(?:(?:\\r\\n)?[\\t]))*\"(?:(?:\\r\\n)?[\\t" + + - do: + catch: /The length of regex \[1110\]/ + search: + index: test_1 + body: + query: + query_string: + query: "/^\\[\\]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*)(?:\\.(?:(?:\\r\\n)?[\\t])*(?:[^()<>@,;:\\\\\" | + .\\[\\]\\000-\\031]+(?:(?:(?:\\r\\n)?[\\t])+|\\Z|(?=[\\[\"()<>@,;:\\\\\".\\[\\]]))|\\[([^\\[\\ | + ]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*(?:,@(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@,;:\\\\\".\\ | + [\\]\\000-\\031]+(?:(?:(?:\\r\\n)?[\\t])+|\\Z|(?=[\\[\"()<>@,;:\\\\\".\\[\\]]))|\\[([^\\[\\]\\ | + r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*)(?:\\.(?:(?:\\r\\n)?[\\t])*(?:[^()<>@,;:\\\\\".\\[\\] | + \\000-\\031]+(?:(?:(?:\\r\\n)?[\\t])+|\\Z|(?=[\\[\"()<>@,;:\\\\\".\\[\\]]))|\\[([^\\[\\]\\r\\\\] | + |\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*)*:(?:(?:\\r\\n)?[\\t])*)?(?:[^()<>@,;:\\\\\".\\[\\] \\0 | + 00-\\031]+(?:(?:(?:\\r\\n)?[\\t])+|\\Z|(?=[\\[\"()<>@,;:\\\\\".\\[\\]]))|\"(?:[^\\\"\\r\\\\]|\\\\ | + .|(?:(?:\\r\\n)?[\\t]))*\"(?:(?:\\r\\n)?[\\t])*)(?:\\.(?:(?:\\r\\n)?[ \\t])*(?:[^()<>@, | + ;:\\\\\".\\[\\]\\000-\\031]+(?:(?:(?:\\r\\n)?[\\t])+|\\Z|(?=[\\[\"()<>@,;:\\\\\".\\[\\]]))|\"(? 
| + :[^\\\"\\r\\\\]|\\\\.|(?:(?:\\r\\n)?[\\t]))*\"(?:(?:\\r\\n)?[\\t])*))*@(?:(?:\\r\\n)?[ \\t])* | + ]))|\\[([^\\[\\]\\r\\\\]|\\\\.)*\\](?:(?:\\r\\n)?[\\t])*))*\\>(?:(?:\\r\\n)?[ \\t])*)(?:,\\s*( | + \".\\[\\]]))|\"(?:[^\\\"\\r\\\\]|\\\\.|(?:(?:\\r\\n)?[\\t]))*\"(?:(?:\\r\\n)?[ \\t])*)(?:\\.(?:( | + \\[\"()<>@,;:\\\\\".\\[\\]]))|\"(?:[^\\\"\\r\\\\]|\\\\.|(?:(?:\\r\\n)?[\\t]))*\"(?:(?:\\r\\n)?[\\t/" diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index fa4d751a54a..235effdcf44 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -206,6 +206,10 @@ import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.persistent.CompletionPersistentTaskAction; +import org.elasticsearch.persistent.RemovePersistentTaskAction; +import org.elasticsearch.persistent.StartPersistentTaskAction; +import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.ActionPlugin.ActionHandler; import org.elasticsearch.rest.RestController; @@ -241,7 +245,6 @@ import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; -import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestAnalyzeAction; import org.elasticsearch.rest.action.admin.indices.RestClearIndicesCacheAction; import 
org.elasticsearch.rest.action.admin.indices.RestCloseIndexAction; @@ -252,7 +255,6 @@ import org.elasticsearch.rest.action.admin.indices.RestFlushAction; import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; -import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -269,6 +271,7 @@ import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestPutMappingAction; import org.elasticsearch.rest.action.admin.indices.RestRecoveryAction; import org.elasticsearch.rest.action.admin.indices.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.RestResizeHandler; import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; @@ -313,10 +316,6 @@ import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.rest.action.search.RestSearchScrollAction; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; -import org.elasticsearch.persistent.CompletionPersistentTaskAction; -import org.elasticsearch.persistent.RemovePersistentTaskAction; -import org.elasticsearch.persistent.StartPersistentTaskAction; -import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import java.util.ArrayList; import java.util.Collections; @@ -556,7 +555,6 @@ public class ActionModule extends AbstractModule { registerHandler.accept(new RestSnapshotsStatusAction(settings, 
restController)); registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); - registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java index 5e7c2c0f97d..fbc81d29955 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java @@ -19,23 +19,48 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel; -import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.tasks.TaskInfo; +import java.io.IOException; import java.util.List; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + /** * Returns the list of tasks that were cancelled */ public class CancelTasksResponse extends ListTasksResponse { + private static final ConstructingObjectParser PARSER = + setupParser("cancel_tasks_response", CancelTasksResponse::new); + public CancelTasksResponse() { } - 
public CancelTasksResponse(List tasks, List taskFailures, List + public CancelTasksResponse(List tasks, List taskFailures, List nodeFailures) { super(tasks, taskFailures, nodeFailures); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return super.toXContent(builder, params); + } + + public static CancelTasksResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 53d80853328..cb1fcb0b091 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; @@ -70,8 +71,14 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb this.tasks = tasks == null ? 
Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks)); } - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("list_tasks_response", true, + + protected static ConstructingObjectParser setupParser(String name, + TriFunction< + List, + List, + List, + T> ctor) { + ConstructingObjectParser parser = new ConstructingObjectParser<>(name, true, constructingObjects -> { int i = 0; @SuppressWarnings("unchecked") @@ -80,16 +87,18 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContentOb List tasksFailures = (List) constructingObjects[i++]; @SuppressWarnings("unchecked") List nodeFailures = (List) constructingObjects[i]; - return new ListTasksResponse(tasks, tasksFailures, nodeFailures); + return ctor.apply(tasks,tasksFailures, nodeFailures); }); - - static { - PARSER.declareObjectArray(constructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); - PARSER.declareObjectArray(optionalConstructorArg(), - (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(NODE_FAILURES)); + parser.declareObjectArray(optionalConstructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); + parser.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); + parser.declareObjectArray(optionalConstructorArg(), + (p, c) -> ElasticsearchException.fromXContent(p), new ParseField(NODE_FAILURES)); + return parser; } + private static final ConstructingObjectParser PARSER = + setupParser("list_tasks_response", ListTasksResponse::new); + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java index 1f1fe524c64..f67d74cd118 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.repositories.delete; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** @@ -28,13 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; */ public class DeleteRepositoryResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("delete_repository", true, args -> new DeleteRepositoryResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - DeleteRepositoryResponse() { } @@ -43,6 +35,6 @@ public class DeleteRepositoryResponse extends AcknowledgedResponse { } public static DeleteRepositoryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new DeleteRepositoryResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java index 52a1a736ec7..23aae119e0d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.repositories.put; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import 
org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** @@ -28,13 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; */ public class PutRepositoryResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_repository", - true, args -> new PutRepositoryResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - PutRepositoryResponse() { } @@ -43,6 +35,6 @@ public class PutRepositoryResponse extends AcknowledgedResponse { } public static PutRepositoryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new PutRepositoryResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java index ebfc82fec74..31ebcbf0f0a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesResponse.java @@ -42,6 +42,6 @@ public class IndicesAliasesResponse extends AcknowledgedResponse { } public static IndicesAliasesResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new IndicesAliasesResponse(parseAcknowledged(parser)); } } \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index bfebaee5e59..96a3e150af1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -20,20 +20,12 @@ package 
org.elasticsearch.action.admin.indices.close; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** * A response for a close index action. */ public class CloseIndexResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("close_index", true, - args -> new CloseIndexResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - CloseIndexResponse() { } @@ -42,6 +34,6 @@ public class CloseIndexResponse extends AcknowledgedResponse { } public static CloseIndexResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new CloseIndexResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java index 3a04dc5b70b..b86549f536d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.delete; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** @@ -28,13 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; */ public class DeleteIndexResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("delete_index", - true, args -> new DeleteIndexResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - DeleteIndexResponse() { } @@ -43,6 +35,6 @@ public class DeleteIndexResponse extends 
AcknowledgedResponse { } public static DeleteIndexResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new DeleteIndexResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java index 1e022474955..4b2eac95c3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** @@ -28,15 +27,7 @@ import org.elasticsearch.common.xcontent.XContentParser; */ public class PutMappingResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_mapping", - true, args -> new PutMappingResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - protected PutMappingResponse() { - } protected PutMappingResponse(boolean acknowledged) { @@ -44,6 +35,6 @@ public class PutMappingResponse extends AcknowledgedResponse { } public static PutMappingResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new PutMappingResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java index 022f575f1d0..6792d185926 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.settings.put; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** @@ -28,13 +27,6 @@ import org.elasticsearch.common.xcontent.XContentParser; */ public class UpdateSettingsResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "update_index_settings", true, args -> new UpdateSettingsResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - UpdateSettingsResponse() { } @@ -43,7 +35,6 @@ public class UpdateSettingsResponse extends AcknowledgedResponse { } public static UpdateSettingsResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new UpdateSettingsResponse(parseAcknowledged(parser)); } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java index 59b00bd719b..b2dab55a3d9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; /** @@ -34,13 +33,7 @@ public class PutIndexTemplateResponse extends AcknowledgedResponse { super(acknowledged); } - private static final ConstructingObjectParser PARSER; - static { - PARSER = 
new ConstructingObjectParser<>("put_index_template", true, args -> new PutIndexTemplateResponse((boolean) args[0])); - declareAcknowledgedField(PARSER); - } - public static PutIndexTemplateResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new PutIndexTemplateResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java b/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java index b3bda3f1521..293a62b66f2 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/WritePipelineResponse.java @@ -20,21 +20,12 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentParser; public class WritePipelineResponse extends AcknowledgedResponse implements ToXContentObject { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "write_pipeline_response", true, args -> new WritePipelineResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - WritePipelineResponse() { - } public WritePipelineResponse(boolean acknowledged) { @@ -42,6 +33,6 @@ public class WritePipelineResponse extends AcknowledgedResponse implements ToXCo } public static WritePipelineResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + return new WritePipelineResponse(parseAcknowledged(parser)); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java index 41c806bc205..594dcda8c66 100644 --- 
a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedResponse.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; @@ -88,6 +89,21 @@ public abstract class AcknowledgedResponse extends ActionResponse implements ToX } + /** + * A generic parser that simply parses the acknowledged flag + */ + private static final ConstructingObjectParser ACKNOWLEDGED_FLAG_PARSER = new ConstructingObjectParser<>( + "acknowledged_flag", true, args -> (Boolean) args[0]); + + static { + ACKNOWLEDGED_FLAG_PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), ACKNOWLEDGED, + ObjectParser.ValueType.BOOLEAN); + } + + protected static boolean parseAcknowledged(XContentParser parser) { + return ACKNOWLEDGED_FLAG_PARSER.apply(parser, null); + } + @Override public boolean equals(Object o) { if (this == o) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java index 3bdc949752a..024cc44dd6a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; @@ -32,8 +31,6 @@ import org.elasticsearch.client.Client; import 
org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -57,6 +54,7 @@ import java.util.HashSet; import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.UnaryOperator; @@ -74,7 +72,7 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster public final Client client; - private final AtomicInteger updatesInProgress = new AtomicInteger(); + final AtomicInteger upgradesInProgress = new AtomicInteger(); private ImmutableOpenMap lastTemplateMetaData; @@ -103,8 +101,8 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster return; } - if (updatesInProgress.get() > 0) { - // we are already running some updates - skip this cluster state update + if (upgradesInProgress.get() > 0) { + // we are already running some upgrades - skip this cluster state update return; } @@ -124,7 +122,7 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster lastTemplateMetaData = templates; Optional, Set>> changes = calculateTemplateChanges(templates); if (changes.isPresent()) { - if (updatesInProgress.compareAndSet(0, changes.get().v1().size() + changes.get().v2().size())) { + if (upgradesInProgress.compareAndSet(0, changes.get().v1().size() + changes.get().v2().size() + 1)) { logger.info("Starting template upgrade to version {}, {} templates will be updated and {} will be removed", Version.CURRENT, changes.get().v1().size(), @@ -133,13 +131,14 @@ public class TemplateUpgradeService extends 
AbstractComponent implements Cluster final ThreadContext threadContext = threadPool.getThreadContext(); try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { threadContext.markAsSystemContext(); - threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2())); + threadPool.generic().execute(() -> upgradeTemplates(changes.get().v1(), changes.get().v2())); } } } } - void updateTemplates(Map changes, Set deletions) { + void upgradeTemplates(Map changes, Set deletions) { + final AtomicBoolean anyUpgradeFailed = new AtomicBoolean(false); if (threadPool.getThreadContext().isSystemContext() == false) { throw new IllegalStateException("template updates from the template upgrade service should always happen in a system context"); } @@ -151,20 +150,18 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster client.admin().indices().putTemplate(request, new ActionListener() { @Override public void onResponse(PutIndexTemplateResponse response) { - if (updatesInProgress.decrementAndGet() == 0) { - logger.info("Finished upgrading templates to version {}", Version.CURRENT); - } if (response.isAcknowledged() == false) { + anyUpgradeFailed.set(true); logger.warn("Error updating template [{}], request was not acknowledged", change.getKey()); } + tryFinishUpgrade(anyUpgradeFailed); } @Override public void onFailure(Exception e) { - if (updatesInProgress.decrementAndGet() == 0) { - logger.info("Templates were upgraded to version {}", Version.CURRENT); - } + anyUpgradeFailed.set(true); logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e); + tryFinishUpgrade(anyUpgradeFailed); } }); } @@ -175,27 +172,51 @@ public class TemplateUpgradeService extends AbstractComponent implements Cluster client.admin().indices().deleteTemplate(request, new ActionListener() { @Override public void onResponse(DeleteIndexTemplateResponse response) { - updatesInProgress.decrementAndGet(); if 
(response.isAcknowledged() == false) { + anyUpgradeFailed.set(true); logger.warn("Error deleting template [{}], request was not acknowledged", template); } + tryFinishUpgrade(anyUpgradeFailed); } @Override public void onFailure(Exception e) { - updatesInProgress.decrementAndGet(); + anyUpgradeFailed.set(true); if (e instanceof IndexTemplateMissingException == false) { // we might attempt to delete the same template from different nodes - so that's ok if template doesn't exist // otherwise we need to warn logger.warn(new ParameterizedMessage("Error deleting template [{}]", template), e); } + tryFinishUpgrade(anyUpgradeFailed); } }); } } - int getUpdatesInProgress() { - return updatesInProgress.get(); + void tryFinishUpgrade(AtomicBoolean anyUpgradeFailed) { + assert upgradesInProgress.get() > 0; + if (upgradesInProgress.decrementAndGet() == 1) { + try { + // this is the last upgrade, the templates should now be in the desired state + if (anyUpgradeFailed.get()) { + logger.info("Templates were partially upgraded to version {}", Version.CURRENT); + } else { + logger.info("Templates were upgraded successfuly to version {}", Version.CURRENT); + } + // Check upgraders are satisfied after the update completed. If they still + // report that changes are required, this might indicate a bug or that something + // else tinkering with the templates during the upgrade. + final ImmutableOpenMap upgradedTemplates = + clusterService.state().getMetaData().getTemplates(); + final boolean changesRequired = calculateTemplateChanges(upgradedTemplates).isPresent(); + if (changesRequired) { + logger.warn("Templates are still reported as out of date after the upgrade. 
The template upgrade will be retried."); + } + } finally { + final int noMoreUpgrades = upgradesInProgress.decrementAndGet(); + assert noMoreUpgrades == 0; + } + } } Optional, Set>> calculateTemplateChanges( diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 7e3a385443f..db185f1e8c1 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -142,20 +142,4 @@ public interface BlobContainer { * @throws IOException if there were any failures in reading from the blob container. */ Map listBlobsByPrefix(String blobNamePrefix) throws IOException; - - /** - * Renames the source blob into the target blob. If the source blob does not exist or the - * target blob already exists, an exception is thrown. Atomicity of the move operation - * can only be guaranteed on an implementation-by-implementation basis. The only current - * implementation of {@link BlobContainer} for which atomicity can be guaranteed is the - * {@link org.elasticsearch.common.blobstore.fs.FsBlobContainer}. - * - * @param sourceBlobName - * The blob to rename. - * @param targetBlobName - * The name of the blob after the renaming. - * @throws IOException if the source blob does not exist, the target blob already exists, - * or there were any failures in reading from the blob container. 
- */ - void move(String sourceBlobName, String targetBlobName) throws IOException; } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 6f1df0011b1..a58802ecd18 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -142,14 +142,7 @@ public class FsBlobContainer extends AbstractBlobContainer { Streams.copy(inputStream, outputStream); } IOUtils.fsync(tempBlobPath, false); - - final Path blobPath = path.resolve(blobName); - // If the target file exists then Files.move() behaviour is implementation specific - // the existing file might be replaced or this method fails by throwing an IOException. - if (Files.exists(blobPath)) { - throw new FileAlreadyExistsException("blob [" + blobPath + "] already exists, cannot overwrite"); - } - Files.move(tempBlobPath, blobPath, StandardCopyOption.ATOMIC_MOVE); + moveBlobAtomic(tempBlob, blobName); } catch (IOException ex) { try { deleteBlobIgnoringIfNotExists(tempBlob); @@ -162,6 +155,17 @@ public class FsBlobContainer extends AbstractBlobContainer { } } + public void moveBlobAtomic(final String sourceBlobName, final String targetBlobName) throws IOException { + final Path sourceBlobPath = path.resolve(sourceBlobName); + final Path targetBlobPath = path.resolve(targetBlobName); + // If the target file exists then Files.move() behaviour is implementation specific + // the existing file might be replaced or this method fails by throwing an IOException. 
+ if (Files.exists(targetBlobPath)) { + throw new FileAlreadyExistsException("blob [" + targetBlobPath + "] already exists, cannot overwrite"); + } + Files.move(sourceBlobPath, targetBlobPath, StandardCopyOption.ATOMIC_MOVE); + } + public static String tempBlobName(final String blobName) { return "pending-" + blobName + "-" + UUIDs.randomBase64UUID(); } @@ -174,17 +178,4 @@ public class FsBlobContainer extends AbstractBlobContainer { public static boolean isTempBlobName(final String blobName) { return blobName.startsWith(TEMP_FILE_PREFIX); } - - @Override - public void move(String source, String target) throws IOException { - Path sourcePath = path.resolve(source); - Path targetPath = path.resolve(target); - // If the target file exists then Files.move() behaviour is implementation specific - // the existing file might be replaced or this method fails by throwing an IOException. - if (Files.exists(targetPath)) { - throw new FileAlreadyExistsException("blob [" + targetPath + "] already exists, cannot overwrite"); - } - Files.move(sourcePath, targetPath, StandardCopyOption.ATOMIC_MOVE); - IOUtils.fsync(path, true); - } } diff --git a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java index 7559b058ea7..b296050822f 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpHandlingSettings.java @@ -22,6 +22,7 @@ package org.elasticsearch.http; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -47,7 
+48,7 @@ public class HttpHandlingSettings { public HttpHandlingSettings(int maxContentLength, int maxChunkSize, int maxHeaderSize, int maxInitialLineLength, boolean resetCookies, boolean compression, int compressionLevel, boolean detailedErrorsEnabled, - int pipeliningMaxEvents) { + int pipeliningMaxEvents, boolean corsEnabled) { this.maxContentLength = maxContentLength; this.maxChunkSize = maxChunkSize; this.maxHeaderSize = maxHeaderSize; @@ -57,6 +58,7 @@ public class HttpHandlingSettings { this.compressionLevel = compressionLevel; this.detailedErrorsEnabled = detailedErrorsEnabled; this.pipeliningMaxEvents = pipeliningMaxEvents; + this.corsEnabled = corsEnabled; } public static HttpHandlingSettings fromSettings(Settings settings) { @@ -68,7 +70,8 @@ public class HttpHandlingSettings { SETTING_HTTP_COMPRESSION.get(settings), SETTING_HTTP_COMPRESSION_LEVEL.get(settings), SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings), - SETTING_PIPELINING_MAX_EVENTS.get(settings)); + SETTING_PIPELINING_MAX_EVENTS.get(settings), + SETTING_CORS_ENABLED.get(settings)); } public int getMaxContentLength() { diff --git a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java index de345a39fd6..0ce8edcf3b6 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/HttpServerTransport.java @@ -29,6 +29,8 @@ public interface HttpServerTransport extends LifecycleComponent { String HTTP_SERVER_WORKER_THREAD_NAME_PREFIX = "http_server_worker"; + String HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX = "http_server_acceptor"; + BoundTransportAddress boundAddress(); HttpInfo info(); diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index d5f8a55f4d9..9e859a16956 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ 
b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -19,8 +19,8 @@ package org.elasticsearch.index; -import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.client.Client; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index e421a19b2ac..61b5cb91712 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -22,7 +22,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -70,14 +69,16 @@ public final class AnalysisRegistry implements Closeable { Map>> normalizers, Map preConfiguredCharFilters, Map preConfiguredTokenFilters, - Map preConfiguredTokenizers) { + Map preConfiguredTokenizers, + Map preConfiguredAnalyzers) { this.environment = environment; this.charFilters = unmodifiableMap(charFilters); this.tokenFilters = unmodifiableMap(tokenFilters); this.tokenizers = unmodifiableMap(tokenizers); this.analyzers = unmodifiableMap(analyzers); this.normalizers = unmodifiableMap(normalizers); - prebuiltAnalysis = new PrebuiltAnalysis(preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers); + prebuiltAnalysis = + new PrebuiltAnalysis(preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers, preConfiguredAnalyzers); } /** @@ -398,13 
+399,15 @@ public final class AnalysisRegistry implements Closeable { private PrebuiltAnalysis( Map preConfiguredCharFilters, Map preConfiguredTokenFilters, - Map preConfiguredTokenizers) { - Map analyzerProviderFactories = new HashMap<>(); + Map preConfiguredTokenizers, + Map preConfiguredAnalyzers) { - // Analyzers + Map analyzerProviderFactories = new HashMap<>(); + analyzerProviderFactories.putAll(preConfiguredAnalyzers); + // Pre-build analyzers for (PreBuiltAnalyzers preBuiltAnalyzerEnum : PreBuiltAnalyzers.values()) { String name = preBuiltAnalyzerEnum.name().toLowerCase(Locale.ROOT); - analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, AnalyzerScope.INDICES, preBuiltAnalyzerEnum.getAnalyzer(Version.CURRENT))); + analyzerProviderFactories.put(name, new PreBuiltAnalyzerProviderFactory(name, preBuiltAnalyzerEnum)); } this.analyzerProviderFactories = Collections.unmodifiableMap(analyzerProviderFactories); @@ -429,17 +432,10 @@ public final class AnalysisRegistry implements Closeable { return analyzerProviderFactories.get(name); } - Analyzer analyzer(String name) { - PreBuiltAnalyzerProviderFactory analyzerProviderFactory = (PreBuiltAnalyzerProviderFactory) analyzerProviderFactories.get(name); - if (analyzerProviderFactory == null) { - return null; - } - return analyzerProviderFactory.analyzer(); - } - @Override public void close() throws IOException { - IOUtils.close(analyzerProviderFactories.values().stream().map((a) -> ((PreBuiltAnalyzerProviderFactory)a).analyzer()).collect(Collectors.toList())); + IOUtils.close(analyzerProviderFactories.values().stream() + .map((a) -> ((PreBuiltAnalyzerProviderFactory)a)).collect(Collectors.toList())); } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java index 3e59377ecc2..eedff2c349c 100644 --- 
a/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerProviderFactory.java @@ -22,41 +22,101 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.analysis.PreBuiltAnalyzers; +import org.elasticsearch.indices.analysis.PreBuiltCacheFactory; +import java.io.Closeable; import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.function.Function; +import java.util.stream.Collectors; -public class PreBuiltAnalyzerProviderFactory implements AnalysisModule.AnalysisProvider> { +public class PreBuiltAnalyzerProviderFactory extends PreConfiguredAnalysisComponent> implements Closeable { - private final PreBuiltAnalyzerProvider analyzerProvider; + private final Function create; + private final PreBuiltAnalyzerProvider current; - public PreBuiltAnalyzerProviderFactory(String name, AnalyzerScope scope, Analyzer analyzer) { - analyzerProvider = new PreBuiltAnalyzerProvider(name, scope, analyzer); + /** + * This constructor only exists to expose analyzers defined in {@link PreBuiltAnalyzers} as {@link PreBuiltAnalyzerProviderFactory}. 
+ */ + PreBuiltAnalyzerProviderFactory(String name, PreBuiltAnalyzers preBuiltAnalyzer) { + super(name, new PreBuiltAnalyzersDelegateCache(name, preBuiltAnalyzer)); + this.create = preBuiltAnalyzer::getAnalyzer; + current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, preBuiltAnalyzer.getAnalyzer(Version.CURRENT)); } - public AnalyzerProvider create(String name, Settings settings) { - Version indexVersion = Version.indexCreated(settings); - if (!Version.CURRENT.equals(indexVersion)) { - PreBuiltAnalyzers preBuiltAnalyzers = PreBuiltAnalyzers.getOrDefault(name, null); - if (preBuiltAnalyzers != null) { - Analyzer analyzer = preBuiltAnalyzers.getAnalyzer(indexVersion); - return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer); - } - } - - return analyzerProvider; + public PreBuiltAnalyzerProviderFactory(String name, PreBuiltCacheFactory.CachingStrategy cache, Function create) { + super(name, cache); + this.create = create; + this.current = new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, create.apply(Version.CURRENT)); } @Override - public AnalyzerProvider get(IndexSettings indexSettings, Environment environment, String name, Settings settings) - throws IOException { - return create(name, settings); + public AnalyzerProvider get(IndexSettings indexSettings, + Environment environment, + String name, + Settings settings) throws IOException { + Version versionCreated = Version.indexCreated(settings); + if (Version.CURRENT.equals(versionCreated) == false) { + return super.get(indexSettings, environment, name, settings); + } else { + return current; + } } - public Analyzer analyzer() { - return analyzerProvider.get(); + @Override + protected AnalyzerProvider create(Version version) { + assert Version.CURRENT.equals(version) == false; + return new PreBuiltAnalyzerProvider(getName(), AnalyzerScope.INDICES, create.apply(version)); + } + + @Override + public void close() throws IOException { + List closeables = cache.values().stream() 
+ .map(AnalyzerProvider::get) + .collect(Collectors.toList()); + closeables.add(current.get()); + IOUtils.close(closeables); + } + + /** + * A special cache that closes the gap between PreBuiltAnalyzers and PreBuiltAnalyzerProviderFactory. + * + * This can be removed when all analyzers have been moved away from PreBuiltAnalyzers to + * PreBuiltAnalyzerProviderFactory either in server or analysis-common. + */ + static class PreBuiltAnalyzersDelegateCache implements PreBuiltCacheFactory.PreBuiltCache> { + + private final String name; + private final PreBuiltAnalyzers preBuiltAnalyzer; + + private PreBuiltAnalyzersDelegateCache(String name, PreBuiltAnalyzers preBuiltAnalyzer) { + this.name = name; + this.preBuiltAnalyzer = preBuiltAnalyzer; + } + + @Override + public AnalyzerProvider get(Version version) { + return new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, preBuiltAnalyzer.getAnalyzer(version)); + } + + @Override + public void put(Version version, AnalyzerProvider analyzerProvider) { + // No need to put, because we delegate in get() directly to PreBuiltAnalyzers which already caches. 
+ } + + @Override + public Collection> values() { + return preBuiltAnalyzer.getCache().values().stream() + // Wrap the analyzer instance in a PreBuiltAnalyzerProvider, this is what PreBuiltAnalyzerProviderFactory#close expects + // (other caches are not directly caching analyzers, but analyzer provider instead) + .map(analyzer -> new PreBuiltAnalyzerProvider(name, AnalyzerScope.INDICES, analyzer)) + .collect(Collectors.toList()); + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java index fdd525d0c80..f7450c15ee9 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/PreConfiguredAnalysisComponent.java @@ -33,13 +33,18 @@ import java.io.IOException; */ public abstract class PreConfiguredAnalysisComponent implements AnalysisModule.AnalysisProvider { private final String name; - private final PreBuiltCacheFactory.PreBuiltCache cache; + protected final PreBuiltCacheFactory.PreBuiltCache cache; - protected PreConfiguredAnalysisComponent(String name, PreBuiltCacheFactory.CachingStrategy cache) { + protected PreConfiguredAnalysisComponent(String name, PreBuiltCacheFactory.CachingStrategy cache) { this.name = name; this.cache = PreBuiltCacheFactory.getCache(cache); } + protected PreConfiguredAnalysisComponent(String name, PreBuiltCacheFactory.PreBuiltCache cache) { + this.name = name; + this.cache = cache; + } + @Override public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException { Version versionCreated = Version.indexCreated(settings); diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 57e896bffc5..23a90553f60 100644 --- 
a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -29,8 +29,8 @@ import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.CodecService; @@ -142,10 +142,20 @@ public final class EngineConfig { this.codecService = codecService; this.eventListener = eventListener; codecName = indexSettings.getValue(INDEX_CODEC_SETTING); - // We give IndexWriter a "huge" (256 MB) buffer, so it won't flush on its own unless the ES indexing buffer is also huge and/or - // there are not too many shards allocated to this node. Instead, IndexingMemoryController periodically checks - // and refreshes the most heap-consuming shards when total indexing heap usage across all shards is too high: - indexingBufferSize = new ByteSizeValue(256, ByteSizeUnit.MB); + // We need to make the indexing buffer for this shard at least as large + // as the amount of memory that is available for all engines on the + // local node so that decisions to flush segments to disk are made by + // IndexingMemoryController rather than Lucene. + // Add an escape hatch in case this change proves problematic - it used + // to be a fixed amound of RAM: 256 MB. 
+ // TODO: Remove this escape hatch in 8.x + final String escapeHatchProperty = "es.index.memory.max_index_buffer_size"; + String maxBufferSize = System.getProperty(escapeHatchProperty); + if (maxBufferSize != null) { + indexingBufferSize = MemorySizeValue.parseBytesSizeValueOrHeapRatio(maxBufferSize, escapeHatchProperty); + } else { + indexingBufferSize = IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING.get(indexSettings.getNodeSettings()); + } this.queryCache = queryCache; this.queryCachingPolicy = queryCachingPolicy; this.translogConfig = translogConfig; diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java index b574cadc423..9aca0c0fc0d 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilder.java @@ -19,6 +19,9 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.TermContext; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiTermQuery; @@ -26,11 +29,15 @@ import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.ScoringRewrite; +import org.apache.lucene.search.TopTermsRewrite; import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; import org.elasticsearch.Version; +import org.elasticsearch.ElasticsearchException; 
import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -42,6 +49,8 @@ import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.support.QueryParsers; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.Objects; /** @@ -49,12 +58,10 @@ import java.util.Objects; * as a {@link SpanQueryBuilder} so it can be nested. */ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder - implements SpanQueryBuilder { + implements SpanQueryBuilder { public static final String NAME = "span_multi"; - private static final ParseField MATCH_FIELD = new ParseField("match"); - private final MultiTermQueryBuilder multiTermQueryBuilder; public SpanMultiTermQueryBuilder(MultiTermQueryBuilder multiTermQueryBuilder) { @@ -83,7 +90,7 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder>() { + @Override + protected List getTopLevelBuilder() { + return new ArrayList(); + } + + @Override + protected Query build(List builder) { + return new SpanOrQuery((SpanQuery[]) builder.toArray(new SpanQuery[builder.size()])); + } + + @Override + protected void checkMaxClauseCount(int count) { + if (count > maxExpansions) { + throw new RuntimeException("[" + query.toString() + " ] " + + "exceeds maxClauseCount [ Boolean maxClauseCount is set to " + BooleanQuery.getMaxClauseCount() + "]"); + } + } + + @Override + protected void addClause(List topLevel, Term term, int docCount, float boost, TermContext states) { + SpanTermQuery q = new SpanTermQuery(term, states); + topLevel.add(q); + } + }; + return (SpanQuery) delegate.rewrite(reader, query); + } + } + @Override protected Query doToQuery(QueryShardContext context) throws IOException { Query subQuery = multiTermQueryBuilder.toQuery(context); @@ -172,6 +217,7 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder(prefixQuery); } else { String 
origFieldName = ((PrefixQueryBuilder) multiTermQueryBuilder).fieldName(); @@ -191,9 +237,17 @@ public class SpanMultiTermQueryBuilder extends AbstractQueryBuilder((MultiTermQuery) subQuery); } + if (subQuery instanceof MultiTermQuery) { + MultiTermQuery multiTermQuery = (MultiTermQuery) subQuery; + SpanMultiTermQueryWrapper wrapper = (SpanMultiTermQueryWrapper) spanQuery; + if (multiTermQuery.getRewriteMethod() instanceof TopTermsRewrite == false) { + wrapper.setRewriteMethod(new TopTermSpanBooleanQueryRewriteWithMaxClause()); + } + } if (boost != AbstractQueryBuilder.DEFAULT_BOOST) { return new SpanBoostQuery(spanQuery, boost); } + return spanQuery; } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 590bcf5e4e0..50406ed5834 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -46,6 +46,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.FieldNamesFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -657,6 +658,13 @@ public class QueryStringQueryParser extends XQueryParser { @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { + final int maxAllowedRegexLength = context.getIndexSettings().getMaxRegexLength(); + if (termStr.length() > maxAllowedRegexLength) { + throw new IllegalArgumentException( + "The length of regex [" + termStr.length() + "] used in the [query_string] has exceeded " + + "the allowed maximum of [" + maxAllowedRegexLength + "]. 
This maximum can be set by changing the [" + + IndexSettings.MAX_REGEX_LENGTH_SETTING.getKey() + "] index level setting."); + } Map fields = extractMultiFields(field, false); if (fields.isEmpty()) { return newUnmappedFieldQuery(termStr); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 6c786763003..11024286b22 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -27,6 +27,8 @@ import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.BooleanFieldMapper; import org.elasticsearch.index.mapper.CompletionFieldMapper; @@ -60,10 +62,12 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.plugins.MapperPlugin; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.function.Predicate; @@ -234,4 +238,9 @@ public class IndicesModule extends AbstractModule { public MapperRegistry getMapperRegistry() { return mapperRegistry; } + + public Collection>> getEngineFactories() { + return Collections.emptyList(); + } + } diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index e787f574fe9..8bc9a6e7804 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -25,7 +25,6 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -68,6 +67,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; @@ -180,7 +180,7 @@ public class IndicesService extends AbstractLifecycleComponent private final IndicesRequestCache indicesRequestCache; private final IndicesQueryCache indicesQueryCache; private final MetaStateService metaStateService; - private final Collection enginePlugins; + private final Collection>> engineFactoryProviders; @Override protected void doStart() { @@ -193,7 +193,7 @@ public class IndicesService extends AbstractLifecycleComponent MapperRegistry mapperRegistry, NamedWriteableRegistry namedWriteableRegistry, ThreadPool threadPool, IndexScopedSettings indexScopedSettings, CircuitBreakerService circuitBreakerService, BigArrays bigArrays, ScriptService scriptService, Client client, MetaStateService metaStateService, - Collection enginePlugins) { + Collection>> engineFactoryProviders) { super(settings); this.threadPool = threadPool; this.pluginsService = pluginsService; @@ -224,7 +224,7 @@ public class IndicesService extends 
AbstractLifecycleComponent this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); this.metaStateService = metaStateService; - this.enginePlugins = enginePlugins; + this.engineFactoryProviders = engineFactoryProviders; } @Override @@ -478,27 +478,27 @@ public class IndicesService extends AbstractLifecycleComponent } private EngineFactory getEngineFactory(final IndexSettings idxSettings) { - final List>> engineFactories = - enginePlugins + final List> engineFactories = + engineFactoryProviders .stream() - .map(p -> Tuple.tuple(p, p.getEngineFactory(idxSettings))) - .filter(t -> Objects.requireNonNull(t.v2()).isPresent()) + .map(engineFactoryProvider -> engineFactoryProvider.apply(idxSettings)) + .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) .collect(Collectors.toList()); if (engineFactories.isEmpty()) { return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { - assert engineFactories.get(0).v2().isPresent(); - return engineFactories.get(0).v2().get(); + assert engineFactories.get(0).isPresent(); + return engineFactories.get(0).get(); } else { final String message = String.format( Locale.ROOT, - "multiple plugins provided engine factories for %s: %s", + "multiple engine factories provided for %s: %s", idxSettings.getIndex(), engineFactories .stream() .map(t -> { - assert t.v2().isPresent(); - return "[" + t.v1().getClass().getName() + "/" + t.v2().get().getClass().getName() + "]"; + assert t.isPresent(); + return "[" + t.get().getClass().getName() + "]"; }) .collect(Collectors.joining(","))); throw new IllegalStateException(message); diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java index bc590381c3c..13aaf44c82e 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/AnalysisModule.java @@ -43,7 +43,6 @@ import org.elasticsearch.index.analysis.CzechAnalyzerProvider; import org.elasticsearch.index.analysis.DanishAnalyzerProvider; import org.elasticsearch.index.analysis.DutchAnalyzerProvider; import org.elasticsearch.index.analysis.EnglishAnalyzerProvider; -import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider; import org.elasticsearch.index.analysis.FinnishAnalyzerProvider; import org.elasticsearch.index.analysis.FrenchAnalyzerProvider; import org.elasticsearch.index.analysis.GalicianAnalyzerProvider; @@ -59,9 +58,9 @@ import org.elasticsearch.index.analysis.KeywordAnalyzerProvider; import org.elasticsearch.index.analysis.LatvianAnalyzerProvider; import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider; import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider; -import org.elasticsearch.index.analysis.PatternAnalyzerProvider; import org.elasticsearch.index.analysis.PersianAnalyzerProvider; import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider; +import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; @@ -73,7 +72,6 @@ import org.elasticsearch.index.analysis.SnowballAnalyzerProvider; import org.elasticsearch.index.analysis.SoraniAnalyzerProvider; import org.elasticsearch.index.analysis.SpanishAnalyzerProvider; import org.elasticsearch.index.analysis.StandardAnalyzerProvider; -import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzerProvider; import org.elasticsearch.index.analysis.StandardTokenFilterFactory; import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.StopAnalyzerProvider; 
@@ -122,11 +120,12 @@ public final class AnalysisModule { Map preConfiguredCharFilters = setupPreConfiguredCharFilters(plugins); Map preConfiguredTokenFilters = setupPreConfiguredTokenFilters(plugins); Map preConfiguredTokenizers = setupPreConfiguredTokenizers(plugins); + Map preConfiguredAnalyzers = setupPreBuiltAnalyzerProviderFactories(plugins); analysisRegistry = new AnalysisRegistry(environment, charFilters.getRegistry(), tokenFilters.getRegistry(), tokenizers.getRegistry(), analyzers.getRegistry(), normalizers.getRegistry(), - preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers); + preConfiguredCharFilters, preConfiguredTokenFilters, preConfiguredTokenizers, preConfiguredAnalyzers); } HunspellService getHunspellService() { @@ -162,6 +161,16 @@ public final class AnalysisModule { return tokenFilters; } + static Map setupPreBuiltAnalyzerProviderFactories(List plugins) { + NamedRegistry preConfiguredCharFilters = new NamedRegistry<>("pre-built analyzer"); + for (AnalysisPlugin plugin : plugins) { + for (PreBuiltAnalyzerProviderFactory factory : plugin.getPreBuiltAnalyzerProviderFactories()) { + preConfiguredCharFilters.register(factory.getName(), factory); + } + } + return unmodifiableMap(preConfiguredCharFilters.getRegistry()); + } + static Map setupPreConfiguredCharFilters(List plugins) { NamedRegistry preConfiguredCharFilters = new NamedRegistry<>("pre-configured char_filter"); @@ -232,12 +241,10 @@ public final class AnalysisModule { NamedRegistry>> analyzers = new NamedRegistry<>("analyzer"); analyzers.register("default", StandardAnalyzerProvider::new); analyzers.register("standard", StandardAnalyzerProvider::new); - analyzers.register("standard_html_strip", StandardHtmlStripAnalyzerProvider::new); analyzers.register("simple", SimpleAnalyzerProvider::new); analyzers.register("stop", StopAnalyzerProvider::new); analyzers.register("whitespace", WhitespaceAnalyzerProvider::new); analyzers.register("keyword", 
KeywordAnalyzerProvider::new); - analyzers.register("pattern", PatternAnalyzerProvider::new); analyzers.register("snowball", SnowballAnalyzerProvider::new); analyzers.register("arabic", ArabicAnalyzerProvider::new); analyzers.register("armenian", ArmenianAnalyzerProvider::new); @@ -274,7 +281,6 @@ public final class AnalysisModule { analyzers.register("swedish", SwedishAnalyzerProvider::new); analyzers.register("turkish", TurkishAnalyzerProvider::new); analyzers.register("thai", ThaiAnalyzerProvider::new); - analyzers.register("fingerprint", FingerprintAnalyzerProvider::new); analyzers.extractAndRegister(plugins, AnalysisPlugin::getAnalyzers); return analyzers; } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 3c286f7dd5e..18cc247b844 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -61,10 +61,7 @@ import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; import org.elasticsearch.Version; -import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.index.analysis.PatternAnalyzer; import org.elasticsearch.index.analysis.SnowballAnalyzer; -import org.elasticsearch.index.analysis.StandardHtmlStripAnalyzer; import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy; import java.util.Locale; @@ -141,22 +138,6 @@ public enum PreBuiltAnalyzers { } }, - PATTERN(CachingStrategy.ELASTICSEARCH) { - @Override - protected Analyzer create(Version version) { - return new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET); - } - }, - - STANDARD_HTML_STRIP(CachingStrategy.ELASTICSEARCH) { - @Override - protected Analyzer create(Version 
version) { - final Analyzer analyzer = new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET); - analyzer.setVersion(version.luceneVersion); - return analyzer; - } - }, - ARABIC { @Override protected Analyzer create(Version version) { @@ -484,7 +465,7 @@ public enum PreBuiltAnalyzers { cache = PreBuiltCacheFactory.getCache(cachingStrategy); } - PreBuiltCacheFactory.PreBuiltCache getCache() { + public PreBuiltCacheFactory.PreBuiltCache getCache() { return cache; } diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java index 8636e04f20f..22b5a8ffaf4 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/PreBuiltCacheFactory.java @@ -21,6 +21,8 @@ package org.elasticsearch.indices.analysis; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -36,8 +38,12 @@ public class PreBuiltCacheFactory { public enum CachingStrategy { ONE, LUCENE, ELASTICSEARCH }; public interface PreBuiltCache { + T get(Version version); + void put(Version version, T t); + + Collection values(); } private PreBuiltCacheFactory() {} @@ -71,6 +77,11 @@ public class PreBuiltCacheFactory { public void put(Version version, T model) { this.model = model; } + + @Override + public Collection values() { + return Collections.singleton(model); + } } /** @@ -89,6 +100,11 @@ public class PreBuiltCacheFactory { public void put(Version version, T model) { mapModel.put(version, model); } + + @Override + public Collection values() { + return mapModel.values(); + } } /** @@ -107,5 +123,10 @@ public class PreBuiltCacheFactory { public void put(org.elasticsearch.Version version, T model) { mapModel.put(version.luceneVersion, model); } + + @Override + 
public Collection values() { + return mapModel.values(); + } } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index c5df30a2d87..885ba291cfd 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -21,7 +21,6 @@ package org.elasticsearch.node; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; @@ -58,12 +57,10 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.component.LifecycleComponent; -import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.inject.Key; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.ModulesBuilder; -import org.elasticsearch.common.inject.util.Providers; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.DeprecationLogger; @@ -82,6 +79,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; @@ -93,8 +91,10 @@ import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import 
org.elasticsearch.http.HttpServerTransport; -import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -109,6 +109,10 @@ import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.persistent.PersistentTasksClusterService; +import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksExecutorRegistry; +import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.ClusterPlugin; @@ -141,10 +145,6 @@ import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportService; import org.elasticsearch.usage.UsageService; import org.elasticsearch.watcher.ResourceWatcherService; -import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksExecutor; -import org.elasticsearch.persistent.PersistentTasksExecutorRegistry; -import org.elasticsearch.persistent.PersistentTasksService; import java.io.BufferedWriter; import java.io.Closeable; @@ -162,6 +162,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -397,14 +398,19 @@ public class Node implements Closeable { modules.add(new 
RepositoriesModule(this.environment, pluginsService.filterPlugins(RepositoryPlugin.class), xContentRegistry)); final MetaStateService metaStateService = new MetaStateService(settings, nodeEnvironment, xContentRegistry); - // collect engine factory providers per plugin + // collect engine factory providers from server and from plugins final Collection enginePlugins = pluginsService.filterPlugins(EnginePlugin.class); + final Collection>> engineFactoryProviders = + Stream.concat( + indicesModule.getEngineFactories().stream(), + enginePlugins.stream().map(plugin -> plugin::getEngineFactory)) + .collect(Collectors.toList()); final IndicesService indicesService = new IndicesService(settings, pluginsService, nodeEnvironment, xContentRegistry, analysisModule.getAnalysisRegistry(), clusterModule.getIndexNameExpressionResolver(), indicesModule.getMapperRegistry(), namedWriteableRegistry, threadPool, settingsModule.getIndexScopedSettings(), circuitBreakerService, bigArrays, - scriptModule.getScriptService(), client, metaStateService, enginePlugins); + scriptModule.getScriptService(), client, metaStateService, engineFactoryProviders); Collection pluginComponents = pluginsService.filterPlugins(Plugin.class).stream() .flatMap(p -> p.createComponents(client, clusterService, threadPool, resourceWatcherService, diff --git a/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java b/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java index cc04ed875d9..e740fddc6ec 100644 --- a/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/AnalysisPlugin.java @@ -28,6 +28,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.PreBuiltAnalyzerProviderFactory; import 
org.elasticsearch.index.analysis.PreConfiguredCharFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenFilter; import org.elasticsearch.index.analysis.PreConfiguredTokenizer; @@ -92,6 +93,13 @@ public interface AnalysisPlugin { return emptyMap(); } + /** + * Override to add additional pre-configured {@link Analyzer}s. + */ + default List getPreBuiltAnalyzerProviderFactories() { + return emptyList(); + } + /** * Override to add additional pre-configured {@link CharFilter}s. */ diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java deleted file mode 100644 index 9892717cd77..00000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllMappingsAction.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; - -/** - * The REST handler for retrieving all mappings - */ -public class RestGetAllMappingsAction extends BaseRestHandler { - - public RestGetAllMappingsAction(final Settings settings, final RestController controller) { - super(settings); - controller.registerHandler(GET, "/_mapping", this); - controller.registerHandler(GET, "/_mappings", this); - } - - @Override - public String getName() { - return "get_all_mappings_action"; - } - - @Override - public RestChannelConsumer 
prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetIndexRequest getIndexRequest = new GetIndexRequest(); - getIndexRequest.indices(Strings.EMPTY_ARRAY); - getIndexRequest.features(Feature.MAPPINGS); - getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); - getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); - return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { - builder.startObject(); - { - for (final String index : response.indices()) { - builder.startObject(index); - { - writeMappings(response.mappings().get(index), builder); - } - builder.endObject(); - } - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - - private void writeMappings(final ImmutableOpenMap mappings, - final XContentBuilder builder) throws IOException { - builder.startObject("mappings"); - { - for (final ObjectObjectCursor typeEntry : mappings) { - builder.field(typeEntry.key); - builder.map(typeEntry.value.sourceAsMap()); - } - } - builder.endObject(); - } - }); - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 46388e6947f..08f8449b701 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -20,8 +20,6 @@ package org.elasticsearch.rest.action.admin.indices; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import 
org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -56,12 +54,13 @@ import java.util.stream.Collectors; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; public class RestGetMappingAction extends BaseRestHandler { public RestGetMappingAction(final Settings settings, final RestController controller) { super(settings); + controller.registerHandler(GET, "/_mapping", this); + controller.registerHandler(GET, "/_mappings", this); controller.registerHandler(GET, "/{index}/{type}/_mapping", this); controller.registerHandler(GET, "/{index}/_mappings", this); controller.registerHandler(GET, "/{index}/_mapping", this); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index d9fa50cf941..6dead806042 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -20,23 +20,18 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestController; import 
org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestStatus.OK; public class RestGetSettingsAction extends BaseRestHandler { @@ -68,15 +63,6 @@ public class RestGetSettingsAction extends BaseRestHandler { .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); - - return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(GetSettingsResponse getSettingsResponse, XContentBuilder builder) throws Exception { - getSettingsResponse.toXContent(builder, request); - return new BytesRestResponse(OK, builder); - } - }); + return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestToXContentListener<>(channel)); } - } diff --git a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index cfa5a240dea..30f1dfb14fc 100644 --- a/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -81,6 +81,7 @@ public class GeoDistanceSortBuilder extends SortBuilder private static final ParseField DISTANCE_TYPE_FIELD = new ParseField("distance_type"); private static final ParseField VALIDATION_METHOD_FIELD = new ParseField("validation_method"); private static final ParseField SORTMODE_FIELD = new ParseField("mode", "sort_mode"); + private static final ParseField IGNORE_UNMAPPED = new ParseField("ignore_unmapped"); 
private final String fieldName; private final List points = new ArrayList<>(); @@ -97,6 +98,8 @@ public class GeoDistanceSortBuilder extends SortBuilder private GeoValidationMethod validation = DEFAULT_VALIDATION; + private boolean ignoreUnmapped = false; + /** * Constructs a new distance based sort on a geo point like field. * @@ -152,6 +155,7 @@ public class GeoDistanceSortBuilder extends SortBuilder this.nestedPath = original.nestedPath; this.validation = original.validation; this.nestedSort = original.nestedSort; + this.ignoreUnmapped = original.ignoreUnmapped; } /** @@ -171,6 +175,9 @@ public class GeoDistanceSortBuilder extends SortBuilder nestedSort = in.readOptionalWriteable(NestedSortBuilder::new); } validation = GeoValidationMethod.readFromStream(in); + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { + ignoreUnmapped = in.readBoolean(); + } } @Override @@ -187,6 +194,9 @@ public class GeoDistanceSortBuilder extends SortBuilder out.writeOptionalWriteable(nestedSort); } validation.writeTo(out); + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { + out.writeBoolean(ignoreUnmapped); + } } /** @@ -374,6 +384,18 @@ public class GeoDistanceSortBuilder extends SortBuilder return this; } + /** + * Returns true if unmapped geo fields should be treated as located at an infinite distance + */ + public boolean ignoreUnmapped() { + return ignoreUnmapped; + } + + public GeoDistanceSortBuilder ignoreUnmapped(boolean ignoreUnmapped) { + this.ignoreUnmapped = ignoreUnmapped; + return this; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -403,6 +425,7 @@ public class GeoDistanceSortBuilder extends SortBuilder builder.field(NESTED_FIELD.getPreferredName(), nestedSort); } builder.field(VALIDATION_METHOD_FIELD.getPreferredName(), validation); + builder.field(IGNORE_UNMAPPED.getPreferredName(), ignoreUnmapped); builder.endObject(); builder.endObject(); @@ -434,14 +457,15 @@ public class 
GeoDistanceSortBuilder extends SortBuilder Objects.equals(nestedFilter, other.nestedFilter) && Objects.equals(nestedPath, other.nestedPath) && Objects.equals(validation, other.validation) && - Objects.equals(nestedSort, other.nestedSort); + Objects.equals(nestedSort, other.nestedSort) && + ignoreUnmapped == other.ignoreUnmapped; } @Override public int hashCode() { return Objects.hash(this.fieldName, this.points, this.geoDistance, this.unit, this.sortMode, this.order, this.nestedFilter, - this.nestedPath, this.validation, this.nestedSort); + this.nestedPath, this.validation, this.nestedSort, this.ignoreUnmapped); } /** @@ -465,6 +489,7 @@ public class GeoDistanceSortBuilder extends SortBuilder String nestedPath = null; NestedSortBuilder nestedSort = null; GeoValidationMethod validation = null; + boolean ignoreUnmapped = false; XContentParser.Token token; String currentName = parser.currentName(); @@ -509,6 +534,8 @@ public class GeoDistanceSortBuilder extends SortBuilder } else if (NESTED_PATH_FIELD.match(currentName, parser.getDeprecationHandler())) { DEPRECATION_LOGGER.deprecated("[nested_path] has been deprecated in favour of the [nested] parameter"); nestedPath = parser.text(); + } else if (IGNORE_UNMAPPED.match(currentName, parser.getDeprecationHandler())) { + ignoreUnmapped = parser.booleanValue(); } else if (token == Token.VALUE_STRING){ if (fieldName != null && fieldName.equals(currentName) == false) { throw new ParsingException( @@ -554,6 +581,7 @@ public class GeoDistanceSortBuilder extends SortBuilder if (validation != null) { result.validation(validation); } + result.ignoreUnmapped(ignoreUnmapped); return result; } @@ -596,8 +624,11 @@ public class GeoDistanceSortBuilder extends SortBuilder MappedFieldType fieldType = context.fieldMapper(fieldName); if (fieldType == null) { - throw new IllegalArgumentException("failed to find mapper for [" + fieldName - + "] for geo distance based sort"); + if (ignoreUnmapped) { + fieldType = 
context.getMapperService().unmappedFieldType("geo_point"); + } else { + throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort"); + } } final IndexGeoPointFieldData geoIndexFieldData = context.getForField(fieldType); diff --git a/server/src/main/java/org/elasticsearch/transport/Transports.java b/server/src/main/java/org/elasticsearch/transport/Transports.java index d07846835c2..ddd22a7fb86 100644 --- a/server/src/main/java/org/elasticsearch/transport/Transports.java +++ b/server/src/main/java/org/elasticsearch/transport/Transports.java @@ -30,7 +30,6 @@ public enum Transports { public static final String TEST_MOCK_TRANSPORT_THREAD_PREFIX = "__mock_network_thread"; public static final String NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX = "es_nio_transport_worker"; - public static final String NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = "es_nio_transport_acceptor"; /** * Utility method to detect whether a thread is a network thread. Typically @@ -41,11 +40,11 @@ public enum Transports { final String threadName = t.getName(); for (String s : Arrays.asList( HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, + HttpServerTransport.HTTP_SERVER_ACCEPTOR_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, TcpTransport.TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX, TEST_MOCK_TRANSPORT_THREAD_PREFIX, - NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX, - NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX)) { + NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX)) { if (threadName.contains(s)) { return true; } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java index e46f2e06fe1..9ad4aeb69fb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ 
-35,12 +35,16 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; @@ -52,13 +56,16 @@ import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; +import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.startsWith; @@ -75,8 +82,20 @@ import static org.mockito.Mockito.when; public class TemplateUpgradeServiceTests extends ESTestCase { - private final ClusterService clusterService = new ClusterService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null, Collections.emptyMap()); + private ThreadPool threadPool; + private 
ClusterService clusterService; + + @Before + public void setUpTest() throws Exception { + threadPool = new TestThreadPool("TemplateUpgradeServiceTests"); + clusterService = createClusterService(threadPool); + } + + @After + public void tearDownTest() throws Exception { + threadPool.shutdownNow(); + clusterService.close(); + } public void testCalculateChangesAddChangeAndDelete() { @@ -90,7 +109,7 @@ public class TemplateUpgradeServiceTests extends ESTestCase { IndexTemplateMetaData.builder("changed_test_template").patterns(randomIndexPatterns()).build() ); - TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, + final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, threadPool, Arrays.asList( templates -> { if (shouldAdd) { @@ -190,18 +209,18 @@ public class TemplateUpgradeServiceTests extends ESTestCase { additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}")); } - ThreadPool threadPool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(threadPool.getThreadContext()).thenReturn(threadContext); - TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + final TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, Collections.emptyList()); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.updateTemplates(additions, deletions)); + IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.upgradeTemplates(additions, deletions)); assertThat(ise.getMessage(), containsString("template upgrade service should always happen in a system context")); - threadContext.markAsSystemContext(); - service.updateTemplates(additions, deletions); - int updatesInProgress = service.getUpdatesInProgress(); + 
service.upgradesInProgress.set(additionsCount + deletionsCount + 2); // +2 to skip tryFinishUpgrade + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + service.upgradeTemplates(additions, deletions); + } assertThat(putTemplateListeners, hasSize(additionsCount)); assertThat(deleteTemplateListeners, hasSize(deletionsCount)); @@ -218,30 +237,34 @@ public class TemplateUpgradeServiceTests extends ESTestCase { for (int i = 0; i < deletionsCount; i++) { if (randomBoolean()) { - int prevUpdatesInProgress = service.getUpdatesInProgress(); + int prevUpdatesInProgress = service.upgradesInProgress.get(); deleteTemplateListeners.get(i).onFailure(new RuntimeException("test - ignore")); - assertThat(prevUpdatesInProgress - service.getUpdatesInProgress(), equalTo(1)); + assertThat(prevUpdatesInProgress - service.upgradesInProgress.get(), equalTo(1)); } else { - int prevUpdatesInProgress = service.getUpdatesInProgress(); + int prevUpdatesInProgress = service.upgradesInProgress.get(); deleteTemplateListeners.get(i).onResponse(new DeleteIndexTemplateResponse(randomBoolean()) { }); - assertThat(prevUpdatesInProgress - service.getUpdatesInProgress(), equalTo(1)); + assertThat(prevUpdatesInProgress - service.upgradesInProgress.get(), equalTo(1)); } } - assertThat(updatesInProgress - service.getUpdatesInProgress(), equalTo(additionsCount + deletionsCount)); + // tryFinishUpgrade was skipped + assertThat(service.upgradesInProgress.get(), equalTo(2)); } private static final Set MASTER_DATA_ROLES = Collections.unmodifiableSet(EnumSet.of(DiscoveryNode.Role.MASTER, DiscoveryNode.Role.DATA)); @SuppressWarnings("unchecked") - public void testClusterStateUpdate() { + public void testClusterStateUpdate() throws InterruptedException { - AtomicReference> addedListener = new AtomicReference<>(); - AtomicReference> changedListener = new AtomicReference<>(); - 
AtomicReference> removedListener = new AtomicReference<>(); - AtomicInteger updateInvocation = new AtomicInteger(); + final AtomicReference> addedListener = new AtomicReference<>(); + final AtomicReference> changedListener = new AtomicReference<>(); + final AtomicReference> removedListener = new AtomicReference<>(); + final Semaphore updateInvocation = new Semaphore(0); + final Semaphore calculateInvocation = new Semaphore(0); + final Semaphore changedInvocation = new Semaphore(0); + final Semaphore finishInvocation = new Semaphore(0); MetaData metaData = randomMetaData( IndexTemplateMetaData.builder("user_template").patterns(randomIndexPatterns()).build(), @@ -249,21 +272,6 @@ public class TemplateUpgradeServiceTests extends ESTestCase { IndexTemplateMetaData.builder("changed_test_template").patterns(randomIndexPatterns()).build() ); - ThreadPool threadPool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(threadPool.getThreadContext()).thenReturn(threadContext); - ExecutorService executorService = mock(ExecutorService.class); - when(threadPool.generic()).thenReturn(executorService); - doAnswer(invocation -> { - Object[] args = invocation.getArguments(); - assert args.length == 1; - assertTrue(threadContext.isSystemContext()); - Runnable runnable = (Runnable) args[0]; - runnable.run(); - updateInvocation.incrementAndGet(); - return null; - }).when(executorService).execute(any(Runnable.class)); - Client mockClient = mock(Client.class); AdminClient mockAdminClient = mock(AdminClient.class); IndicesAdminClient mockIndicesAdminClient = mock(IndicesAdminClient.class); @@ -293,7 +301,7 @@ public class TemplateUpgradeServiceTests extends ESTestCase { return null; }).when(mockIndicesAdminClient).deleteTemplate(any(DeleteIndexTemplateRequest.class), any(ActionListener.class)); - TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, + final TemplateUpgradeService 
service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool, Arrays.asList( templates -> { assertNull(templates.put("added_test_template", IndexTemplateMetaData.builder("added_test_template") @@ -309,26 +317,63 @@ public class TemplateUpgradeServiceTests extends ESTestCase { .patterns(Collections.singletonList("*")).order(10).build())); return templates; } - )); + )) { + + @Override + void tryFinishUpgrade(AtomicBoolean anyUpgradeFailed) { + super.tryFinishUpgrade(anyUpgradeFailed); + finishInvocation.release(); + } + + @Override + void upgradeTemplates(Map changes, Set deletions) { + super.upgradeTemplates(changes, deletions); + updateInvocation.release(); + } + + @Override + Optional, Set>> + calculateTemplateChanges(ImmutableOpenMap templates) { + final Optional, Set>> ans = super.calculateTemplateChanges(templates); + calculateInvocation.release(); + return ans; + } + + @Override + public void clusterChanged(ClusterChangedEvent event) { + super.clusterChanged(event); + changedInvocation.release(); + } + }; ClusterState prevState = ClusterState.EMPTY_STATE; ClusterState state = ClusterState.builder(prevState).nodes(DiscoveryNodes.builder() .add(new DiscoveryNode("node1", "node1", buildNewFakeTransportAddress(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT) ).localNodeId("node1").masterNodeId("node1").build() ).metaData(metaData).build(); - service.clusterChanged(new ClusterChangedEvent("test", state, prevState)); + setState(clusterService, state); - assertThat(updateInvocation.get(), equalTo(1)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + updateInvocation.acquire(); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); assertThat(addedListener.get(), notNullValue()); assertThat(changedListener.get(), 
notNullValue()); assertThat(removedListener.get(), notNullValue()); prevState = state; state = ClusterState.builder(prevState).metaData(MetaData.builder(state.metaData()).removeTemplate("user_template")).build(); - service.clusterChanged(new ClusterChangedEvent("test 2", state, prevState)); + setState(clusterService, state); // Make sure that update wasn't invoked since we are still running - assertThat(updateInvocation.get(), equalTo(1)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); addedListener.getAndSet(null).onResponse(new PutIndexTemplateResponse(true) { }); @@ -337,19 +382,40 @@ public class TemplateUpgradeServiceTests extends ESTestCase { removedListener.getAndSet(null).onResponse(new DeleteIndexTemplateResponse(true) { }); - service.clusterChanged(new ClusterChangedEvent("test 3", state, prevState)); + // 3 upgrades should be completed, in addition to the final calculate + finishInvocation.acquire(3); + assertThat(finishInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + + setState(clusterService, state); // Make sure that update was called this time since we are no longer running - assertThat(updateInvocation.get(), equalTo(2)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + updateInvocation.acquire(); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); addedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); changedListener.getAndSet(null).onFailure(new RuntimeException("test - 
ignore")); removedListener.getAndSet(null).onFailure(new RuntimeException("test - ignore")); - service.clusterChanged(new ClusterChangedEvent("test 3", state, prevState)); + finishInvocation.acquire(3); + assertThat(finishInvocation.availablePermits(), equalTo(0)); + calculateInvocation.acquire(); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + + setState(clusterService, state); // Make sure that update wasn't called this time since the index template metadata didn't change - assertThat(updateInvocation.get(), equalTo(2)); + changedInvocation.acquire(); + assertThat(changedInvocation.availablePermits(), equalTo(0)); + assertThat(calculateInvocation.availablePermits(), equalTo(0)); + assertThat(updateInvocation.availablePermits(), equalTo(0)); + assertThat(finishInvocation.availablePermits(), equalTo(0)); } private static final int NODE_TEST_ITERS = 100; diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index f209f771ab0..f71ffe28b50 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.Constants; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -94,6 +95,7 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; @@ -137,7 +139,6 @@ public class UnicastZenPingTests 
extends ESTestCase { private static final UnicastHostsProvider EMPTY_HOSTS_PROVIDER = Collections::emptyList; - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/28685") public void testSimplePings() throws IOException, InterruptedException, ExecutionException { // use ephemeral ports final Settings settings = Settings.builder().put("cluster.name", "test").put(TcpTransport.PORT.getKey(), 0).build(); @@ -233,9 +234,9 @@ public class UnicastZenPingTests extends ESTestCase { ZenPing.PingResponse ping = pingResponses.iterator().next(); assertThat(ping.node().getId(), equalTo("UZP_B")); assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - assertPingCount(handleA, handleB, 3); - assertPingCount(handleA, handleC, 0); // mismatch, shouldn't ping - assertPingCount(handleA, handleD, 0); // mismatch, shouldn't ping + assertPings(handleA, handleB); + assertNoPings(handleA, handleC); // mismatch, shouldn't ping + assertNoPings(handleA, handleD); // mismatch, shouldn't ping // ping again, this time from B, logger.info("ping from UZP_B"); @@ -244,23 +245,23 @@ public class UnicastZenPingTests extends ESTestCase { ping = pingResponses.iterator().next(); assertThat(ping.node().getId(), equalTo("UZP_A")); assertThat(ping.getClusterStateVersion(), equalTo(ElectMasterService.MasterCandidate.UNRECOVERED_CLUSTER_VERSION)); - assertPingCount(handleB, handleA, 3); - assertPingCount(handleB, handleC, 0); // mismatch, shouldn't ping - assertPingCount(handleB, handleD, 0); // mismatch, shouldn't ping + assertPings(handleB, handleA); + assertNoPings(handleB, handleC); // mismatch, shouldn't ping + assertNoPings(handleB, handleD); // mismatch, shouldn't ping logger.info("ping from UZP_C"); pingResponses = zenPingC.pingAndWait().toList(); assertThat(pingResponses.size(), equalTo(1)); - assertPingCount(handleC, handleA, 0); - assertPingCount(handleC, handleB, 0); - assertPingCount(handleC, handleD, 3); + assertNoPings(handleC, handleA); + 
assertNoPings(handleC, handleB); + assertPings(handleC, handleD); logger.info("ping from UZP_D"); pingResponses = zenPingD.pingAndWait().toList(); assertThat(pingResponses.size(), equalTo(1)); - assertPingCount(handleD, handleA, 0); - assertPingCount(handleD, handleB, 0); - assertPingCount(handleD, handleC, 3); + assertNoPings(handleD, handleA); + assertNoPings(handleD, handleB); + assertPings(handleD, handleC); zenPingC.close(); handleD.counters.clear(); @@ -268,9 +269,9 @@ public class UnicastZenPingTests extends ESTestCase { pingResponses = zenPingD.pingAndWait().toList(); // check that node does not respond to pings anymore after the ping service has been closed assertThat(pingResponses.size(), equalTo(0)); - assertPingCount(handleD, handleA, 0); - assertPingCount(handleD, handleB, 0); - assertPingCount(handleD, handleC, 3); + assertNoPings(handleD, handleA); + assertNoPings(handleD, handleB); + assertPings(handleD, handleC); } public void testUnknownHostNotCached() throws ExecutionException, InterruptedException { @@ -353,8 +354,8 @@ public class UnicastZenPingTests extends ESTestCase { ZenPing.PingResponse ping = pingResponses.iterator().next(); assertThat(ping.node().getId(), equalTo("UZP_C")); assertThat(ping.getClusterStateVersion(), equalTo(state.version())); - assertPingCount(handleA, handleB, 0); - assertPingCount(handleA, handleC, 3); + assertNoPings(handleA, handleB); + assertPings(handleA, handleC); assertNull(handleA.counters.get(handleB.address)); } @@ -377,8 +378,8 @@ public class UnicastZenPingTests extends ESTestCase { assertThat(secondPingResponses.size(), equalTo(2)); final Set ids = new HashSet<>(secondPingResponses.stream().map(p -> p.node().getId()).collect(Collectors.toList())); assertThat(ids, equalTo(new HashSet<>(Arrays.asList("UZP_B", "UZP_C")))); - assertPingCount(handleA, handleB, 3); - assertPingCount(handleA, handleC, 3); + assertPings(handleA, handleB); + assertPings(handleA, handleC); } } @@ -745,13 +746,30 @@ public class 
UnicastZenPingTests extends ESTestCase { verify(logger).warn(eq("failed to resolve host [127.0.0.1:9300:9300]"), Matchers.any(ExecutionException.class)); } - private void assertPingCount(final NetworkHandle fromNode, final NetworkHandle toNode, int expectedCount) { + private void assertNoPings(final NetworkHandle fromNode, final NetworkHandle toNode) { final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger()); final String onNodeName = fromNode.node.getName(); assertNotNull("handle for [" + onNodeName + "] has no 'expected' counter", counter); final String forNodeName = toNode.node.getName(); assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", - counter.get(), equalTo(expectedCount)); + counter.get(), equalTo(0)); + } + + private void assertPings(final NetworkHandle fromNode, final NetworkHandle toNode) { + final AtomicInteger counter = fromNode.counters.getOrDefault(toNode.address, new AtomicInteger()); + final String onNodeName = fromNode.node.getName(); + assertNotNull("handle for [" + onNodeName + "] has no 'expected' counter", counter); + final String forNodeName = toNode.node.getName(); + if (Constants.WINDOWS) { + // Some of the ping attempts seem to sporadically fail on Windows (see https://github.com/elastic/elasticsearch/issues/28685) + // Anyhow, the point of the test is not to assert the exact number of pings, but to check if pinging has taken place or not + assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", + counter.get(), greaterThan(0)); + } else { + assertThat("node [" + onNodeName + "] ping count to [" + forNodeName + "] is unexpected", + counter.get(), equalTo(3)); + } + } private NetworkHandle startServices( diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 6f7042ba911..1d531bdeb90 100644 --- 
a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -30,7 +30,6 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce.AlreadySetException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -40,6 +39,7 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -122,7 +122,7 @@ public class IndexModuleTests extends ESTestCase { index = indexSettings.getIndex(); environment = TestEnvironment.newEnvironment(settings); emptyAnalysisRegistry = new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap()); threadPool = new TestThreadPool("test"); circuitBreakerService = new NoneCircuitBreakerService(); PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 9c0f2b3c7a5..36da9761b97 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.IndexSettingsModule; import 
org.elasticsearch.test.VersionUtils; import java.io.IOException; +import java.util.Collections; import java.util.Map; import static java.util.Collections.emptyMap; @@ -48,6 +49,8 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; public class AnalysisRegistryTests extends ESTestCase { private AnalysisRegistry emptyRegistry; @@ -58,7 +61,7 @@ public class AnalysisRegistryTests extends ESTestCase { private static AnalysisRegistry emptyAnalysisRegistry(Settings settings) { return new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap()); } private static IndexSettings indexSettingsOfCurrentVersion(Settings.Builder settings) { @@ -224,4 +227,16 @@ public class AnalysisRegistryTests extends ESTestCase { indexAnalyzers.close(); indexAnalyzers.close(); } + + public void testEnsureCloseInvocationProperlyDelegated() throws IOException { + Settings settings = Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + PreBuiltAnalyzerProviderFactory mock = mock(PreBuiltAnalyzerProviderFactory.class); + AnalysisRegistry registry = new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), Collections.singletonMap("key", mock)); + + registry.close(); + verify(mock).close(); + } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index b7781682359..c93df5b7519 100644 --- 
a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -19,7 +19,13 @@ package org.elasticsearch.index.query; +import org.apache.lucene.analysis.core.WhitespaceAnalyzer; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; @@ -30,11 +36,11 @@ import org.apache.lucene.search.spans.SpanBoostQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanQuery; import org.apache.lucene.search.spans.SpanTermQuery; +import org.apache.lucene.store.Directory; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.search.internal.SearchContext; @@ -42,6 +48,7 @@ import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; +import static java.util.Collections.singleton; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; @@ -238,4 +245,61 @@ public class SpanMultiTermQueryBuilderTests extends AbstractQueryTestCase query.rewrite(reader)); + assertThat(exc.getMessage(), containsString("maxClauseCount")); + + } finally { + BooleanQuery.setMaxClauseCount(origBoolMaxClauseCount); + } + } + } 
+ } + } + + public void testTopNMultiTermsRewriteInsideSpan() throws Exception { + Query query = QueryBuilders.spanMultiTermQueryBuilder( + QueryBuilders.prefixQuery("foo", "b").rewrite("top_terms_boost_2000") + ).toQuery(createShardContext()); + + if (query instanceof SpanBoostQuery) { + query = ((SpanBoostQuery)query).getQuery(); + } + + assertTrue(query instanceof SpanMultiTermQueryWrapper); + if (query instanceof SpanMultiTermQueryWrapper) { + MultiTermQuery.RewriteMethod rewriteMethod = ((SpanMultiTermQueryWrapper)query).getRewriteMethod(); + assertFalse(rewriteMethod instanceof SpanMultiTermQueryBuilder.TopTermSpanBooleanQueryRewriteWithMaxClause); + } + + } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index fac7a792c50..66ca3661965 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1897,8 +1897,13 @@ public class IndexShardTests extends IndexShardTestCase { }; closeShards(shard); IndexShard newShard = newShard( - ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), - shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, new InternalEngineFactory(), () -> {}, EMPTY_EVENT_LISTENER); + ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), + shard.shardPath(), + shard.indexSettings().getIndexMetaData(), + wrapper, + new InternalEngineFactory(), + () -> {}, + EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2044,8 +2049,13 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(shard); IndexShard newShard = newShard( - ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), - shard.shardPath(), 
shard.indexSettings().getIndexMetaData(), wrapper, new InternalEngineFactory(), () -> {}, EMPTY_EVENT_LISTENER); + ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), + shard.shardPath(), + shard.indexSettings().getIndexMetaData(), + wrapper, + new InternalEngineFactory(), + () -> {}, + EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 27b8b4cf157..35416c617fd 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -559,8 +559,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { final IllegalStateException e = expectThrows(IllegalStateException.class, () -> indicesService.createIndex(indexMetaData, Collections.emptyList())); final String pattern = - ".*multiple plugins provided engine factories for \\[foobar/.*\\]: " - + "\\[.*FooEnginePlugin/.*FooEngineFactory\\],\\[.*BarEnginePlugin/.*BarEngineFactory\\].*"; + ".*multiple engine factories provided for \\[foobar/.*\\]: \\[.*FooEngineFactory\\],\\[.*BarEngineFactory\\].*"; assertThat(e, hasToString(new RegexMatcher(pattern))); } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java index fd6e8e3fdf3..7a1d3a89420 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java @@ -100,9 +100,14 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase { // build a new shard using the same store directory as the closed shard ShardRouting 
shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE); - shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, - new InternalEngineFactory(), () -> {}, - EMPTY_EVENT_LISTENER); + shard = newShard( + shardRouting, + shard.shardPath(), + shard.indexSettings().getIndexMetaData(), + null, + new InternalEngineFactory(), + () -> {}, + EMPTY_EVENT_LISTENER); // restore the shard recoverShardFromSnapshot(shard, snapshot, repository); diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index d1f91d60e25..717bab12ea5 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -32,6 +34,8 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.analysis.AbstractIndexAnalyzerProvider; +import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; @@ -41,6 +45,8 @@ import 
org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.plugins.AnalysisPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; @@ -63,6 +69,7 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import static java.util.Collections.singletonMap; import static org.elasticsearch.client.Requests.searchRequest; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.boolQuery; @@ -106,7 +113,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class); + return Arrays.asList(InternalSettingsPlugin.class, MockKeywordPlugin.class, MockWhitespacePlugin.class); } public void testHighlightingWithStoredKeyword() throws IOException { @@ -1599,8 +1606,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .setSettings(Settings.builder() .put(indexSettings()) - .put("analysis.analyzer.my_analyzer.type", "pattern") - .put("analysis.analyzer.my_analyzer.pattern", "\\s+") + .put("analysis.analyzer.my_analyzer.type", "mock_whitespace") .build()) .addMapping("type", "text", "type=text,analyzer=my_analyzer")); ensureGreen(); @@ -1611,7 +1617,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { SearchResponse response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("text", "test")) .highlighter(new HighlightBuilder().field("text")).execute().actionGet(); - // PatternAnalyzer will throw an exception if it is resetted twice + // Mock 
tokenizer will throw an exception if it is resetted twice assertHitCount(response, 1L); } @@ -2976,4 +2982,22 @@ public class HighlighterSearchIT extends ESIntegTestCase { assertThat(field.getFragments()[0].string(), equalTo("Hello World")); } } + + public static class MockWhitespacePlugin extends Plugin implements AnalysisPlugin { + + @Override + public Map>> getAnalyzers() { + return singletonMap("mock_whitespace", (indexSettings, environment, name, settings) -> { + return new AbstractIndexAnalyzerProvider(indexSettings, name, settings) { + + MockAnalyzer instance = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false); + + @Override + public Analyzer get() { + return instance; + } + }; + }); + } + } } diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 7e1231f9059..c3df8d778a7 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -19,7 +19,9 @@ package org.elasticsearch.search.query; +import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.util.English; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -33,8 +35,12 @@ import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; +import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.RangeQueryBuilder; +import org.elasticsearch.index.query.SpanMultiTermQueryBuilder; +import org.elasticsearch.index.query.SpanNearQueryBuilder; 
+import org.elasticsearch.index.query.SpanTermQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.WrapperQueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders; @@ -52,6 +58,7 @@ import org.joda.time.DateTimeZone; import org.joda.time.format.ISODateTimeFormat; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.Random; @@ -1818,5 +1825,4 @@ public class SearchQueryIT extends ESIntegTestCase { SearchResponse searchResponse = client().prepareSearch("test").setQuery(range).get(); assertHitCount(searchResponse, 1); } - } diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index 6eff821c5c3..965dcb3e8cc 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -49,6 +49,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class GeoDistanceIT extends ESIntegTestCase { @@ -406,4 +407,57 @@ public class GeoDistanceIT extends ESIntegTestCase { assertHitCount(result, 1); } + public void testDistanceSortingWithUnmappedField() throws Exception { + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") + .startObject("locations").field("type", "geo_point"); + xContentBuilder.endObject().endObject().endObject().endObject(); + assertAcked(prepareCreate("test1").addMapping("type1", xContentBuilder)); + assertAcked(prepareCreate("test2")); + ensureGreen(); + + client().prepareIndex("test1", 
"type1", "1") + .setSource(jsonBuilder().startObject().array("names", "Times Square", "Tribeca").startArray("locations") + // to NY: 5.286 km + .startObject().field("lat", 40.759011).field("lon", -73.9844722).endObject() + // to NY: 0.4621 km + .startObject().field("lat", 40.718266).field("lon", -74.007819).endObject().endArray().endObject()) + .execute().actionGet(); + + client().prepareIndex("test2", "type1", "2") + .setSource(jsonBuilder().startObject().array("names", "Wall Street", "Soho").endObject()) + .execute().actionGet(); + + refresh(); + + // Order: Asc + SearchResponse searchResponse = client().prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort(SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.ASC)).execute() + .actionGet(); + + assertHitCount(searchResponse, 2); + assertOrderedSearchHits(searchResponse, "1", "2"); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), closeTo(462.1d, 10d)); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + + // Order: Desc + searchResponse = client().prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort( + SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).ignoreUnmapped(true).order(SortOrder.DESC) + ).execute() + .actionGet(); + + // Doc with missing geo point is first, is consistent with 0.20.x + assertHitCount(searchResponse, 2); + assertOrderedSearchHits(searchResponse, "2", "1"); + assertThat(((Number) searchResponse.getHits().getAt(0).getSortValues()[0]).doubleValue(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(((Number) searchResponse.getHits().getAt(1).getSortValues()[0]).doubleValue(), closeTo(5286d, 10d)); + + // Make sure that by default the unmapped fields continue to fail + searchResponse = client().prepareSearch("test1", "test2").setQuery(matchAllQuery()) + .addSort( 
SortBuilders.geoDistanceSort("locations", 40.7143528, -74.0059731).order(SortOrder.DESC)).execute() + .actionGet(); + assertThat(searchResponse.getFailedShards(), greaterThan(0)); + assertHitCount(searchResponse, 1); + } + } diff --git a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java index 1109bdfc1f1..9a3f520f96f 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; @@ -121,6 +122,9 @@ public class GeoDistanceSortBuilderTests extends AbstractSortTestCase randomFrom(GeoValidationMethod.values()))); break; + case 8: + result.ignoreUnmapped(result.ignoreUnmapped() == false); + break; } return result; } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 95fe7afed16..1f3bdf2e88a 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -3114,7 +3114,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas .put("location", repoPath) .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f) // test that we can take a snapshot after a failed one, even if a partial index-N was written - .put("atomic_move", false) + .put("allow_atomic_operations", false) 
.put("random", randomAlphaOfLength(10)))); logger.info("--> indexing some data"); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index 089955d140f..b5c63397241 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -77,9 +77,4 @@ public class BlobContainerWrapper implements BlobContainer { public Map listBlobsByPrefix(String blobNamePrefix) throws IOException { return delegate.listBlobsByPrefix(blobNamePrefix); } - - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - delegate.move(sourceBlobName, targetBlobName); - } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 5fa884adbfe..d0702acf103 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -109,7 +109,7 @@ public class MockRepository extends FsRepository { /** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a died master node */ private volatile boolean blockAndFailOnWriteSnapFile; - private volatile boolean atomicMove; + private volatile boolean allowAtomicOperations; private volatile boolean blocked = false; @@ -126,7 +126,7 @@ public class MockRepository extends FsRepository { blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false); randomPrefix = metadata.settings().get("random", "default"); waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L); - atomicMove = metadata.settings().getAsBoolean("atomic_move", true); + allowAtomicOperations = 
metadata.settings().getAsBoolean("allow_atomic_operations", true); logger.info("starting mock repository with random prefix {}", randomPrefix); mockBlobStore = new MockBlobStore(super.blobStore()); } @@ -345,24 +345,6 @@ public class MockRepository extends FsRepository { return super.listBlobsByPrefix(blobNamePrefix); } - @Override - public void move(String sourceBlob, String targetBlob) throws IOException { - if (blockOnWriteIndexFile && targetBlob.startsWith("index-")) { - blockExecutionAndMaybeWait(targetBlob); - } - if (atomicMove) { - // atomic move since this inherits from FsBlobContainer which provides atomic moves - maybeIOExceptionOrBlock(targetBlob); - super.move(sourceBlob, targetBlob); - } else { - // simulate a non-atomic move, since many blob container implementations - // will not have an atomic move, and we should be able to handle that - maybeIOExceptionOrBlock(targetBlob); - super.writeBlob(targetBlob, super.readBlob(sourceBlob), 0L); - super.deleteBlob(sourceBlob); - } - } - @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { maybeIOExceptionOrBlock(blobName); @@ -377,14 +359,14 @@ public class MockRepository extends FsRepository { @Override public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { final Random random = RandomizedContext.current().getRandom(); - if (random.nextBoolean()) { + if (allowAtomicOperations && random.nextBoolean()) { if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) { // Simulate a failure between the write and move operation in FsBlobContainer final String tempBlobName = FsBlobContainer.tempBlobName(blobName); super.writeBlob(tempBlobName, inputStream, blobSize); maybeIOExceptionOrBlock(blobName); final FsBlobContainer fsBlobContainer = (FsBlobContainer) delegate(); - fsBlobContainer.move(tempBlobName, blobName); + fsBlobContainer.moveBlobAtomic(tempBlobName, blobName); } else 
{ // Atomic write since it is potentially supported // by the delegating blob container diff --git a/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java new file mode 100644 index 00000000000..3233edefb30 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/tasks/CancelTasksResponseTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.tasks; + +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.net.ConnectException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static org.hamcrest.Matchers.equalTo; + +public class CancelTasksResponseTests extends AbstractXContentTestCase { + + @Override + protected CancelTasksResponse createTestInstance() { + List randomTasks = randomTasks(); + return new CancelTasksResponse(randomTasks, Collections.emptyList(), Collections.emptyList()); + } + + private static List randomTasks() { + List randomTasks = new ArrayList<>(); + for (int i = 0; i < randomInt(10); i++) { + randomTasks.add(TaskInfoTests.randomTaskInfo()); + } + return randomTasks; + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + //status and headers hold arbitrary content, we can't inject random fields in them + return field -> field.endsWith("status") || field.endsWith("headers"); + } + + @Override + protected void assertEqualInstances(CancelTasksResponse expectedInstance, CancelTasksResponse newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); + ListTasksResponseTests.assertOnNodeFailures(newInstance.getNodeFailures(), expectedInstance.getNodeFailures()); + ListTasksResponseTests.assertOnTaskFailures(newInstance.getTaskFailures(), expectedInstance.getTaskFailures()); + } + + @Override + protected CancelTasksResponse 
doParseInstance(XContentParser parser) { + return CancelTasksResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected boolean assertToXContentEquivalence() { + return true; + } + + /** + * Test parsing {@link ListTasksResponse} with inner failures as they don't support asserting on xcontent equivalence, given that + * exceptions are not parsed back as the same original class. We run the usual {@link AbstractXContentTestCase#testFromXContent()} + * without failures, and this other test with failures where we disable asserting on xcontent equivalence at the end. + */ + public void testFromXContentWithFailures() throws IOException { + Supplier instanceSupplier = CancelTasksResponseTests::createTestInstanceWithFailures; + //with random fields insertion in the inner exceptions, some random stuff may be parsed back as metadata, + //but that does not bother our assertions, as we only want to test that we don't break. 
+ boolean supportsUnknownFields = true; + //exceptions are not of the same type whenever parsed back + boolean assertToXContentEquivalence = false; + AbstractXContentTestCase.testFromXContent(NUMBER_OF_TEST_RUNS, instanceSupplier, supportsUnknownFields, Strings.EMPTY_ARRAY, + getRandomFieldsExcludeFilter(), this::createParser, this::doParseInstance, + this::assertEqualInstances, assertToXContentEquivalence); + } + + private static CancelTasksResponse createTestInstanceWithFailures() { + int numNodeFailures = randomIntBetween(0, 3); + List nodeFailures = new ArrayList<>(numNodeFailures); + for (int i = 0; i < numNodeFailures; i++) { + nodeFailures.add(new FailedNodeException(randomAlphaOfLength(5), "error message", new ConnectException())); + } + int numTaskFailures = randomIntBetween(0, 3); + List taskFailures = new ArrayList<>(numTaskFailures); + for (int i = 0; i < numTaskFailures; i++) { + taskFailures.add(new TaskOperationFailure(randomAlphaOfLength(5), randomLong(), new IllegalStateException())); + } + return new CancelTasksResponse(randomTasks(), taskFailures, nodeFailures); + } + +} diff --git a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java index b280446db1c..4862278fac1 100644 --- a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java @@ -109,20 +109,30 @@ public class ListTasksResponseTests extends AbstractXContentTestCase nodeFailures, + List expectedFailures) { + assertThat(nodeFailures.size(), equalTo(expectedFailures.size())); + for (int i = 0; i < nodeFailures.size(); i++) { + ElasticsearchException newException = nodeFailures.get(i); + ElasticsearchException expectedException = expectedFailures.get(i); assertThat(newException.getMetadata("es.node_id").get(0), equalTo(((FailedNodeException)expectedException).nodeId())); 
assertThat(newException.getMessage(), equalTo("Elasticsearch exception [type=failed_node_exception, reason=error message]")); assertThat(newException.getCause(), instanceOf(ElasticsearchException.class)); ElasticsearchException cause = (ElasticsearchException) newException.getCause(); assertThat(cause.getMessage(), equalTo("Elasticsearch exception [type=connect_exception, reason=null]")); } - assertThat(newInstance.getTaskFailures().size(), equalTo(expectedInstance.getTaskFailures().size())); - for (int i = 0; i < newInstance.getTaskFailures().size(); i++) { - TaskOperationFailure newFailure = newInstance.getTaskFailures().get(i); - TaskOperationFailure expectedFailure = expectedInstance.getTaskFailures().get(i); + } + + protected static void assertOnTaskFailures(List taskFailures, + List expectedFailures) { + assertThat(taskFailures.size(), equalTo(expectedFailures.size())); + for (int i = 0; i < taskFailures.size(); i++) { + TaskOperationFailure newFailure = taskFailures.get(i); + TaskOperationFailure expectedFailure = expectedFailures.get(i); assertThat(newFailure.getNodeId(), equalTo(expectedFailure.getNodeId())); assertThat(newFailure.getTaskId(), equalTo(expectedFailure.getTaskId())); assertThat(newFailure.getStatus(), equalTo(expectedFailure.getStatus())); diff --git a/settings.gradle b/settings.gradle index ee88f9bd0ed..0d9be7c2494 100644 --- a/settings.gradle +++ b/settings.gradle @@ -83,8 +83,8 @@ if (isEclipse) { // eclipse cannot handle an intermediate dependency between main and test, so we must create separate projects // for server-src and server-tests projects << 'server-tests' - projects << 'libs:elasticsearch-core-tests' - projects << 'libs:elasticsearch-nio-tests' + projects << 'libs:core-tests' + projects << 'libs:nio-tests' projects << 'libs:x-content-tests' projects << 'libs:secure-sm-tests' projects << 'libs:grok-tests' @@ -99,14 +99,14 @@ if (isEclipse) { project(":server").buildFileName = 'eclipse-build.gradle' 
project(":server-tests").projectDir = new File(rootProject.projectDir, 'server/src/test') project(":server-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:elasticsearch-core").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/main') - project(":libs:elasticsearch-core").buildFileName = 'eclipse-build.gradle' - project(":libs:elasticsearch-core-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-core/src/test') - project(":libs:elasticsearch-core-tests").buildFileName = 'eclipse-build.gradle' - project(":libs:elasticsearch-nio").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/main') - project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle' - project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test') - project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:core").projectDir = new File(rootProject.projectDir, 'libs/core/src/main') + project(":libs:core").buildFileName = 'eclipse-build.gradle' + project(":libs:core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test') + project(":libs:core-tests").buildFileName = 'eclipse-build.gradle' + project(":libs:nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main') + project(":libs:nio").buildFileName = 'eclipse-build.gradle' + project(":libs:nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test') + project(":libs:nio-tests").buildFileName = 'eclipse-build.gradle' project(":libs:x-content").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/main') project(":libs:x-content").buildFileName = 'eclipse-build.gradle' project(":libs:x-content-tests").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/test') diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index df2024de445..13f9e9debc9 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -36,7 +36,6 @@ import java.util.HashMap; import java.util.Map; import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes; -import static org.elasticsearch.repositories.ESBlobStoreTestCase.readBlobFully; import static org.elasticsearch.repositories.ESBlobStoreTestCase.writeRandomBlob; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; @@ -47,6 +46,13 @@ import static org.hamcrest.CoreMatchers.notNullValue; */ public abstract class ESBlobStoreContainerTestCase extends ESTestCase { + public void testReadNonExistingPath() throws IOException { + try(BlobStore store = newBlobStore()) { + final BlobContainer container = store.blobContainer(new BlobPath()); + expectThrows(NoSuchFileException.class, () -> container.readBlob("non-existing")); + } + } + public void testWriteRead() throws IOException { try(BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(new BlobPath()); @@ -66,7 +72,7 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase { } } - public void testMoveAndList() throws IOException { + public void testList() throws IOException { try(BlobStore store = newBlobStore()) { final BlobContainer container = store.blobContainer(new BlobPath()); assertThat(container.listBlobs().size(), equalTo(0)); @@ -102,15 +108,6 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase { assertThat(container.listBlobsByPrefix("foo-").size(), equalTo(numberOfFooBlobs)); assertThat(container.listBlobsByPrefix("bar-").size(), equalTo(numberOfBarBlobs)); assertThat(container.listBlobsByPrefix("baz-").size(), 
equalTo(0)); - - String newName = "bar-new"; - // Move to a new location - container.move(name, newName); - assertThat(container.listBlobsByPrefix(name).size(), equalTo(0)); - blobs = container.listBlobsByPrefix(newName); - assertThat(blobs.size(), equalTo(1)); - assertThat(blobs.get(newName).length(), equalTo(generatedBlobs.get(name))); - assertThat(data, equalTo(readBlobFully(container, newName, length))); } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 9481f60d933..cb9e243660a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -30,17 +30,15 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.nio.AcceptingSelector; -import org.elasticsearch.nio.AcceptorEventHandler; import org.elasticsearch.nio.BytesChannelContext; import org.elasticsearch.nio.BytesWriteHandler; import org.elasticsearch.nio.ChannelFactory; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioSelector; import org.elasticsearch.nio.NioServerSocketChannel; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ServerChannelContext; -import org.elasticsearch.nio.SocketSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransport; @@ -62,7 +60,6 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF public class MockNioTransport extends TcpTransport { private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = 
Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX; - private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX; private final PageCacheRecycler pageCacheRecycler; private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); @@ -93,20 +90,13 @@ public class MockNioTransport extends TcpTransport { protected void doStart() { boolean success = false; try { - int acceptorCount = 0; - boolean useNetworkServer = NetworkService.NETWORK_SERVER.get(settings); - if (useNetworkServer) { - acceptorCount = 1; - } - nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount, - (s) -> new AcceptorEventHandler(s, this::onNonChannelException), - daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, - () -> new TestingSocketEventHandler(this::onNonChannelException)); + nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, + (s) -> new TestingSocketEventHandler(this::onNonChannelException, s)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = new MockTcpChannelFactory(clientProfileSettings, "client"); - if (useNetworkServer) { + if (NetworkService.NETWORK_SERVER.get(settings)) { // loop through all profiles and start them up, special handling for default one for (ProfileSettings profileSettings : profileSettings) { String profileName = profileSettings.profileName; @@ -159,7 +149,7 @@ public class MockNioTransport extends TcpTransport { } @Override - public MockSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + public MockSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { MockSocketChannel nioChannel = new MockSocketChannel(profileName, channel, selector); Supplier pageSupplier = () -> { Recycler.V bytes = pageCacheRecycler.bytePage(false); @@ 
-173,7 +163,7 @@ public class MockNioTransport extends TcpTransport { } @Override - public MockServerChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + public MockServerChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { MockServerChannel nioServerChannel = new MockServerChannel(profileName, channel, this, selector); Consumer exceptionHandler = (e) -> logger.error(() -> new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); @@ -205,7 +195,7 @@ public class MockNioTransport extends TcpTransport { private final String profile; - MockServerChannel(String profile, ServerSocketChannel channel, ChannelFactory channelFactory, AcceptingSelector selector) + MockServerChannel(String profile, ServerSocketChannel channel, ChannelFactory channelFactory, NioSelector selector) throws IOException { super(channel); this.profile = profile; @@ -246,7 +236,7 @@ public class MockNioTransport extends TcpTransport { private final String profile; - private MockSocketChannel(String profile, java.nio.channels.SocketChannel socketChannel, SocketSelector selector) + private MockSocketChannel(String profile, java.nio.channels.SocketChannel socketChannel, NioSelector selector) throws IOException { super(socketChannel); this.profile = profile; diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java index 810e4201022..cecd3c60613 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestingSocketEventHandler.java @@ -19,21 +19,23 @@ package org.elasticsearch.transport.nio; +import org.elasticsearch.nio.EventHandler; +import org.elasticsearch.nio.NioSelector; import 
org.elasticsearch.nio.SocketChannelContext; -import org.elasticsearch.nio.SocketEventHandler; import java.io.IOException; import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; import java.util.function.Consumer; +import java.util.function.Supplier; -public class TestingSocketEventHandler extends SocketEventHandler { +public class TestingSocketEventHandler extends EventHandler { private Set hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>()); - public TestingSocketEventHandler(Consumer exceptionHandler) { - super(exceptionHandler); + public TestingSocketEventHandler(Consumer exceptionHandler, Supplier selectorSupplier) { + super(exceptionHandler, selectorSupplier); } public void handleConnect(SocketChannelContext context) throws IOException { diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 17e0f2b70fd..a0af24d6cc6 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -16,9 +16,7 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/ml/functions/rare.asciidoc', 'en/ml/functions/sum.asciidoc', 'en/ml/functions/time.asciidoc', - 'en/ml/aggregations.asciidoc', 'en/ml/customurl.asciidoc', - 'en/monitoring/indices.asciidoc', 'en/rest-api/security/ssl.asciidoc', 'en/rest-api/security/users.asciidoc', 'en/rest-api/security/tokens.asciidoc', @@ -281,6 +279,58 @@ setups['library'] = ''' {"name": "The Moon is a Harsh Mistress", "author": "Robert A. 
Heinlein", "release_date": "1966-04-01", "page_count": 288} ''' +setups['farequote_index'] = ''' + - do: + indices.create: + index: farequote + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + metric: + properties: + time: + type: date + responsetime: + type: float + airline: + type: keyword +''' +setups['farequote_data'] = setups['farequote_index'] + ''' + - do: + bulk: + index: farequote + type: metric + refresh: true + body: | + {"index": {"_id":"1"}} + {"airline":"JZA","responsetime":990.4628,"time":"2016-02-07T00:00:00+0000"} + {"index": {"_id":"2"}} + {"airline":"JBU","responsetime":877.5927,"time":"2016-02-07T00:00:00+0000"} + {"index": {"_id":"3"}} + {"airline":"KLM","responsetime":1355.4812,"time":"2016-02-07T00:00:00+0000"} +''' +setups['farequote_job'] = setups['farequote_data'] + ''' + - do: + xpack.ml.put_job: + job_id: "farequote" + body: > + { + "analysis_config": { + "bucket_span": "60m", + "detectors": [{ + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" + }], + "summary_count_field_name": "doc_count" + }, + "data_description": { + "time_field": "time" + } + } +''' setups['server_metrics_index'] = ''' - do: indices.create: diff --git a/x-pack/docs/en/ml/aggregations.asciidoc b/x-pack/docs/en/ml/aggregations.asciidoc index cc98a45d11e..f3b8e6b3e34 100644 --- a/x-pack/docs/en/ml/aggregations.asciidoc +++ b/x-pack/docs/en/ml/aggregations.asciidoc @@ -11,11 +11,12 @@ aggregated data into {xpackml} instead of raw results, which reduces the volume of data that must be considered while detecting anomalies. There are some limitations to using aggregations in {dfeeds}, however. -Your aggregation must include a buckets aggregation, which in turn must contain -a date histogram aggregation. This requirement ensures that the aggregated -data is a time series. 
If you use a terms aggregation and the cardinality of a -term is high, then the aggregation might not be effective and you might want -to just use the default search and scroll behavior. +Your aggregation must include a `date_histogram` aggregation, which in turn must +contain a `max` aggregation on the time field. This requirement ensures that the +aggregated data is a time series and the timestamp of each bucket is the time +of the last record in the bucket. If you use a terms aggregation and the +cardinality of a term is high, then the aggregation might not be effective and +you might want to just use the default search and scroll behavior. When you create or update a job, you can include the names of aggregations, for example: @@ -27,9 +28,9 @@ PUT _xpack/ml/anomaly_detectors/farequote "analysis_config": { "bucket_span": "60m", "detectors": [{ - "function":"mean", - "field_name":"responsetime", - "by_field_name":"airline" + "function": "mean", + "field_name": "responsetime", + "by_field_name": "airline" }], "summary_count_field_name": "doc_count" }, @@ -38,6 +39,8 @@ PUT _xpack/ml/anomaly_detectors/farequote } } ---------------------------------- +// CONSOLE +// TEST[setup:farequote_data] In this example, the `airline`, `responsetime`, and `time` fields are aggregations. @@ -85,7 +88,8 @@ PUT _xpack/ml/datafeeds/datafeed-farequote } } ---------------------------------- - +// CONSOLE +// TEST[setup:farequote_job] In this example, the aggregations have names that match the fields that they operate on. That is to say, the `max` aggregation is named `time` and its @@ -100,35 +104,86 @@ For all other aggregations, if the aggregation name doesn't match the field name there are limitations in the drill-down functionality within the {ml} page in {kib}. +{dfeeds} support complex nested aggregations. This example uses the `derivative` +pipeline aggregation to find the 1st order derivative of the counter +`system.network.out.bytes` for each value of the field `beat.name`. 
+ +[source,js] +---------------------------------- +"aggregations": { + "beat.name": { + "terms": { + "field": "beat.name" + }, + "aggregations": { + "buckets": { + "date_histogram": { + "field": "@timestamp", + "interval": "5m" + }, + "aggregations": { + "@timestamp": { + "max": { + "field": "@timestamp" + } + }, + "bytes_out_average": { + "avg": { + "field": "system.network.out.bytes" + } + }, + "bytes_out_derivative": { + "derivative": { + "buckets_path": "bytes_out_average" + } + } + } + } + } + } +} +---------------------------------- +// NOTCONSOLE + When you define an aggregation in a {dfeed}, it must have the following form: [source,js] ---------------------------------- -"aggregations" : { - "buckets" : { - "date_histogram" : { - "time_zone": "UTC", ... +"aggregations": { + ["bucketing_aggregation": { + "bucket_agg": { + ... }, - "aggregations": { - "": { - "max": { - "field":"" + "aggregations": {] + "date_histogram_aggregation": { + "date_histogram": { + "field": "time" + }, + "aggregations": { + "timestamp": { + "max": { + "field": "time" + } + }, + [,"": { + "terms":{... + } + [,"aggregations" : { + []+ + } ] + }] } } - [,"": { - "terms":{... - } - [,"aggregations" : { - []+ - } ] - }] - } - } + } + } } ---------------------------------- +// NOTCONSOLE -You must specify `buckets` as the aggregation name and `date_histogram` as the -aggregation type. For more information, see +The top level aggregation must be either a {ref}/search-aggregations-bucket.html[Bucket Aggregation] +containing a single sub-aggregation that is a `date_histogram` or the top level aggregation +is the required `date_histogram`. There must be exactly 1 `date_histogram` aggregation. +For more information, see {ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date Histogram Aggregation]. 
NOTE: The `time_zone` parameter in the date histogram aggregation must be set to `UTC`, @@ -163,6 +218,7 @@ GET .../_search { } } -------------------------------------------------- +// NOTCONSOLE By default, {es} limits the maximum number of terms returned to 10000. For high cardinality fields, the query might not run. It might return errors related to diff --git a/x-pack/docs/en/ml/getting-started-data.asciidoc b/x-pack/docs/en/ml/getting-started-data.asciidoc deleted file mode 100644 index 6a0c6bbecc8..00000000000 --- a/x-pack/docs/en/ml/getting-started-data.asciidoc +++ /dev/null @@ -1,210 +0,0 @@ -[[ml-gs-data]] -=== Identifying Data for Analysis - -For the purposes of this tutorial, we provide sample data that you can play with -and search in {es}. When you consider your own data, however, it's important to -take a moment and think about where the {xpackml} features will be most -impactful. - -The first consideration is that it must be time series data. The {ml} features -are designed to model and detect anomalies in time series data. - -The second consideration, especially when you are first learning to use {ml}, -is the importance of the data and how familiar you are with it. Ideally, it is -information that contains key performance indicators (KPIs) for the health, -security, or success of your business or system. It is information that you need -to monitor and act on when anomalous behavior occurs. You might even have {kib} -dashboards that you're already using to watch this data. The better you know the -data, the quicker you will be able to create {ml} jobs that generate useful -insights. - -The final consideration is where the data is located. This tutorial assumes that -your data is stored in {es}. It guides you through the steps required to create -a _{dfeed}_ that passes data to a job. If your own data is outside of {es}, -analysis is still possible by using a post data API. 
- -IMPORTANT: If you want to create {ml} jobs in {kib}, you must use {dfeeds}. -That is to say, you must store your input data in {es}. When you create -a job, you select an existing index pattern and {kib} configures the {dfeed} -for you under the covers. - - -[float] -[[ml-gs-sampledata]] -==== Obtaining a Sample Data Set - -In this step we will upload some sample data to {es}. This is standard -{es} functionality, and is needed to set the stage for using {ml}. - -The sample data for this tutorial contains information about the requests that -are received by various applications and services in a system. A system -administrator might use this type of information to track the total number of -requests across all of the infrastructure. If the number of requests increases -or decreases unexpectedly, for example, this might be an indication that there -is a problem or that resources need to be redistributed. By using the {xpack} -{ml} features to model the behavior of this data, it is easier to identify -anomalies and take appropriate action. - -Download this sample data by clicking here: -https://download.elastic.co/demos/machine_learning/gettingstarted/server_metrics.tar.gz[server_metrics.tar.gz] - -Use the following commands to extract the files: - -[source,sh] ----------------------------------- -tar -zxvf server_metrics.tar.gz ----------------------------------- - -Each document in the server-metrics data set has the following schema: - -[source,js] ----------------------------------- -{ - "index": - { - "_index":"server-metrics", - "_type":"metric", - "_id":"1177" - } -} -{ - "@timestamp":"2017-03-23T13:00:00", - "accept":36320, - "deny":4156, - "host":"server_2", - "response":2.4558210155, - "service":"app_3", - "total":40476 -} ----------------------------------- -// NOTCONSOLE - -TIP: The sample data sets include summarized data. For example, the `total` -value is a sum of the requests that were received by a specific service at a -particular time. 
If your data is stored in {es}, you can generate -this type of sum or average by using aggregations. One of the benefits of -summarizing data this way is that {es} automatically distributes -these calculations across your cluster. You can then feed this summarized data -into {xpackml} instead of raw results, which reduces the volume -of data that must be considered while detecting anomalies. For the purposes of -this tutorial, however, these summary values are stored in {es}. For more -information, see <>. - -Before you load the data set, you need to set up {ref}/mapping.html[_mappings_] -for the fields. Mappings divide the documents in the index into logical groups -and specify a field's characteristics, such as the field's searchability or -whether or not it's _tokenized_, or broken up into separate words. - -The sample data includes an `upload_server-metrics.sh` script, which you can use -to create the mappings and load the data set. You can download it by clicking -here: https://download.elastic.co/demos/machine_learning/gettingstarted/upload_server-metrics.sh[upload_server-metrics.sh] -Before you run it, however, you must edit the USERNAME and PASSWORD variables -with your actual user ID and password. 
- -The script runs a command similar to the following example, which sets up a -mapping for the data set: - -[source,sh] ----------------------------------- -curl -u elastic:x-pack-test-password -X PUT -H 'Content-Type: application/json' -http://localhost:9200/server-metrics -d '{ - "settings":{ - "number_of_shards":1, - "number_of_replicas":0 - }, - "mappings":{ - "metric":{ - "properties":{ - "@timestamp":{ - "type":"date" - }, - "accept":{ - "type":"long" - }, - "deny":{ - "type":"long" - }, - "host":{ - "type":"keyword" - }, - "response":{ - "type":"float" - }, - "service":{ - "type":"keyword" - }, - "total":{ - "type":"long" - } - } - } - } -}' ----------------------------------- -// NOTCONSOLE - -NOTE: If you run this command, you must replace `x-pack-test-password` with your -actual password. - -You can then use the {es} `bulk` API to load the data set. The -`upload_server-metrics.sh` script runs commands similar to the following -example, which loads the four JSON files: - -[source,sh] ----------------------------------- -curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" -http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_1.json" - -curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" -http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_2.json" - -curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" -http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_3.json" - -curl -u elastic:x-pack-test-password -X POST -H "Content-Type: application/json" -http://localhost:9200/server-metrics/_bulk --data-binary "@server-metrics_4.json" ----------------------------------- -// NOTCONSOLE - -TIP: This will upload 200MB of data. This is split into 4 files as there is a -maximum 100MB limit when using the `_bulk` API. - -These commands might take some time to run, depending on the computing resources -available. 
- -You can verify that the data was loaded successfully with the following command: - -[source,sh] ----------------------------------- -curl 'http://localhost:9200/_cat/indices?v' -u elastic:x-pack-test-password ----------------------------------- -// NOTCONSOLE - -You should see output similar to the following: - -[source,txt] ----------------------------------- -health status index ... pri rep docs.count ... -green open server-metrics ... 1 0 905940 ... ----------------------------------- -// NOTCONSOLE - -Next, you must define an index pattern for this data set: - -. Open {kib} in your web browser and log in. If you are running {kib} -locally, go to `http://localhost:5601/`. - -. Click the **Management** tab, then **{kib}** > **Index Patterns**. - -. If you already have index patterns, click **Create Index** to define a new -one. Otherwise, the **Create index pattern** wizard is already open. - -. For this tutorial, any pattern that matches the name of the index you've -loaded will work. For example, enter `server-metrics*` as the index pattern. - -. In the **Configure settings** step, select the `@timestamp` field in the -**Time Filter field name** list. - -. Click **Create index pattern**. - -This data set can now be analyzed in {ml} jobs in {kib}. diff --git a/x-pack/docs/en/ml/getting-started-forecast.asciidoc b/x-pack/docs/en/ml/getting-started-forecast.asciidoc deleted file mode 100644 index bc445195bd4..00000000000 --- a/x-pack/docs/en/ml/getting-started-forecast.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -[[ml-gs-forecast]] -=== Creating Forecasts - -In addition to detecting anomalous behavior in your data, you can use -{ml} to predict future behavior. For more information, see <>. - -To create a forecast in {kib}: - -. Go to the **Single Metric Viewer** and select one of the jobs that you created -in this tutorial. For example, select the `total-requests` job. - -. Click **Forecast**. 
+ -+ --- -[role="screenshot"] -image::images/ml-gs-forecast.jpg["Create a forecast from the Single Metric Viewer"] --- - -. Specify a duration for your forecast. This value indicates how far to -extrapolate beyond the last record that was processed. You must use time units, -such as `30d` for 30 days. For more information, see -{ref}/common-options.html#time-units[Time Units]. In this example, we use a -duration of 1 week: + -+ --- -[role="screenshot"] -image::images/ml-gs-duration.jpg["Specify a duration of 1w"] --- - -. View the forecast in the **Single Metric Viewer**: + -+ --- -[role="screenshot"] -image::images/ml-gs-forecast-results.jpg["View a forecast from the Single Metric Viewer"] - -The yellow line in the chart represents the predicted data values. The shaded -yellow area represents the bounds for the predicted values, which also gives an -indication of the confidence of the predictions. Note that the bounds generally -increase with time (that is to say, the confidence levels decrease), since you -are forecasting further into the future. Eventually if the confidence levels are -too low, the forecast stops. --- - -. Optional: Compare the forecast to actual data. + -+ --- -You can try this with the sample data by choosing a subset of the data when you -create the job, as described in <>. Create the forecast then process -the remaining data, as described in <>. --- - -.. After you restart the {dfeed}, re-open the forecast by selecting the job in -the **Single Metric Viewer**, clicking **Forecast**, and selecting your forecast -from the list. For example: + -+ --- -[role="screenshot"] -image::images/ml-gs-forecast-open.jpg["Open a forecast in the Single Metric Viewer"] --- - -.. 
View the forecast and actual data in the **Single Metric Viewer**: + -+ --- -[role="screenshot"] -image::images/ml-gs-forecast-actual.jpg["View a forecast over actual data in the Single Metric Viewer"] - -The chart contains the actual data values, the bounds for the expected values, -the anomalies, the forecast data values, and the bounds for the forecast. This -combination of actual and forecast data gives you an indication of how well the -{xpack} {ml} features can extrapolate the future behavior of the data. --- - -Now that you have seen how easy it is to create forecasts with the sample data, -consider what type of events you might want to predict in your own data. For -more information and ideas, as well as a list of limitations related to -forecasts, see <>. diff --git a/x-pack/docs/en/ml/getting-started-multi.asciidoc b/x-pack/docs/en/ml/getting-started-multi.asciidoc deleted file mode 100644 index 804abacc605..00000000000 --- a/x-pack/docs/en/ml/getting-started-multi.asciidoc +++ /dev/null @@ -1,211 +0,0 @@ -[[ml-gs-multi-jobs]] -=== Creating Multi-metric Jobs - -The multi-metric job wizard in {kib} provides a simple way to create more -complex jobs with multiple detectors. For example, in the single metric job, you -were tracking total requests versus time. You might also want to track other -metrics like average response time or the maximum number of denied requests. -Instead of creating jobs for each of those metrics, you can combine them in a -multi-metric job. - -You can also use multi-metric jobs to split a single time series into multiple -time series based on a categorical field. For example, you can split the data -based on its hostnames, locations, or users. Each time series is modeled -independently. By looking at temporal patterns on a per entity basis, you might -spot things that might have otherwise been hidden in the lumped view. - -Conceptually, you can think of this as running many independent single metric -jobs. 
By bundling them together in a multi-metric job, however, you can see an -overall score and shared influencers for all the metrics and all the entities in -the job. Multi-metric jobs therefore scale better than having many independent -single metric jobs and provide better results when you have influencers that are -shared across the detectors. - -The sample data for this tutorial contains information about the requests that -are received by various applications and services in a system. Let's assume that -you want to monitor the requests received and the response time. In particular, -you might want to track those metrics on a per service basis to see if any -services have unusual patterns. - -To create a multi-metric job in {kib}: - -. Open {kib} in your web browser and log in. If you are running {kib} locally, -go to `http://localhost:5601/`. - -. Click **Machine Learning** in the side navigation, then click **Create new job**. - -. Select the index pattern that you created for the sample data. For example, -`server-metrics*`. - -. In the **Use a wizard** section, click **Multi metric**. - -. Configure the job by providing the following job settings: + -+ --- -[role="screenshot"] -image::images/ml-gs-multi-job.jpg["Create a new job from the server-metrics index"] --- - -.. For the **Fields**, select `high mean(response)` and `sum(total)`. This -creates two detectors and specifies the analysis function and field that each -detector uses. The first detector uses the high mean function to detect -unusually high average values for the `response` field in each bucket. The -second detector uses the sum function to detect when the sum of the `total` -field is anomalous in each bucket. For more information about any of the -analytical functions, see <>. - -.. For the **Bucket span**, enter `10m`. This value specifies the size of the -interval that the analysis is aggregated into. 
As was the case in the single -metric example, this value has a significant impact on the analysis. When you're -creating jobs for your own data, you might need to experiment with different -bucket spans depending on the frequency of the input data, the duration of -typical anomalies, and the frequency at which alerting is required. - -.. For the **Split Data**, select `service`. When you specify this -option, the analysis is segmented such that you have completely independent -baselines for each distinct value of this field. -//TBD: What is the importance of having separate baselines? -There are seven unique service keyword values in the sample data. Thus for each -of the seven services, you will see the high mean response metrics and sum -total metrics. + -+ --- -NOTE: If you are creating a job by using the {ml} APIs or the advanced job -wizard in {kib}, you can accomplish this split by using the -`partition_field_name` property. - --- - -.. For the **Key Fields (Influencers)**, select `host`. Note that the `service` field -is also automatically selected because you used it to split the data. These key -fields are also known as _influencers_. -When you identify a field as an influencer, you are indicating that you think -it contains information about someone or something that influences or -contributes to anomalies. -+ --- -[TIP] -======================== -Picking an influencer is strongly recommended for the following reasons: - -* It allows you to more easily assign blame for the anomaly -* It simplifies and aggregates the results - -The best influencer is the person or thing that you want to blame for the -anomaly. In many cases, users or client IP addresses make excellent influencers. -Influencers can be any field in your data; they do not need to be fields that -are specified in your detectors, though they often are. - -As a best practice, do not pick too many influencers. For example, you generally -do not need more than three. 
If you pick many influencers, the results can be -overwhelming and there is a small overhead to the analysis. - -======================== -//TBD: Is this something you can determine later from looking at results and -//update your job with if necessary? Is it all post-processing or does it affect -//the ongoing modeling? --- - -. Click **Use full server-metrics* data**. Two graphs are generated for each -`service` value, which represent the high mean `response` values and -sum `total` values over time. For example: -+ --- -[role="screenshot"] -image::images/ml-gs-job2-split.jpg["Kibana charts for data split by service"] --- - -. Provide a name for the job, for example `response_requests_by_app`. The job -name must be unique in your cluster. You can also optionally provide a -description of the job. - -. Click **Create Job**. - -When the job is created, you can choose to view the results, continue the job in -real-time, and create a watch. In this tutorial, we will proceed to view the -results. - -TIP: The `create_multi_metic.sh` script creates a similar job and {dfeed} by -using the {ml} APIs. You can download that script by clicking -here: https://download.elastic.co/demos/machine_learning/gettingstarted/create_multi_metric.sh[create_multi_metric.sh] -For API reference information, see {ref}/ml-apis.html[Machine Learning APIs]. - -[[ml-gs-job2-analyze]] -=== Exploring Multi-metric Job Results - -The {xpackml} features analyze the input stream of data, model its behavior, and -perform analysis based on the two detectors you defined in your job. When an -event occurs outside of the model, that event is identified as an anomaly. - -You can use the **Anomaly Explorer** in {kib} to view the analysis results: - -[role="screenshot"] -image::images/ml-gs-job2-explorer.jpg["Job results in the Anomaly Explorer"] - -You can explore the overall anomaly time line, which shows the maximum anomaly -score for each section in the specified time period. 
You can change the time -period by using the time picker in the {kib} toolbar. Note that the sections in -this time line do not necessarily correspond to the bucket span. If you change -the time period, the sections change size too. The smallest possible size for -these sections is a bucket. If you specify a large time period, the sections can -span many buckets. - -On the left is a list of the top influencers for all of the detected anomalies -in that same time period. The list includes maximum anomaly scores, which in -this case are aggregated for each influencer, for each bucket, across all -detectors. There is also a total sum of the anomaly scores for each influencer. -You can use this list to help you narrow down the contributing factors and focus -on the most anomalous entities. - -If your job contains influencers, you can also explore swim lanes that -correspond to the values of an influencer. In this example, the swim lanes -correspond to the values for the `service` field that you used to split the data. -Each lane represents a unique application or service name. Since you specified -the `host` field as an influencer, you can also optionally view the results in -swim lanes for each host name: - -[role="screenshot"] -image::images/ml-gs-job2-explorer-host.jpg["Job results sorted by host"] - -By default, the swim lanes are ordered by their maximum anomaly score values. -You can click on the sections in the swim lane to see details about the -anomalies that occurred in that time interval. - -NOTE: The anomaly scores that you see in each section of the **Anomaly Explorer** -might differ slightly. This disparity occurs because for each job we generate -bucket results, influencer results, and record results. Anomaly scores are -generated for each type of result. The anomaly timeline uses the bucket-level -anomaly scores. The list of top influencers uses the influencer-level anomaly -scores. The list of anomalies uses the record-level anomaly scores. 
For more -information about these different result types, see -{ref}/ml-results-resource.html[Results Resources]. - -Click on a section in the swim lanes to obtain more information about the -anomalies in that time period. For example, click on the red section in the swim -lane for `server_2`: - -[role="screenshot"] -image::images/ml-gs-job2-explorer-anomaly.jpg["Job results for an anomaly"] - -You can see exact times when anomalies occurred and which detectors or metrics -caught the anomaly. Also note that because you split the data by the `service` -field, you see separate charts for each applicable service. In particular, you -see charts for each service for which there is data on the specified host in the -specified time interval. - -Below the charts, there is a table that provides more information, such as the -typical and actual values and the influencers that contributed to the anomaly. - -[role="screenshot"] -image::images/ml-gs-job2-explorer-table.jpg["Job results table"] - -Notice that there are anomalies for both detectors, that is to say for both the -`high_mean(response)` and the `sum(total)` metrics in this time interval. The -table aggregates the anomalies to show the highest severity anomaly per detector -and entity, which is the by, over, or partition field value that is displayed -in the **found for** column. To view all the anomalies without any aggregation, -set the **Interval** to `Show all`. - -By -investigating multiple metrics in a single job, you might see relationships -between events in your data that would otherwise be overlooked. diff --git a/x-pack/docs/en/ml/getting-started-next.asciidoc b/x-pack/docs/en/ml/getting-started-next.asciidoc deleted file mode 100644 index 90d1e7798ee..00000000000 --- a/x-pack/docs/en/ml/getting-started-next.asciidoc +++ /dev/null @@ -1,55 +0,0 @@ -[[ml-gs-next]] -=== Next Steps - -By completing this tutorial, you've learned how you can detect anomalous -behavior in a simple set of sample data. 
You created single and multi-metric -jobs in {kib}, which creates and opens jobs and creates and starts {dfeeds} for -you under the covers. You examined the results of the {ml} analysis in the -**Single Metric Viewer** and **Anomaly Explorer** in {kib}. You also -extrapolated the future behavior of a job by creating a forecast. - -If you want to learn about advanced job options, you might be interested in -the following video tutorial: -https://www.elastic.co/videos/machine-learning-lab-3-detect-outliers-in-a-population[Machine Learning Lab 3 - Detect Outliers in a Population]. - -If you intend to use {ml} APIs in your applications, a good next step might be -to learn about the APIs by retrieving information about these sample jobs. -For example, the following APIs retrieve information about the jobs and {dfeeds}. - -[source,js] --------------------------------------------------- -GET _xpack/ml/anomaly_detectors - -GET _xpack/ml/datafeeds --------------------------------------------------- -// CONSOLE - -For more information about the {ml} APIs, see <>. - -Ultimately, the next step is to start applying {ml} to your own data. -As mentioned in <>, there are three things to consider when you're -thinking about where {ml} will be most impactful: - -. It must be time series data. -. It should be information that contains key performance indicators for the -health, security, or success of your business or system. The better you know the -data, the quicker you will be able to create jobs that generate useful -insights. -. Ideally, the data is located in {es} and you can therefore create a {dfeed} -that retrieves data in real time. If your data is outside of {es}, you -cannot use {kib} to create your jobs and you cannot use {dfeeds}. Machine -learning analysis is still possible, however, by using APIs to create and manage -jobs and to post data to them. - -Once you have decided which data to analyze, you can start considering which -analysis functions you want to use. 
For more information, see <>. - -In general, it is a good idea to start with single metric jobs for your -key performance indicators. After you examine these simple analysis results, -you will have a better idea of what the influencers might be. You can create -multi-metric jobs and split the data or create more complex analysis functions -as necessary. For examples of more complicated configuration options, see -<>. - -If you encounter problems, we're here to help. See <> and -<>. diff --git a/x-pack/docs/en/ml/getting-started-single.asciidoc b/x-pack/docs/en/ml/getting-started-single.asciidoc deleted file mode 100644 index 3befdbaf34d..00000000000 --- a/x-pack/docs/en/ml/getting-started-single.asciidoc +++ /dev/null @@ -1,331 +0,0 @@ -[[ml-gs-jobs]] -=== Creating Single Metric Jobs - -At this point in the tutorial, the goal is to detect anomalies in the -total requests received by your applications and services. The sample data -contains a single key performance indicator(KPI) to track this, which is the total -requests over time. It is therefore logical to start by creating a single metric -job for this KPI. - -TIP: If you are using aggregated data, you can create an advanced job -and configure it to use a `summary_count_field_name`. The {ml} algorithms will -make the best possible use of summarized data in this case. For simplicity, in -this tutorial we will not make use of that advanced functionality. For more -information, see <>. - -A single metric job contains a single _detector_. A detector defines the type of -analysis that will occur (for example, `max`, `average`, or `rare` analytical -functions) and the fields that will be analyzed. - -To create a single metric job in {kib}: - -. Open {kib} in your web browser and log in. If you are running {kib} locally, -go to `http://localhost:5601/`. - -. Click **Machine Learning** in the side navigation. - -. Click **Create new job**. - -. Select the index pattern that you created for the sample data. 
For example, -`server-metrics*`. - -. In the **Use a wizard** section, click **Single metric**. - -. Configure the job by providing the following information: + -+ --- -[role="screenshot"] -image::images/ml-gs-single-job.jpg["Create a new job from the server-metrics index"] --- - -.. For the **Aggregation**, select `Sum`. This value specifies the analysis -function that is used. -+ --- -Some of the analytical functions look for single anomalous data points. For -example, `max` identifies the maximum value that is seen within a bucket. -Others perform some aggregation over the length of the bucket. For example, -`mean` calculates the mean of all the data points seen within the bucket. -Similarly, `count` calculates the total number of data points within the bucket. -In this tutorial, you are using the `sum` function, which calculates the sum of -the specified field's values within the bucket. For descriptions of all the -functions, see <>. --- - -.. For the **Field**, select `total`. This value specifies the field that -the detector uses in the function. -+ --- -NOTE: Some functions such as `count` and `rare` do not require fields. --- - -.. For the **Bucket span**, enter `10m`. This value specifies the size of the -interval that the analysis is aggregated into. -+ --- -The {xpackml} features use the concept of a bucket to divide up the time series -into batches for processing. For example, if you are monitoring -the total number of requests in the system, -using a bucket span of 1 hour would mean that at the end of each hour, it -calculates the sum of the requests for the last hour and computes the -anomalousness of that value compared to previous hours. - -The bucket span has two purposes: it dictates over what time span to look for -anomalous features in data, and also determines how quickly anomalies can be -detected. Choosing a shorter bucket span enables anomalies to be detected more -quickly. 
However, there is a risk of being too sensitive to natural variations -or noise in the input data. Choosing too long a bucket span can mean that -interesting anomalies are averaged away. There is also the possibility that the -aggregation might smooth out some anomalies based on when the bucket starts -in time. - -The bucket span has a significant impact on the analysis. When you're trying to -determine what value to use, take into account the granularity at which you -want to perform the analysis, the frequency of the input data, the duration of -typical anomalies, and the frequency at which alerting is required. --- - -. Determine whether you want to process all of the data or only part of it. If -you want to analyze all of the existing data, click -**Use full server-metrics* data**. If you want to see what happens when you -stop and start {dfeeds} and process additional data over time, click the time -picker in the {kib} toolbar. Since the sample data spans a period of time -between March 23, 2017 and April 22, 2017, click **Absolute**. Set the start -time to March 23, 2017 and the end time to April 1, 2017, for example. Once -you've got the time range set up, click the **Go** button. + -+ --- -[role="screenshot"] -image::images/ml-gs-job1-time.jpg["Setting the time range for the {dfeed}"] --- -+ --- -A graph is generated, which represents the total number of requests over time. - -Note that the **Estimate bucket span** option is no longer greyed out in the -**Bucket span** field. This is an experimental feature that you can use to help -determine an appropriate bucket span for your data. For the purposes of this -tutorial, we will leave the bucket span at 10 minutes. --- - -. Provide a name for the job, for example `total-requests`. The job name must -be unique in your cluster. You can also optionally provide a description of the -job and create a job group. - -. Click **Create Job**. 
+ -+ --- -[role="screenshot"] -image::images/ml-gs-job1.jpg["A graph of the total number of requests over time"] --- - -As the job is created, the graph is updated to give a visual representation of -the progress of {ml} as the data is processed. This view is only available whilst the -job is running. - -When the job is created, you can choose to view the results, continue the job -in real-time, and create a watch. In this tutorial, we will look at how to -manage jobs and {dfeeds} before we view the results. - -TIP: The `create_single_metric.sh` script creates a similar job and {dfeed} by -using the {ml} APIs. You can download that script by clicking -here: https://download.elastic.co/demos/machine_learning/gettingstarted/create_single_metric.sh[create_single_metric.sh] -For API reference information, see {ref}/ml-apis.html[Machine Learning APIs]. - -[[ml-gs-job1-manage]] -=== Managing Jobs - -After you create a job, you can see its status in the **Job Management** tab: + - -[role="screenshot"] -image::images/ml-gs-job1-manage1.jpg["Status information for the total-requests job"] - -The following information is provided for each job: - -Job ID:: -The unique identifier for the job. - -Description:: -The optional description of the job. - -Processed records:: -The number of records that have been processed by the job. - -Memory status:: -The status of the mathematical models. When you create jobs by using the APIs or -by using the advanced options in {kib}, you can specify a `model_memory_limit`. -That value is the maximum amount of memory resources that the mathematical -models can use. Once that limit is approached, data pruning becomes more -aggressive. Upon exceeding that limit, new entities are not modeled. For more -information about this setting, see -{ref}/ml-job-resource.html#ml-apilimits[Analysis Limits]. The memory status -field reflects whether you have reached or exceeded the model memory limit. 
It -can have one of the following values: + -`ok`::: The models stayed below the configured value. -`soft_limit`::: The models used more than 60% of the configured memory limit -and older unused models will be pruned to free up space. -`hard_limit`::: The models used more space than the configured memory limit. -As a result, not all incoming data was processed. - -Job state:: -The status of the job, which can be one of the following values: + -`opened`::: The job is available to receive and process data. -`closed`::: The job finished successfully with its model state persisted. -The job must be opened before it can accept further data. -`closing`::: The job close action is in progress and has not yet completed. -A closing job cannot accept further data. -`failed`::: The job did not finish successfully due to an error. -This situation can occur due to invalid input data. -If the job had irrevocably failed, it must be force closed and then deleted. -If the {dfeed} can be corrected, the job can be closed and then re-opened. - -{dfeed-cap} state:: -The status of the {dfeed}, which can be one of the following values: + -started::: The {dfeed} is actively receiving data. -stopped::: The {dfeed} is stopped and will not receive data until it is -re-started. - -Latest timestamp:: -The timestamp of the last processed record. - - -If you click the arrow beside the name of job, you can show or hide additional -information, such as the settings, configuration information, or messages for -the job. - -You can also click one of the **Actions** buttons to start the {dfeed}, edit -the job or {dfeed}, and clone or delete the job, for example. - -[float] -[[ml-gs-job1-datafeed]] -==== Managing {dfeeds-cap} - -A {dfeed} can be started and stopped multiple times throughout its lifecycle. -If you want to retrieve more data from {es} and the {dfeed} is stopped, you must -restart it. 
- -For example, if you did not use the full data when you created the job, you can -now process the remaining data by restarting the {dfeed}: - -. In the **Machine Learning** / **Job Management** tab, click the following -button to start the {dfeed}: image:images/ml-start-feed.jpg["Start {dfeed}"] - - -. Choose a start time and end time. For example, -click **Continue from 2017-04-01 23:59:00** and select **2017-04-30** as the -search end time. Then click **Start**. The date picker defaults to the latest -timestamp of processed data. Be careful not to leave any gaps in the analysis, -otherwise you might miss anomalies. + -+ --- -[role="screenshot"] -image::images/ml-gs-job1-datafeed.jpg["Restarting a {dfeed}"] --- - -The {dfeed} state changes to `started`, the job state changes to `opened`, -and the number of processed records increases as the new data is analyzed. The -latest timestamp information also increases. - -TIP: If your data is being loaded continuously, you can continue running the job -in real time. For this, start your {dfeed} and select **No end time**. - -If you want to stop the {dfeed} at this point, you can click the following -button: image:images/ml-stop-feed.jpg["Stop {dfeed}"] - -Now that you have processed all the data, let's start exploring the job results. - -[[ml-gs-job1-analyze]] -=== Exploring Single Metric Job Results - -The {xpackml} features analyze the input stream of data, model its behavior, -and perform analysis based on the detectors you defined in your job. When an -event occurs outside of the model, that event is identified as an anomaly. - -Result records for each anomaly are stored in `.ml-anomalies-*` indices in {es}. -By default, the name of the index where {ml} results are stored is labelled -`shared`, which corresponds to the `.ml-anomalies-shared` index. - -You can use the **Anomaly Explorer** or the **Single Metric Viewer** in {kib} to -view the analysis results. 
- -Anomaly Explorer:: - This view contains swim lanes showing the maximum anomaly score over time. - There is an overall swim lane that shows the overall score for the job, and - also swim lanes for each influencer. By selecting a block in a swim lane, the - anomaly details are displayed alongside the original source data (where - applicable). - -Single Metric Viewer:: - This view contains a chart that represents the actual and expected values over - time. This is only available for jobs that analyze a single time series and - where `model_plot_config` is enabled. As in the **Anomaly Explorer**, anomalous - data points are shown in different colors depending on their score. - -By default when you view the results for a single metric job, the -**Single Metric Viewer** opens: -[role="screenshot"] -image::images/ml-gs-job1-analysis.jpg["Single Metric Viewer for total-requests job"] - - -The blue line in the chart represents the actual data values. The shaded blue -area represents the bounds for the expected values. The area between the upper -and lower bounds are the most likely values for the model. If a value is outside -of this area then it can be said to be anomalous. - -If you slide the time selector from the beginning of the data to the end of the -data, you can see how the model improves as it processes more data. At the -beginning, the expected range of values is pretty broad and the model is not -capturing the periodicity in the data. But it quickly learns and begins to -reflect the daily variation. - -Any data points outside the range that was predicted by the model are marked -as anomalies. When you have high volumes of real-life data, many anomalies -might be found. These vary in probability from very likely to highly unlikely, -that is to say, from not particularly anomalous to highly anomalous. There -can be none, one or two or tens, sometimes hundreds of anomalies found within -each bucket. There can be many thousands found per job. 
In order to provide -a sensible view of the results, an _anomaly score_ is calculated for each bucket -time interval. The anomaly score is a value from 0 to 100, which indicates -the significance of the observed anomaly compared to previously seen anomalies. -The highly anomalous values are shown in red and the low scored values are -indicated in blue. An interval with a high anomaly score is significant and -requires investigation. - -Slide the time selector to a section of the time series that contains a red -anomaly data point. If you hover over the point, you can see more information -about that data point. You can also see details in the **Anomalies** section -of the viewer. For example: -[role="screenshot"] -image::images/ml-gs-job1-anomalies.jpg["Single Metric Viewer Anomalies for total-requests job"] - -For each anomaly you can see key details such as the time, the actual and -expected ("typical") values, and their probability. - -By default, the table contains all anomalies that have a severity of "warning" -or higher in the selected section of the timeline. If you are only interested in -critical anomalies, for example, you can change the severity threshold for this -table. - -The anomalies table also automatically calculates an interval for the data in -the table. If the time difference between the earliest and latest records in the -table is less than two days, the data is aggregated by hour to show the details -of the highest severity anomaly for each detector. Otherwise, it is -aggregated by day. You can change the interval for the table, for example, to -show all anomalies. - -You can see the same information in a different format by using the -**Anomaly Explorer**: -[role="screenshot"] -image::images/ml-gs-job1-explorer.jpg["Anomaly Explorer for total-requests job"] - - -Click one of the red sections in the swim lane to see details about the anomalies -that occurred in that time interval. 
For example: -[role="screenshot"] -image::images/ml-gs-job1-explorer-anomaly.jpg["Anomaly Explorer details for total-requests job"] - -After you have identified anomalies, often the next step is to try to determine -the context of those situations. For example, are there other factors that are -contributing to the problem? Are the anomalies confined to particular -applications or servers? You can begin to troubleshoot these situations by -layering additional jobs or creating multi-metric jobs. diff --git a/x-pack/docs/en/ml/getting-started-wizards.asciidoc b/x-pack/docs/en/ml/getting-started-wizards.asciidoc deleted file mode 100644 index 2eb6b5c2904..00000000000 --- a/x-pack/docs/en/ml/getting-started-wizards.asciidoc +++ /dev/null @@ -1,99 +0,0 @@ -[[ml-gs-wizards]] -=== Creating Jobs in {kib} -++++ -Creating Jobs -++++ - -Machine learning jobs contain the configuration information and metadata -necessary to perform an analytical task. They also contain the results of the -analytical task. - -[NOTE] --- -This tutorial uses {kib} to create jobs and view results, but you can -alternatively use APIs to accomplish most tasks. -For API reference information, see {ref}/ml-apis.html[Machine Learning APIs]. - -The {xpackml} features in {kib} use pop-ups. You must configure your -web browser so that it does not block pop-up windows or create an -exception for your {kib} URL. --- - -{kib} provides wizards that help you create typical {ml} jobs. For example, you -can use wizards to create single metric, multi-metric, population, and advanced -jobs. - -To see the job creation wizards: - -. Open {kib} in your web browser and log in. If you are running {kib} locally, -go to `http://localhost:5601/`. - -. Click **Machine Learning** in the side navigation. - -. Click **Create new job**. - -. Click the `server-metrics*` index pattern. - -You can then choose from a list of job wizards. 
For example: - -[role="screenshot"] -image::images/ml-create-job.jpg["Job creation wizards in {kib}"] - -If you are not certain which wizard to use, there is also a **Data Visualizer** -that can help you explore the fields in your data. - -To learn more about the sample data: - -. Click **Data Visualizer**. + -+ --- -[role="screenshot"] -image::images/ml-data-visualizer.jpg["Data Visualizer in {kib}"] --- - -. Select a time period that you're interested in exploring by using the time -picker in the {kib} toolbar. Alternatively, click -**Use full server-metrics* data** to view data over the full time range. In this -sample data, the documents relate to March and April 2017. - -. Optional: Change the number of documents per shard that are used in the -visualizations. There is a relatively small number of documents in the sample -data, so you can choose a value of `all`. For larger data sets, keep in mind -that using a large sample size increases query run times and increases the load -on the cluster. - -[role="screenshot"] -image::images/ml-data-metrics.jpg["Data Visualizer output for metrics in {kib}"] - -The fields in the indices are listed in two sections. The first section contains -the numeric ("metric") fields. The second section contains non-metric fields -(such as `keyword`, `text`, `date`, `boolean`, `ip`, and `geo_point` data types). - -For metric fields, the **Data Visualizer** indicates how many documents contain -the field in the selected time period. It also provides information about the -minimum, median, and maximum values, the number of distinct values, and their -distribution. You can use the distribution chart to get a better idea of how -the values in the data are clustered. Alternatively, you can view the top values -for metric fields. 
For example: - -[role="screenshot"] -image::images/ml-data-topmetrics.jpg["Data Visualizer output for top values in {kib}"] - -For date fields, the **Data Visualizer** provides the earliest and latest field -values and the number and percentage of documents that contain the field -during the selected time period. For example: - -[role="screenshot"] -image::images/ml-data-dates.jpg["Data Visualizer output for date fields in {kib}"] - -For keyword fields, the **Data Visualizer** provides the number of distinct -values, a list of the top values, and the number and percentage of documents -that contain the field during the selected time period. For example: - -[role="screenshot"] -image::images/ml-data-keywords.jpg["Data Visualizer output for keyword fields in {kib}"] - -In this tutorial, you will create single and multi-metric jobs that use the -`total`, `response`, `service`, and `host` fields. Though there is an option to -create an advanced job directly from the **Data Visualizer**, we will use the -single and multi-metric job creation wizards instead. diff --git a/x-pack/docs/en/ml/getting-started.asciidoc b/x-pack/docs/en/ml/getting-started.asciidoc deleted file mode 100644 index 0f1b7164d4a..00000000000 --- a/x-pack/docs/en/ml/getting-started.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -[[ml-getting-started]] -== Getting started with machine learning -++++ -Getting started -++++ - -Ready to get some hands-on experience with the {xpackml} features? This -tutorial shows you how to: - -* Load a sample data set into {es} -* Create single and multi-metric {ml} jobs in {kib} -* Use the results to identify possible anomalies in the data - -At the end of this tutorial, you should have a good idea of what {ml} is and -will hopefully be inspired to use it to detect anomalies in your own data. 
- -You might also be interested in these video tutorials, which use the same sample -data: - -* https://www.elastic.co/videos/machine-learning-tutorial-creating-a-single-metric-job[Machine Learning for the Elastic Stack: Creating a single metric job] -* https://www.elastic.co/videos/machine-learning-tutorial-creating-a-multi-metric-job[Machine Learning for the Elastic Stack: Creating a multi-metric job] - - -[float] -[[ml-gs-sysoverview]] -=== System Overview - -To follow the steps in this tutorial, you will need the following -components of the Elastic Stack: - -* {es} {version}, which stores the data and the analysis results -* {kib} {version}, which provides a helpful user interface for creating and -viewing jobs - -See the https://www.elastic.co/support/matrix[Elastic Support Matrix] for -information about supported operating systems. - -See {stack-ref}/installing-elastic-stack.html[Installing the Elastic Stack] for -information about installing each of the components. - -NOTE: To get started, you can install {es} and {kib} on a -single VM or even on your laptop (requires 64-bit OS). -As you add more data and your traffic grows, -you'll want to replace the single {es} instance with a cluster. - -By default, when you install {es} and {kib}, {xpack} is installed and the -{ml} features are enabled. You cannot use {ml} with the free basic license, but -you can try all of the {xpack} features with a <>. - -If you have multiple nodes in your cluster, you can optionally dedicate nodes to -specific purposes. If you want to control which nodes are -_machine learning nodes_ or limit which nodes run resource-intensive -activity related to jobs, see -{ref}/modules-node.html#modules-node-xpack[{ml} node settings]. - -[float] -[[ml-gs-users]] -==== Users, Roles, and Privileges - -The {xpackml} features implement cluster privileges and built-in roles to -make it easier to control which users have authority to view and manage the jobs, -{dfeeds}, and results. 
- -By default, you can perform all of the steps in this tutorial by using the -built-in `elastic` super user. However, the password must be set before the user -can do anything. For information about how to set that password, see -<>. - -If you are performing these steps in a production environment, take extra care -because `elastic` has the `superuser` role and you could inadvertently make -significant changes to the system. You can alternatively assign the -`machine_learning_admin` and `kibana_user` roles to a user ID of your choice. - -For more information, see <> and <>. - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-data.asciidoc -include::getting-started-data.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-wizards.asciidoc -include::getting-started-wizards.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-single.asciidoc -include::getting-started-single.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-multi.asciidoc -include::getting-started-multi.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-forecast.asciidoc -include::getting-started-forecast.asciidoc[] - -:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/ml/getting-started-next.asciidoc -include::getting-started-next.asciidoc[] diff --git a/x-pack/docs/en/ml/images/ml-gs-aggregations.jpg b/x-pack/docs/en/ml/images/ml-gs-aggregations.jpg deleted file mode 100644 index 446dce79727..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-aggregations.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-duration.jpg b/x-pack/docs/en/ml/images/ml-gs-duration.jpg deleted file mode 100644 index 0e93b3f4ccd..00000000000 Binary files 
a/x-pack/docs/en/ml/images/ml-gs-duration.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast-actual.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast-actual.jpg deleted file mode 100644 index 6733b6e3477..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-forecast-actual.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast-open.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast-open.jpg deleted file mode 100644 index e654c9e7804..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-forecast-open.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast-results.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast-results.jpg deleted file mode 100644 index f6911b41939..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-forecast-results.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-forecast.jpg b/x-pack/docs/en/ml/images/ml-gs-forecast.jpg deleted file mode 100644 index eeb8923b412..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-forecast.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-analysis.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-analysis.jpg deleted file mode 100644 index 9b34c916c80..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-analysis.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-anomalies.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-anomalies.jpg deleted file mode 100644 index d0d77827c90..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-anomalies.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-datafeed.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-datafeed.jpg deleted file mode 100644 index aa36b5f13ea..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-datafeed.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-explorer-anomaly.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-explorer-anomaly.jpg deleted file mode 
100644 index 9e6c76a5518..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-explorer-anomaly.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-explorer.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-explorer.jpg deleted file mode 100644 index bb436a72e50..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-explorer.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-manage1.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-manage1.jpg deleted file mode 100644 index a2cba454e9d..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-manage1.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-results.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-results.jpg deleted file mode 100644 index 0b04fec0e2d..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-results.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1-time.jpg b/x-pack/docs/en/ml/images/ml-gs-job1-time.jpg deleted file mode 100644 index 9cecf7e8b54..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1-time.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job1.jpg b/x-pack/docs/en/ml/images/ml-gs-job1.jpg deleted file mode 100644 index 7251bfc3f6b..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job1.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-anomaly.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-anomaly.jpg deleted file mode 100644 index f7579dd338f..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-anomaly.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-host.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-host.jpg deleted file mode 100644 index cfe3f4fba6d..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-host.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-table.jpg 
b/x-pack/docs/en/ml/images/ml-gs-job2-explorer-table.jpg deleted file mode 100644 index cb3b8205bc8..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job2-explorer-table.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-explorer.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-explorer.jpg deleted file mode 100644 index 20809aa3d1b..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job2-explorer.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-job2-split.jpg b/x-pack/docs/en/ml/images/ml-gs-job2-split.jpg deleted file mode 100644 index 4e07b865532..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-job2-split.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-multi-job.jpg b/x-pack/docs/en/ml/images/ml-gs-multi-job.jpg deleted file mode 100644 index 03bb6ae1196..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-multi-job.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/images/ml-gs-single-job.jpg b/x-pack/docs/en/ml/images/ml-gs-single-job.jpg deleted file mode 100644 index 5d813444db9..00000000000 Binary files a/x-pack/docs/en/ml/images/ml-gs-single-job.jpg and /dev/null differ diff --git a/x-pack/docs/en/ml/limitations.asciidoc b/x-pack/docs/en/ml/limitations.asciidoc deleted file mode 100644 index 1efe6b19027..00000000000 --- a/x-pack/docs/en/ml/limitations.asciidoc +++ /dev/null @@ -1,198 +0,0 @@ -[[ml-limitations]] -== Machine Learning Limitations - -The following limitations and known problems apply to the {version} release of -{xpack}: - -[float] -=== Categorization uses English dictionary words -//See x-pack-elasticsearch/#3021 -Categorization identifies static parts of unstructured logs and groups similar -messages together. The default categorization tokenizer assumes English language -log messages. For other languages you must define a different -`categorization_analyzer` for your job. For more information, see -<>. 
- -Additionally, a dictionary used to influence the categorization process contains -only English words. This means categorization might work better in English than -in other languages. The ability to customize the dictionary will be added in a -future release. - -[float] -=== Pop-ups must be enabled in browsers -//See x-pack-elasticsearch/#844 - -The {xpackml} features in {kib} use pop-ups. You must configure your -web browser so that it does not block pop-up windows or create an -exception for your {kib} URL. - -[float] -=== Anomaly Explorer omissions and limitations -//See x-pack-elasticsearch/#844 and x-pack-kibana/#1461 - -In {kib}, Anomaly Explorer charts are not displayed for anomalies -that were due to categorization, `time_of_day` functions, or `time_of_week` -functions. Those particular results do not display well as time series -charts. - -The charts are also not displayed for detectors that use script fields. In that -case, the original source data cannot be easily searched because it has been -somewhat transformed by the script. - -The Anomaly Explorer charts can also look odd in circumstances where there -is very little data to plot. For example, if there is only one data point, it is -represented as a single dot. If there are only two data points, they are joined -by a line. - -[float] -=== Jobs close on the {dfeed} end date -//See x-pack-elasticsearch/#1037 - -If you start a {dfeed} and specify an end date, it will close the job when -the {dfeed} stops. This behavior avoids having numerous open one-time jobs. - -If you do not specify an end date when you start a {dfeed}, the job -remains open when you stop the {dfeed}. This behavior avoids the overhead -of closing and re-opening large jobs when there are pauses in the {dfeed}. - -[float] -=== Jobs created in {kib} must use {dfeeds} - -If you create jobs in {kib}, you must use {dfeeds}. 
If the data that you want to -analyze is not stored in {es}, you cannot use {dfeeds} and therefore you cannot -create your jobs in {kib}. You can, however, use the {ml} APIs to create jobs -and to send batches of data directly to the jobs. For more information, see -<> and <>. - -[float] -=== Post data API requires JSON format - -The post data API enables you to send data to a job for analysis. The data that -you send to the job must use the JSON format. - -For more information about this API, see -{ref}/ml-post-data.html[Post Data to Jobs]. - - -[float] -=== Misleading high missing field counts -//See x-pack-elasticsearch/#684 - -One of the counts associated with a {ml} job is `missing_field_count`, -which indicates the number of records that are missing a configured field. -//This information is most useful when your job analyzes CSV data. In this case, -//missing fields indicate data is not being analyzed and you might receive poor results. - -Since jobs analyze JSON data, the `missing_field_count` might be misleading. -Missing fields might be expected due to the structure of the data and therefore -do not generate poor results. - -For more information about `missing_field_count`, -see {ref}/ml-jobstats.html#ml-datacounts[Data Counts Objects]. - - -[float] -=== Terms aggregation size affects data analysis -//See x-pack-elasticsearch/#601 - -By default, the `terms` aggregation returns the buckets for the top ten terms. -You can change this default behavior by setting the `size` parameter. - -If you are send pre-aggregated data to a job for analysis, you must ensure -that the `size` is configured correctly. Otherwise, some data might not be -analyzed. - - -[float] -=== Time-based index patterns are not supported -//See x-pack-elasticsearch/#1910 - -It is not possible to create an {xpackml} analysis job that uses time-based -index patterns, for example `[logstash-]YYYY.MM.DD`. -This applies to the single metric or multi metric job creation wizards in {kib}. 
- - -[float] -=== Fields named "by", "count", or "over" cannot be used to split data -//See x-pack-elasticsearch/#858 - -You cannot use the following field names in the `by_field_name` or -`over_field_name` properties in a job: `by`; `count`; `over`. This limitation -also applies to those properties when you create advanced jobs in {kib}. - - -[float] -=== Jobs created in {kib} use model plot config and pre-aggregated data -//See x-pack-elasticsearch/#844 - -If you create single or multi-metric jobs in {kib}, it might enable some -options under the covers that you'd want to reconsider for large or -long-running jobs. - -For example, when you create a single metric job in {kib}, it generally -enables the `model_plot_config` advanced configuration option. That configuration -option causes model information to be stored along with the results and provides -a more detailed view into anomaly detection. It is specifically used by the -**Single Metric Viewer** in {kib}. When this option is enabled, however, it can -add considerable overhead to the performance of the system. If you have jobs -with many entities, for example data from tens of thousands of servers, storing -this additional model information for every bucket might be problematic. If you -are not certain that you need this option or if you experience performance -issues, edit your job configuration to disable this option. - -For more information, see -{ref}/ml-job-resource.html#ml-apimodelplotconfig[Model Plot Config]. - -Likewise, when you create a single or multi-metric job in {kib}, in some cases -it uses aggregations on the data that it retrieves from {es}. One of the -benefits of summarizing data this way is that {es} automatically distributes -these calculations across your cluster. This summarized data is then fed into -{xpackml} instead of raw results, which reduces the volume of data that must -be considered while detecting anomalies. 
However, if you have two jobs, one of -which uses pre-aggregated data and another that does not, their results might -differ. This difference is due to the difference in precision of the input data. -The {ml} analytics are designed to be aggregation-aware and the likely increase -in performance that is gained by pre-aggregating the data makes the potentially -poorer precision worthwhile. If you want to view or change the aggregations -that are used in your job, refer to the `aggregations` property in your {dfeed}. - -For more information, see {ref}/ml-datafeed-resource.html[Datafeed Resources]. - -[float] -=== Security Integration - -When {security} is enabled, a {dfeed} stores the roles of the user who created -or updated the {dfeed} **at that time**. This means that if those roles are -updated then the {dfeed} subsequently runs with the new permissions that are -associated with the roles. However, if the user's roles are adjusted after -creating or updating the {dfeed}, the {dfeed} continues to run with the -permissions that were associated with the original roles. For more information, -see <>. - -[float] -=== Forecasts cannot be created for population jobs - -If you use an `over_field_name` property in your job (that is to say, it's a -_population job_), you cannot create a forecast. If you try to create a forecast -for this type of job, an error occurs. For more information about forecasts, -see <>. - -[float] -=== Forecasts cannot be created for jobs that use geographic, rare, or time functions - -If you use any of the following analytical functions in your job, you cannot -create a forecast: - -* `lat_long` -* `rare` and `freq_rare` -* `time_of_day` and `time_of_week` - -If you try to create a forecast for this type of job, an error occurs. For more -information about any of these functions, see <>. - -[float] -=== Jobs must be stopped before upgrades - -You must stop any {ml} jobs that are running before you start the upgrade -process. 
For more information, see <> and -{stack-ref}/upgrading-elastic-stack.html[Upgrading the Elastic Stack]. diff --git a/x-pack/docs/en/ml/troubleshooting.asciidoc b/x-pack/docs/en/ml/troubleshooting.asciidoc deleted file mode 100644 index d5244cebdae..00000000000 --- a/x-pack/docs/en/ml/troubleshooting.asciidoc +++ /dev/null @@ -1,116 +0,0 @@ -[[ml-troubleshooting]] -== {xpackml} Troubleshooting -++++ -{xpackml} -++++ - -Use the information in this section to troubleshoot common problems and find -answers for frequently asked questions. - -* <> -* <> - -To get help, see <>. - -[[ml-rollingupgrade]] -=== Machine learning features unavailable after rolling upgrade - -This problem occurs after you upgrade all of the nodes in your cluster to -{version} by using rolling upgrades. When you try to use {xpackml} features for -the first time, all attempts fail, though `GET _xpack` and `GET _xpack/usage` -indicate that {xpack} is enabled. - -*Symptoms:* - -* Errors when you click *Machine Learning* in {kib}. -For example: `Jobs list could not be created` and `An internal server error occurred`. -* Null pointer and remote transport exceptions when you run {ml} APIs such as -`GET _xpack/ml/anomaly_detectors` and `GET _xpack/ml/datafeeds`. -* Errors in the log files on the master nodes. -For example: `unable to install ml metadata upon startup` - -*Resolution:* - -After you upgrade all master-eligible nodes to {es} {version} and {xpack} -{version}, restart the current master node, which triggers the {xpackml} -features to re-initialize. - -For more information, see {ref}/rolling-upgrades.html[Rolling upgrades]. - -[[ml-mappingclash]] -=== Job creation failure due to mapping clash - -This problem occurs when you try to create a job. - -*Symptoms:* - -* Illegal argument exception occurs when you click *Create Job* in {kib} or run -the create job API. 
For example: -`Save failed: [status_exception] This job would cause a mapping clash -with existing field [field_name] - avoid the clash by assigning a dedicated -results index` or `Save failed: [illegal_argument_exception] Can't merge a non -object mapping [field_name] with an object mapping [field_name]`. - -*Resolution:* - -This issue typically occurs when two or more jobs store their results in the -same index and the results contain fields with the same name but different -data types or different `fields` settings. - -By default, {ml} results are stored in the `.ml-anomalies-shared` index in {es}. -To resolve this issue, click *Advanced > Use dedicated index* when you create -the job in {kib}. If you are using the create job API, specify an index name in -the `results_index_name` property. - -[[ml-jobnames]] -=== {kib} cannot display jobs with invalid characters in their name - -This problem occurs when you create a job by using the -{ref}/ml-put-job.html[Create Jobs API] then try to view that job in {kib}. In -particular, the problem occurs when you use a period(.) in the job identifier. - -*Symptoms:* - -* When you try to open a job (named, for example, `job.test` in the -**Anomaly Explorer** or the **Single Metric Viewer**, the job name is split and -the text after the period is assumed to be the job name. If a job does not exist -with that abbreviated name, an error occurs. For example: -`Warning Requested job test does not exist`. If a job exists with that -abbreviated name, it is displayed. - -*Resolution:* - -Create jobs in {kib} or ensure that you create jobs with valid identifiers when -you use the {ml} APIs. For more information about valid identifiers, see -{ref}/ml-put-job.html[Create Jobs API] or -{ref}/ml-job-resource.html[Job Resources]. - -[[ml-upgradedf]] - -=== Upgraded nodes fail to start due to {dfeed} issues - -This problem occurs when you have a {dfeed} that contains search or query -domain specific language (DSL) that was discontinued. 
For example, if you -created a {dfeed} query in 5.x using search syntax that was deprecated in 5.x -and removed in 6.0, you must fix the {dfeed} before you upgrade to 6.0. - -*Symptoms:* - -* If {ref}/logging.html#deprecation-logging[deprecation logging] is enabled -before the upgrade, deprecation messages are generated when the {dfeeds} attempt -to retrieve data. -* After the upgrade, nodes fail to start and the error indicates that they -failed to read the local state. - -*Resolution:* - -Before you upgrade, identify the problematic search or query DSL. In 5.6.5 and -later, the Upgrade Assistant detects these scenarios. If you cannot fix the DSL -before the upgrade, you must delete the {dfeed} then re-create it with valid DSL -after the upgrade. - -If you do not fix or delete the {dfeed} before the upgrade, in order to successfully -start the failing nodes you must downgrade the nodes then fix the problem per -above. - -See also {stack-ref}/upgrading-elastic-stack.html[Upgrading the Elastic Stack]. diff --git a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc index 99c69eeea8a..1712c88380b 100644 --- a/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc +++ b/x-pack/docs/en/monitoring/configuring-monitoring.asciidoc @@ -142,4 +142,4 @@ stored, that is to say the monitoring cluster. To grant all of the necessary per <>. 
include::indices.asciidoc[] -include::{xes-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file +include::{es-repo-dir}/settings/monitoring-settings.asciidoc[] \ No newline at end of file diff --git a/x-pack/docs/en/monitoring/indices.asciidoc b/x-pack/docs/en/monitoring/indices.asciidoc index 10d2c212de2..efa9836daa2 100644 --- a/x-pack/docs/en/monitoring/indices.asciidoc +++ b/x-pack/docs/en/monitoring/indices.asciidoc @@ -11,6 +11,8 @@ You can retrieve the templates through the `_template` API: ---------------------------------- GET /_template/.monitoring-* ---------------------------------- +// CONSOLE +// TEST[catch:missing] By default, the template configures one shard and one replica for the monitoring indices. To override the default settings, add your own template: @@ -36,6 +38,7 @@ PUT /_template/custom_monitoring } } ---------------------------------- +// CONSOLE IMPORTANT: Only set the `number_of_shards` and `number_of_replicas` in the settings section. 
Overriding other monitoring template settings could cause diff --git a/x-pack/docs/en/security/auditing/event-types.asciidoc b/x-pack/docs/en/security/auditing/event-types.asciidoc index 1a6d4b02b0c..100de2da13a 100644 --- a/x-pack/docs/en/security/auditing/event-types.asciidoc +++ b/x-pack/docs/en/security/auditing/event-types.asciidoc @@ -1,5 +1,4 @@ [role="xpack"] -[float] [[audit-event-types]] === Audit event types diff --git a/x-pack/docs/en/security/auditing/output-index.asciidoc b/x-pack/docs/en/security/auditing/output-index.asciidoc index e3ba805d715..a07bd7a8d06 100644 --- a/x-pack/docs/en/security/auditing/output-index.asciidoc +++ b/x-pack/docs/en/security/auditing/output-index.asciidoc @@ -1,5 +1,4 @@ [role="xpack"] -[float] [[audit-index]] === Index audit output @@ -36,3 +35,8 @@ xpack.security.audit.index.settings: number_of_shards: 1 number_of_replicas: 1 ---------------------------- + +NOTE: Audit events are batched for indexing so there is a lag before +events appear in the index. You can control how frequently batches of +events are pushed to the index by setting +`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`. 
diff --git a/x-pack/docs/en/security/auditing/output-logfile.asciidoc b/x-pack/docs/en/security/auditing/output-logfile.asciidoc index 095f57cf61e..849046bdb9d 100644 --- a/x-pack/docs/en/security/auditing/output-logfile.asciidoc +++ b/x-pack/docs/en/security/auditing/output-logfile.asciidoc @@ -1,5 +1,4 @@ [role="xpack"] -[float] [[audit-log-output]] === Logfile audit output diff --git a/x-pack/docs/en/security/auditing/overview.asciidoc b/x-pack/docs/en/security/auditing/overview.asciidoc index b60122612a0..b874af3d1c4 100644 --- a/x-pack/docs/en/security/auditing/overview.asciidoc +++ b/x-pack/docs/en/security/auditing/overview.asciidoc @@ -29,12 +29,7 @@ indexing by setting `xpack.security.audit.outputs` in `elasticsearch.yml`: xpack.security.audit.outputs: [ index, logfile ] ---------------------------- -The `index` output type should be used in conjunction with the `logfile` -output type Because it is possible for the `index` output type to lose -messages if the target index is unavailable, the `access.log` should be -used as the official record of events. - -NOTE: Audit events are batched for indexing so there is a lag before -events appear in the index. You can control how frequently batches of -events are pushed to the index by setting -`xpack.security.audit.index.flush_interval` in `elasticsearch.yml`. +TIP: If you choose to enable the `index` output type, we strongly recommend that +you still use the `logfile` output as the official record of events. If the +target index is unavailable (for example, during a rolling upgrade), the `index` +output can lose messages. 
diff --git a/x-pack/docs/en/security/configuring-es.asciidoc b/x-pack/docs/en/security/configuring-es.asciidoc index d8ef6c2809b..5e8f1adbc7a 100644 --- a/x-pack/docs/en/security/configuring-es.asciidoc +++ b/x-pack/docs/en/security/configuring-es.asciidoc @@ -142,5 +142,5 @@ include::authentication/configuring-ldap-realm.asciidoc[] include::authentication/configuring-native-realm.asciidoc[] include::authentication/configuring-pki-realm.asciidoc[] include::authentication/configuring-saml-realm.asciidoc[] -include::{xes-repo-dir}/settings/security-settings.asciidoc[] -include::{xes-repo-dir}/settings/audit-settings.asciidoc[] +include::{es-repo-dir}/settings/security-settings.asciidoc[] +include::{es-repo-dir}/settings/audit-settings.asciidoc[] diff --git a/x-pack/docs/en/setup/images/ElasticsearchFlow.jpg b/x-pack/docs/en/setup/images/ElasticsearchFlow.jpg deleted file mode 100644 index 41fd8508c7a..00000000000 Binary files a/x-pack/docs/en/setup/images/ElasticsearchFlow.jpg and /dev/null differ diff --git a/x-pack/docs/en/setup/next-steps.asciidoc b/x-pack/docs/en/setup/next-steps.asciidoc deleted file mode 100644 index e52cdfee077..00000000000 --- a/x-pack/docs/en/setup/next-steps.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -[role="exclude"] -==== Next steps - -You now have a test {es} environment set up. Before you start -serious development or go into production with {es}, you must do some additional -setup: - -* Learn how to <>. -* Configure <>. -* Configure <>. diff --git a/x-pack/docs/en/setup/xpack-passwords.asciidoc b/x-pack/docs/en/setup/xpack-passwords.asciidoc deleted file mode 100644 index 0d9dc22f3b3..00000000000 --- a/x-pack/docs/en/setup/xpack-passwords.asciidoc +++ /dev/null @@ -1,27 +0,0 @@ - -. Set the passwords for all built-in users. -+ --- -If you have enabled {security}, it provides built-in user credentials to help you -get up and running. 
The +elasticsearch-setup-passwords+ command is the simplest -method to set the built-in users' passwords for the first time. - -For example, you can run the command in an "interactive" mode, which prompts you -to enter new passwords for the `elastic`, `kibana`, and `logstash_system` users: - -[source,shell] --------------------------------------------------- -bin/elasticsearch-setup-passwords interactive --------------------------------------------------- - -For more information about the command options, see <>. - -IMPORTANT: The `elasticsearch-setup-passwords` command uses a transient -bootstrap password that is no longer valid after the command runs successfully. -You cannot run the `elasticsearch-setup-passwords` command a second time. -Instead, you can update passwords from the **Management > Users** UI in {kib} or -use the security user API. - -For more information, see -{xpack-ref}/setting-up-authentication.html#set-built-in-user-passwords[Setting Built-in User Passwords]. --- diff --git a/x-pack/docs/en/setup/xpack-tls.asciidoc b/x-pack/docs/en/setup/xpack-tls.asciidoc deleted file mode 100644 index 033339b3610..00000000000 --- a/x-pack/docs/en/setup/xpack-tls.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ - -. Configure Transport Layer Security (TLS/SSL). -+ --- -If have enabled {security}, you must configure TLS for internode-communication. - -NOTE: This requirement applies to clusters with more than one node and to -clusters with a single node that listens on an external interface. Single-node -clusters that use a loopback interface do not have this requirement. For more -information, see -{xpack-ref}/encrypting-communications.html[Encrypting Communications]. - --- -.. <>. - -.. <>. 
diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 4a0b29c4258..ac423c42811 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -1,12 +1,8 @@ import org.elasticsearch.gradle.LoggedExec -import org.elasticsearch.gradle.MavenFilteringHack +import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.elasticsearch.gradle.test.NodeInfo import java.nio.charset.StandardCharsets -import java.nio.file.Files -import java.nio.file.Path -import java.nio.file.StandardCopyOption -import org.elasticsearch.gradle.test.RunTask; apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' @@ -17,6 +13,50 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } +subprojects { + afterEvaluate { + if (project.plugins.hasPlugin(PluginBuildPlugin)) { + // see the root Gradle file for additional logic regarding this configuration + project.configurations.create('featureAwarePlugin') + project.dependencies.add('featureAwarePlugin', project.configurations.compileClasspath) + project.dependencies.add( + 'featureAwarePlugin', + "org.elasticsearch.xpack.test:feature-aware:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + project.dependencies.add('featureAwarePlugin', project.sourceSets.main.output.getClassesDirs()) + + final Task featureAwareTask = project.tasks.create("featureAwareCheck", LoggedExec) { + description = "Runs FeatureAwareCheck on main classes." 
+ dependsOn project.configurations.featureAwarePlugin + + final File successMarker = new File(project.buildDir, 'markers/featureAware') + outputs.file(successMarker) + + executable = new File(project.runtimeJavaHome, 'bin/java') + + // default to main class files if such a source set exists + final List files = [] + if (project.sourceSets.findByName("main")) { + files.add(project.sourceSets.main.output.classesDir) + dependsOn project.tasks.classes + } + // filter out non-existent classes directories from empty source sets + final FileCollection classDirectories = project.files(files).filter { it.exists() } + + doFirst { + args('-cp', project.configurations.featureAwarePlugin.asPath, 'org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck') + classDirectories.each { args it.getAbsolutePath() } + } + doLast { + successMarker.parentFile.mkdirs() + successMarker.setText("", 'UTF-8') + } + } + + project.precommit.dependsOn featureAwareTask + } + } +} + // https://github.com/elastic/x-plugins/issues/724 configurations { testArtifacts.extendsFrom testRuntime diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java index 894f434397c..3c6f9e66720 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTask.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.xpack.core.XPackPlugin; import java.io.IOException; import java.util.Arrays; @@ -24,7 +25,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -public class ShardFollowTask implements PersistentTaskParams { +public class 
ShardFollowTask implements XPackPlugin.XPackPersistentTaskParams { public static final String NAME = "xpack/ccr/shard_follow_task"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java index b549cffc0cc..f615fbd0b53 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityFeatureSetUsage.java @@ -92,4 +92,8 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage { builder.field(ANONYMOUS_XFIELD, anonymousUsage); } } + + public Map getRealmsUsage() { + return Collections.unmodifiableMap(realmsUsage); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java index 234141c77c9..3e92be2ef90 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java @@ -119,11 +119,11 @@ public abstract class Realm implements Comparable { */ public abstract void lookupUser(String username, ActionListener listener); - public Map usageStats() { + public void usageStats(ActionListener> listener) { Map stats = new HashMap<>(); stats.put("name", name()); stats.put("order", order()); - return stats; + listener.onResponse(stats); } @Override diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen index 67d51684520..4a192ac3b16 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen @@ -4,7 +4,7 @@ # or more contributor license agreements. 
Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.core.ssl.CertificateGenerateTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat index 01f3c0f21cd..b5842b57b16 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certgen.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.core.ssl.CertificateGenerateTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.core.ssl.CertificateGenerateTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil index eb245fd0b0e..a13be812f0b 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.core.ssl.CertificateTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.core.ssl.CertificateTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat index f8a5fd9880a..2e397190f23 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-certutil.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.core.ssl.CertificateTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.core.ssl.CertificateTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate index dc3f360361d..183722d9c93 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool \ - "$@" \ No newline at end of file + "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat index 67faf2ea66a..4b8e4f926d7 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-migrate.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.security.authc.esnative.ESNativeRealmMigrateTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata index 48274ab7efa..2b2637b094d 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat index 6cdd539a81d..64a272dfbb5 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-saml-metadata.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.security.authc.saml.SamlMetadataCommand ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords index d896efcfcbe..3c5887fc675 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat index e3ea134ae43..3c956ca47ba 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-setup-passwords.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.security.authc.esnative.tool.SetupPasswordTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen index 954b0884007..a5d988945fd 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat index 570eef619ec..11414872d07 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-syskeygen.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.security.crypto.tool.SystemKeyTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users b/x-pack/plugin/security/src/main/bin/elasticsearch-users index 6caeece8cbc..9d445887fee 100755 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.file.tool.UsersTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-security-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.security.authc.file.tool.UsersTool \ "$@" diff --git a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat index 2975fbe87b9..9b35895ed86 100644 --- a/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat +++ b/x-pack/plugin/security/src/main/bin/elasticsearch-users.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.security.authc.file.tool.UsersTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-security-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.security.authc.file.tool.UsersTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java index 1be3b4cd679..ab70b8513de 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityFeatureSet.java @@ -86,7 +86,6 @@ public class SecurityFeatureSet implements XPackFeatureSet { @Override public void usage(ActionListener listener) { - Map realmsUsage = buildRealmsUsage(realms); Map sslUsage = sslUsage(settings); Map auditUsage = auditUsage(settings); Map ipFilterUsage = ipFilterUsage(ipFilter); @@ -94,10 +93,11 @@ public class SecurityFeatureSet implements XPackFeatureSet { final AtomicReference> rolesUsageRef = new AtomicReference<>(); final AtomicReference> roleMappingUsageRef = new AtomicReference<>(); - final CountDown countDown = new 
CountDown(2); + final AtomicReference> realmsUsageRef = new AtomicReference<>(); + final CountDown countDown = new CountDown(3); final Runnable doCountDown = () -> { if (countDown.countDown()) { - listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsage, + listener.onResponse(new SecurityFeatureSetUsage(available(), enabled(), realmsUsageRef.get(), rolesUsageRef.get(), roleMappingUsageRef.get(), sslUsage, auditUsage, ipFilterUsage, anonymousUsage)); } @@ -116,6 +116,12 @@ public class SecurityFeatureSet implements XPackFeatureSet { doCountDown.run(); }, listener::onFailure); + final ActionListener> realmsUsageListener = + ActionListener.wrap(realmsUsage -> { + realmsUsageRef.set(realmsUsage); + doCountDown.run(); + }, listener::onFailure); + if (rolesStore == null) { rolesStoreUsageListener.onResponse(Collections.emptyMap()); } else { @@ -126,13 +132,11 @@ public class SecurityFeatureSet implements XPackFeatureSet { } else { roleMappingStore.usageStats(roleMappingStoreUsageListener); } - } - - static Map buildRealmsUsage(Realms realms) { if (realms == null) { - return Collections.emptyMap(); + realmsUsageListener.onResponse(Collections.emptyMap()); + } else { + realms.usageStats(realmsUsageListener); } - return realms.usageStats(); } static Map sslUsage(Settings settings) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java index db7475a8972..1976722d65f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -992,24 +992,22 @@ public class IndexAuditTrail extends AbstractComponent implements AuditTrail, Cl } public static Settings customAuditIndexSettings(Settings nodeSettings, Logger logger) { - 
Settings newSettings = Settings.builder() + final Settings newSettings = Settings.builder() .put(INDEX_SETTINGS.get(nodeSettings), false) + .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX) .build(); if (newSettings.names().isEmpty()) { return Settings.EMPTY; } - // Filter out forbidden settings: - Settings.Builder builder = Settings.builder(); - builder.put(newSettings.filter(k -> { - String name = "index." + k; + // Filter out forbidden setting + return Settings.builder().put(newSettings.filter(name -> { if (FORBIDDEN_INDEX_SETTING.equals(name)) { logger.warn("overriding the default [{}} setting is forbidden. ignoring...", name); return false; } return true; - })); - return builder.build(); + })).build(); } private void putTemplate(Settings customSettings, Consumer consumer) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java index 38319597523..0284ae9a05f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/Realms.java @@ -15,12 +15,16 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import java.util.stream.Stream; import java.util.stream.StreamSupport; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; import org.elasticsearch.license.XPackLicenseState; @@ -188,46 +192,67 @@ public class Realms extends AbstractComponent implements 
Iterable { return realms; } - public Map usageStats() { + public void usageStats(ActionListener> listener) { Map realmMap = new HashMap<>(); - for (Realm realm : this) { - if (ReservedRealm.TYPE.equals(realm.type())) { - continue; + final AtomicBoolean failed = new AtomicBoolean(false); + final List realmList = asList().stream() + .filter(r -> ReservedRealm.TYPE.equals(r.type()) == false) + .collect(Collectors.toList()); + final CountDown countDown = new CountDown(realmList.size()); + final Runnable doCountDown = () -> { + if ((realmList.isEmpty() || countDown.countDown()) && failed.get() == false) { + final AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); + // iterate over the factories so we can add enabled & available info + for (String type : factories.keySet()) { + assert ReservedRealm.TYPE.equals(type) == false; + realmMap.compute(type, (key, value) -> { + if (value == null) { + return MapBuilder.newMapBuilder() + .put("enabled", false) + .put("available", isRealmTypeAvailable(allowedRealmType, type)) + .map(); + } + + assert value instanceof Map; + Map realmTypeUsage = (Map) value; + realmTypeUsage.put("enabled", true); + // the realms iterator returned this type so it must be enabled + assert isRealmTypeAvailable(allowedRealmType, type); + realmTypeUsage.put("available", true); + return value; + }); + } + listener.onResponse(realmMap); + } + }; + + if (realmList.isEmpty()) { + doCountDown.run(); + } else { + for (Realm realm : realmList) { + realm.usageStats(ActionListener.wrap(stats -> { + if (failed.get() == false) { + synchronized (realmMap) { + realmMap.compute(realm.type(), (key, value) -> { + if (value == null) { + Object realmTypeUsage = convertToMapOfLists(stats); + return realmTypeUsage; + } + assert value instanceof Map; + combineMaps((Map) value, stats); + return value; + }); + } + doCountDown.run(); + } + }, + e -> { + if (failed.compareAndSet(false, true)) { + listener.onFailure(e); + } + })); } - 
realmMap.compute(realm.type(), (key, value) -> { - if (value == null) { - Object realmTypeUsage = convertToMapOfLists(realm.usageStats()); - return realmTypeUsage; - } - assert value instanceof Map; - combineMaps((Map) value, realm.usageStats()); - return value; - }); } - - final AllowedRealmType allowedRealmType = licenseState.allowedRealmType(); - // iterate over the factories so we can add enabled & available info - for (String type : factories.keySet()) { - assert ReservedRealm.TYPE.equals(type) == false; - realmMap.compute(type, (key, value) -> { - if (value == null) { - return MapBuilder.newMapBuilder() - .put("enabled", false) - .put("available", isRealmTypeAvailable(allowedRealmType, type)) - .map(); - } - - assert value instanceof Map; - Map realmTypeUsage = (Map) value; - realmTypeUsage.put("enabled", true); - // the realms iterator returned this type so it must be enabled - assert isRealmTypeAvailable(allowedRealmType, type); - realmTypeUsage.put("available", true); - return value; - }); - } - - return realmMap; } private void addNativeRealms(List realms) throws Exception { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java index af2bfcf0d6c..a84b76beab8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java @@ -15,6 +15,8 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; +import java.util.Map; + import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isIndexDeleted; import static 
org.elasticsearch.xpack.security.support.SecurityIndexManager.isMoveFromRedToNonRed; @@ -46,6 +48,16 @@ public class NativeRealm extends CachingUsernamePasswordRealm { } } + @Override + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> + userStore.getUserCount(ActionListener.wrap(size -> { + stats.put("size", size); + listener.onResponse(stats); + }, listener::onFailure)) + , listener::onFailure)); + } + // method is used for testing to verify cache expiration since expireAll is final void clearCache() { expireAll(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 1477c6dc880..72a65b8213f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -150,6 +150,30 @@ public class NativeUsersStore extends AbstractComponent { } } + void getUserCount(final ActionListener listener) { + if (securityIndex.indexExists() == false) { + listener.onResponse(0L); + } else { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> + executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, + client.prepareSearch(SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE)) + .setSize(0) + .request(), + new ActionListener() { + @Override + public void onResponse(SearchResponse response) { + listener.onResponse(response.getHits().getTotalHits()); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::search)); + } + } + /** * Async method to retrieve a user and their password */ diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java index 88656b9e01e..e2586ea836d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java @@ -55,11 +55,11 @@ public class FileRealm extends CachingUsernamePasswordRealm { } @Override - public Map usageStats() { - Map stats = super.usageStats(); - // here we can determine the size based on the in mem user store - stats.put("size", userPasswdStore.usersCount()); - return stats; + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> { + stats.put("size", userPasswdStore.usersCount()); + listener.onResponse(stats); + }, listener::onFailure)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index a7c6efdda31..87749850141 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -160,12 +160,14 @@ public final class LdapRealm extends CachingUsernamePasswordRealm { } @Override - public Map usageStats() { - Map usage = super.usageStats(); - usage.put("load_balance_type", LdapLoadBalancing.resolve(config.settings()).toString()); - usage.put("ssl", sessionFactory.isSslUsed()); - usage.put("user_search", LdapUserSearchSessionFactory.hasUserSearchSettings(config)); - return usage; + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(usage -> { + usage.put("size", getCacheSize()); + usage.put("load_balance_type", 
LdapLoadBalancing.resolve(config.settings()).toString()); + usage.put("ssl", sessionFactory.isSslUsed()); + usage.put("user_search", LdapUserSearchSessionFactory.hasUserSearchSettings(config)); + listener.onResponse(usage); + }, listener::onFailure)); } private static void buildUser(LdapSession session, String username, ActionListener listener, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java index 8dae5275eda..e9c107abcce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java @@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.user.User; +import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.concurrent.ExecutionException; @@ -177,10 +178,15 @@ public abstract class CachingUsernamePasswordRealm extends UsernamePasswordRealm } @Override - public Map usageStats() { - Map stats = super.usageStats(); - stats.put("size", cache.count()); - return stats; + public void usageStats(ActionListener> listener) { + super.usageStats(ActionListener.wrap(stats -> { + stats.put("cache", Collections.singletonMap("size", getCacheSize())); + listener.onResponse(stats); + }, listener::onFailure)); + } + + protected int getCacheSize() { + return cache.count(); } protected abstract void doAuthenticate(UsernamePasswordToken token, ActionListener listener); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java index f2d78806da0..59bc8042fba 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -87,7 +87,7 @@ public class FileRolesStore extends AbstractComponent { } public Map usageStats() { - Map usageStats = new HashMap<>(); + Map usageStats = new HashMap<>(3); usageStats.put("size", permissions.size()); boolean dls = false; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index b1e5170a202..9093b6a6673 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -195,7 +195,7 @@ public class NativeRolesStore extends AbstractComponent { } public void usageStats(ActionListener> listener) { - Map usageStats = new HashMap<>(); + Map usageStats = new HashMap<>(3); if (securityIndex.indexExists() == false) { usageStats.put("size", 0L); usageStats.put("fls", false); @@ -204,56 +204,56 @@ public class NativeRolesStore extends AbstractComponent { } else { securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareMultiSearch() - .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) - .setSize(0)) - .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setQuery(QueryBuilders.boolQuery() - 
.must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) - .must(QueryBuilders.boolQuery() - .should(existsQuery("indices.field_security.grant")) - .should(existsQuery("indices.field_security.except")) - // for backwardscompat with 2.x - .should(existsQuery("indices.fields")))) - .setSize(0) - .setTerminateAfter(1)) - .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) - .setQuery(QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) - .filter(existsQuery("indices.query"))) - .setSize(0) - .setTerminateAfter(1)) - .request(), - new ActionListener() { - @Override - public void onResponse(MultiSearchResponse items) { - Item[] responses = items.getResponses(); - if (responses[0].isFailure()) { - usageStats.put("size", 0); - } else { - usageStats.put("size", responses[0].getResponse().getHits().getTotalHits()); - } - - if (responses[1].isFailure()) { - usageStats.put("fls", false); - } else { - usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L); - } - - if (responses[2].isFailure()) { - usageStats.put("dls", false); - } else { - usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L); - } - listener.onResponse(usageStats); + client.prepareMultiSearch() + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .setSize(0)) + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .must(QueryBuilders.boolQuery() + .should(existsQuery("indices.field_security.grant")) + .should(existsQuery("indices.field_security.except")) + // for backwardscompat with 2.x + .should(existsQuery("indices.fields")))) + .setSize(0) + .setTerminateAfter(1)) + 
.add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) + .setQuery(QueryBuilders.boolQuery() + .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) + .filter(existsQuery("indices.query"))) + .setSize(0) + .setTerminateAfter(1)) + .request(), + new ActionListener() { + @Override + public void onResponse(MultiSearchResponse items) { + Item[] responses = items.getResponses(); + if (responses[0].isFailure()) { + usageStats.put("size", 0); + } else { + usageStats.put("size", responses[0].getResponse().getHits().getTotalHits()); } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); + if (responses[1].isFailure()) { + usageStats.put("fls", false); + } else { + usageStats.put("fls", responses[1].getResponse().getHits().getTotalHits() > 0L); } - }, client::multiSearch)); + + if (responses[2].isFailure()) { + usageStats.put("dls", false); + } else { + usageStats.put("dls", responses[2].getResponse().getHits().getTotalHits() > 0L); + } + listener.onResponse(usageStats); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, client::multiSearch)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java index 171507de741..95af7665157 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContext.java @@ -11,7 +11,7 @@ import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ReadWriteHandler; import org.elasticsearch.nio.SocketChannelContext; -import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.nio.NioSelector; import 
org.elasticsearch.nio.WriteOperation; import java.io.IOException; @@ -28,7 +28,7 @@ public final class SSLChannelContext extends SocketChannelContext { private final SSLDriver sslDriver; - SSLChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, SSLDriver sslDriver, + SSLChannelContext(NioSocketChannel channel, NioSelector selector, Consumer exceptionHandler, SSLDriver sslDriver, ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { super(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); this.sslDriver = sslDriver; @@ -140,7 +140,7 @@ public final class SSLChannelContext extends SocketChannelContext { public void closeChannel() { if (isClosing.compareAndSet(false, true)) { WriteOperation writeOperation = new CloseNotifyOperation(this); - SocketSelector selector = getSelector(); + NioSelector selector = getSelector(); if (selector.isOnCurrentThread() == false) { selector.queueWrite(writeOperation); return; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java index 1c9d779c2cc..39ce1a0150c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SecurityNioTransport.java @@ -13,11 +13,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; -import org.elasticsearch.nio.AcceptingSelector; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; import org.elasticsearch.nio.ServerChannelContext; -import org.elasticsearch.nio.SocketSelector; +import 
org.elasticsearch.nio.NioSelector; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.nio.NioTransport; @@ -117,7 +116,7 @@ public class SecurityNioTransport extends NioTransport { } @Override - public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException { + public TcpNioSocketChannel createChannel(NioSelector selector, SocketChannel channel) throws IOException { SSLConfiguration defaultConfig = profileConfiguration.get(TcpTransport.DEFAULT_PROFILE); SSLEngine sslEngine = sslService.createSSLEngine(profileConfiguration.getOrDefault(profileName, defaultConfig), null, -1); SSLDriver sslDriver = new SSLDriver(sslEngine, isClient); @@ -136,7 +135,7 @@ public class SecurityNioTransport extends NioTransport { } @Override - public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException { + public TcpNioServerSocketChannel createServerChannel(NioSelector selector, ServerSocketChannel channel) throws IOException { TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel); Consumer exceptionHandler = (e) -> logger.error(() -> new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java index c169d62c6b1..076ce6c9fcb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityFeatureSetTests.java @@ -146,7 +146,11 @@ public class SecurityFeatureSetTests extends ESTestCase { realmUsage.put("key2", Arrays.asList(i)); realmUsage.put("key3", Arrays.asList(i % 2 == 0)); } - 
when(realms.usageStats()).thenReturn(realmsUsageStats); + doAnswer(invocationOnMock -> { + ActionListener> listener = (ActionListener) invocationOnMock.getArguments()[0]; + listener.onResponse(realmsUsageStats); + return Void.TYPE; + }).when(realms).usageStats(any(ActionListener.class)); final boolean anonymousEnabled = randomBoolean(); if (anonymousEnabled) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java index dab3d023f65..bc27e4cde40 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java @@ -6,10 +6,14 @@ package org.elasticsearch.xpack.security.audit.index; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesRequest; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; @@ -17,6 +21,8 @@ import org.elasticsearch.client.Requests; import org.elasticsearch.client.transport.NoNodeAvailableException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetaData; 
+import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -29,6 +35,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.plugins.MetaDataUpgrader; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.search.SearchHit; @@ -70,7 +77,9 @@ import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.function.Function; +import static java.util.Collections.emptyMap; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.InternalTestCluster.clusterName; @@ -85,6 +94,7 @@ import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.hasSize; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -360,6 +370,21 @@ public class IndexAuditTrailTests extends SecurityIntegTestCase { auditor.start(); } + public void testIndexTemplateUpgrader() throws Exception { + final MetaDataUpgrader metaDataUpgrader = internalCluster().getInstance(MetaDataUpgrader.class); + final Map updatedTemplates = metaDataUpgrader.indexTemplateMetaDataUpgraders.apply(emptyMap()); + final IndexTemplateMetaData indexAuditTrailTemplate = updatedTemplates.get(IndexAuditTrail.INDEX_TEMPLATE_NAME); + assertThat(indexAuditTrailTemplate, notNullValue()); + // test custom index settings override template + 
assertThat(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexAuditTrailTemplate.settings()), is(numReplicas)); + assertThat(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexAuditTrailTemplate.settings()), is(numShards)); + // test upgrade template and installed template are equal + final GetIndexTemplatesRequest request = new GetIndexTemplatesRequest(IndexAuditTrail.INDEX_TEMPLATE_NAME); + final GetIndexTemplatesResponse response = client().admin().indices().getTemplates(request).get(); + assertThat(response.getIndexTemplates(), hasSize(1)); + assertThat(indexAuditTrailTemplate, is(response.getIndexTemplates().get(0))); + } + public void testProcessorsSetting() { final boolean explicitProcessors = randomBoolean(); final int processors; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java index 2bc3d58471b..ff4c30ddf8c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.security.authc; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -454,9 +455,11 @@ public class RealmsTests extends ESTestCase { .put("xpack.security.authc.realms.bar.order", "1"); Settings settings = builder.build(); Environment env = TestEnvironment.newEnvironment(settings); - Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm ); + Realms realms = new Realms(settings, env, factories, licenseState, threadContext, reservedRealm); - Map usageStats = realms.usageStats(); + PlainActionFuture> 
future = new PlainActionFuture<>(); + realms.usageStats(future); + Map usageStats = future.get(); assertThat(usageStats.size(), is(factories.size())); // first check type_0 @@ -482,7 +485,9 @@ public class RealmsTests extends ESTestCase { // disable ALL using license when(licenseState.isAuthAllowed()).thenReturn(false); when(licenseState.allowedRealmType()).thenReturn(AllowedRealmType.NONE); - usageStats = realms.usageStats(); + future = new PlainActionFuture<>(); + realms.usageStats(future); + usageStats = future.get(); assertThat(usageStats.size(), is(factories.size())); for (Entry entry : usageStats.entrySet()) { Map typeMap = (Map) entry.getValue(); @@ -494,7 +499,9 @@ public class RealmsTests extends ESTestCase { // check native or internal realms enabled only when(licenseState.isAuthAllowed()).thenReturn(true); when(licenseState.allowedRealmType()).thenReturn(randomFrom(AllowedRealmType.NATIVE, AllowedRealmType.DEFAULT)); - usageStats = realms.usageStats(); + future = new PlainActionFuture<>(); + realms.usageStats(future); + usageStats = future.get(); assertThat(usageStats.size(), is(factories.size())); for (Entry entry : usageStats.entrySet()) { final String type = entry.getKey(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index a238576e413..a0550b4c1ce 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -22,6 +22,10 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySettingsSourceField; +import 
org.elasticsearch.xpack.core.XPackFeatureSet; +import org.elasticsearch.xpack.core.action.XPackUsageRequestBuilder; +import org.elasticsearch.xpack.core.action.XPackUsageResponse; +import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage; import org.elasticsearch.xpack.core.security.action.role.DeleteRoleResponse; import org.elasticsearch.xpack.core.security.action.role.GetRolesResponse; import org.elasticsearch.xpack.core.security.action.role.PutRoleResponse; @@ -49,6 +53,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.CountDownLatch; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -662,6 +667,28 @@ public class NativeRealmIntegTests extends NativeRealmIntegTestCase { assertThat(usage.get("dls"), is(dls)); } + public void testRealmUsageStats() { + final int numNativeUsers = scaledRandomIntBetween(1, 32); + SecurityClient securityClient = new SecurityClient(client()); + for (int i = 0; i < numNativeUsers; i++) { + securityClient.preparePutUser("joe" + i, "s3krit".toCharArray(), "superuser").get(); + } + + XPackUsageResponse response = new XPackUsageRequestBuilder(client()).get(); + Optional securityUsage = response.getUsages().stream() + .filter(usage -> usage instanceof SecurityFeatureSetUsage) + .findFirst(); + assertTrue(securityUsage.isPresent()); + SecurityFeatureSetUsage securityFeatureSetUsage = (SecurityFeatureSetUsage) securityUsage.get(); + Map realmsUsage = securityFeatureSetUsage.getRealmsUsage(); + assertNotNull(realmsUsage); + assertNotNull(realmsUsage.get("native")); + assertNotNull(((Map) realmsUsage.get("native")).get("size")); + List sizeList = (List) ((Map) realmsUsage.get("native")).get("size"); + assertEquals(1, sizeList.size()); + assertEquals(numNativeUsers, Math.toIntExact(sizeList.get(0))); + } + public void testSetEnabled() throws Exception { 
securityClient().preparePutUser("joe", "s3krit".toCharArray(), SecuritySettingsSource.TEST_ROLE).get(); final String token = basicAuthHeaderValue("joe", new SecureString("s3krit".toCharArray())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java index b0f53229377..7295e48d003 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java @@ -248,7 +248,9 @@ public class FileRealmTests extends ESTestCase { threadContext); FileRealm realm = new FileRealm(config, userPasswdStore, userRolesStore, threadPool); - Map usage = realm.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + realm.usageStats(future); + Map usage = future.get(); assertThat(usage, is(notNullValue())); assertThat(usage, hasEntry("name", "file-realm")); assertThat(usage, hasEntry("order", order)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java index 52026cc8af5..6ab4dbf3e0c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java @@ -320,7 +320,9 @@ public class ActiveDirectoryRealmTests extends ESTestCase { DnRoleMapper roleMapper = new DnRoleMapper(config, resourceWatcherService); LdapRealm realm = new LdapRealm(LdapRealmSettings.AD_TYPE, config, sessionFactory, roleMapper, threadPool); - Map stats = realm.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + 
realm.usageStats(future); + Map stats = future.get(); assertThat(stats, is(notNullValue())); assertThat(stats, hasEntry("name", realm.name())); assertThat(stats, hasEntry("order", realm.order())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index 042664fa670..ea1b9117922 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -360,7 +360,9 @@ public class LdapRealmTests extends LdapTestCase { LdapRealm realm = new LdapRealm(LdapRealmSettings.LDAP_TYPE, config, ldapFactory, new DnRoleMapper(config, resourceWatcherService), threadPool); - Map stats = realm.usageStats(); + PlainActionFuture> future = new PlainActionFuture<>(); + realm.usageStats(future); + Map stats = future.get(); assertThat(stats, is(notNullValue())); assertThat(stats, hasEntry("name", "ldap-realm")); assertThat(stats, hasEntry("order", realm.order())); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java index 168dcd64e6c..14a22d300d1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/nio/SSLChannelContextTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.nio.BytesWriteHandler; import org.elasticsearch.nio.FlushReadyWrite; import org.elasticsearch.nio.InboundChannelBuffer; import org.elasticsearch.nio.NioSocketChannel; -import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.nio.NioSelector; import 
org.elasticsearch.nio.WriteOperation; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -40,7 +40,7 @@ public class SSLChannelContextTests extends ESTestCase { private SocketChannel rawChannel; private SSLChannelContext context; private InboundChannelBuffer channelBuffer; - private SocketSelector selector; + private NioSelector selector; private BiConsumer listener; private Consumer exceptionHandler; private SSLDriver sslDriver; @@ -55,7 +55,7 @@ public class SSLChannelContextTests extends ESTestCase { TestReadWriteHandler readWriteHandler = new TestReadWriteHandler(readConsumer); messageLength = randomInt(96) + 20; - selector = mock(SocketSelector.class); + selector = mock(NioSelector.class); listener = mock(BiConsumer.class); channel = mock(NioSocketChannel.class); rawChannel = mock(SocketChannel.class); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java index 085e0c187e7..edf512f49cb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/ssl/SSLTrustRestrictionsTests.java @@ -28,6 +28,7 @@ import javax.net.ssl.SSLSocket; import javax.net.ssl.SSLSocketFactory; import java.io.IOException; import java.net.SocketException; +import java.nio.file.AtomicMoveNotSupportedException; import java.nio.file.Files; import java.nio.file.Path; import java.security.PrivateKey; @@ -35,6 +36,8 @@ import java.security.cert.X509Certificate; import java.util.Collections; import java.util.concurrent.TimeUnit; +import static java.nio.file.StandardCopyOption.ATOMIC_MOVE; +import static java.nio.file.StandardCopyOption.REPLACE_EXISTING; import static org.hamcrest.Matchers.is; /** @@ -46,11 +49,6 @@ import static org.hamcrest.Matchers.is; 
@TestLogging("org.elasticsearch.xpack.ssl.RestrictedTrustManager:DEBUG") public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { - /** - * Use a small keysize for performance, since the keys are only used in this test, but a large enough keysize - * to get past the SSL algorithm checker - */ - private static final int RESOURCE_RELOAD_MILLIS = 3; private static final TimeValue MAX_WAIT_RELOAD = TimeValue.timeValueSeconds(1); @@ -61,6 +59,7 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { private static CertificateInfo trustedCert; private static CertificateInfo untrustedCert; private static Path restrictionsPath; + private static Path restrictionsTmpPath; @Override protected int maxNumberOfNodes() { @@ -124,6 +123,8 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { .put(nodeSSL); restrictionsPath = configPath.resolve("trust_restrictions.yml"); + restrictionsTmpPath = configPath.resolve("trust_restrictions.tmp"); + writeRestrictions("*.trusted"); builder.put("xpack.ssl.trust_restrictions.path", restrictionsPath); builder.put("resource.reload.interval.high", RESOURCE_RELOAD_MILLIS + "ms"); @@ -133,7 +134,12 @@ public class SSLTrustRestrictionsTests extends SecurityIntegTestCase { private void writeRestrictions(String trustedPattern) { try { - Files.write(restrictionsPath, Collections.singleton("trust.subject_name: \"" + trustedPattern + "\"")); + Files.write(restrictionsTmpPath, Collections.singleton("trust.subject_name: \"" + trustedPattern + "\"")); + try { + Files.move(restrictionsTmpPath, restrictionsPath, REPLACE_EXISTING, ATOMIC_MOVE); + } catch (final AtomicMoveNotSupportedException e) { + Files.move(restrictionsTmpPath, restrictionsPath, REPLACE_EXISTING); + } } catch (IOException e) { throw new ElasticsearchException("failed to write restrictions", e); } diff --git a/x-pack/plugin/sql/jdbc/build.gradle b/x-pack/plugin/sql/jdbc/build.gradle index 18755523ac8..26cf913aa27 100644 --- 
a/x-pack/plugin/sql/jdbc/build.gradle +++ b/x-pack/plugin/sql/jdbc/build.gradle @@ -66,7 +66,7 @@ dependencies { compile (project(':libs:x-content')) { transitive = false } - compile project(':libs:elasticsearch-core') + compile project(':libs:core') runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/x-pack/plugin/sql/sql-proto/build.gradle b/x-pack/plugin/sql/sql-proto/build.gradle index e5ac7904432..b1d8497f9b8 100644 --- a/x-pack/plugin/sql/sql-proto/build.gradle +++ b/x-pack/plugin/sql/sql-proto/build.gradle @@ -18,7 +18,7 @@ dependencies { compile (project(':server')) { transitive = false } - compile (project(':libs:elasticsearch-core')) { + compile (project(':libs:core')) { transitive = false } compile (project(':libs:x-content')) { diff --git a/x-pack/plugin/sql/sql-shared-proto/build.gradle b/x-pack/plugin/sql/sql-shared-proto/build.gradle index b6580fbcb01..5a1439f4360 100644 --- a/x-pack/plugin/sql/sql-shared-proto/build.gradle +++ b/x-pack/plugin/sql/sql-shared-proto/build.gradle @@ -11,7 +11,7 @@ description = 'Request and response objects shared by the cli, jdbc ' + 'and the Elasticsearch plugin' dependencies { - compile (project(':libs:elasticsearch-core')) { + compile (project(':libs:core')) { transitive = false } compile (project(':libs:x-content')) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index 27597c93fd0..a755d2b4f59 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.Netty4Plugin; +import org.elasticsearch.transport.nio.NioTransportPlugin; import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; import org.elasticsearch.xpack.sql.plugin.SqlQueryRequestBuilder; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; @@ -66,9 +67,10 @@ public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { // Enable http so we can test JDBC licensing because only exists on the REST layer. + String httpPlugin = randomBoolean() ? Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME : NioTransportPlugin.NIO_TRANSPORT_NAME; return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) + .put(NetworkModule.HTTP_TYPE_KEY, httpPlugin) .build(); } diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval index c7185d16756..1c55587f556 100755 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval @@ -4,7 +4,7 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. 
-ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-watcher-env" \ +ES_MAIN_CLASS=org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool \ + ES_ADDITIONAL_SOURCES="x-pack-env;x-pack-watcher-env" \ "`dirname "$0"`"/elasticsearch-cli \ - org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool \ "$@" diff --git a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat index 281b06cf77b..2b4a33c9f9e 100644 --- a/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat +++ b/x-pack/plugin/watcher/src/main/bin/elasticsearch-croneval.bat @@ -7,9 +7,9 @@ rem you may not use this file except in compliance with the Elastic License. setlocal enabledelayedexpansion setlocal enableextensions +set ES_MAIN_CLASS=org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool set ES_ADDITIONAL_SOURCES=x-pack-env;x-pack-watcher-env call "%~dp0elasticsearch-cli.bat" ^ - org.elasticsearch.xpack.watcher.trigger.schedule.tool.CronEvalTool ^ %%* ^ || exit /b 1 diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index b4a45abed5b..abb981053e7 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -68,7 +68,7 @@ public class WatcherPluginTests extends ESTestCase { // ensure index module is not called, even if watches index is tried IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(Watch.INDEX, settings); AnalysisRegistry registry = new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); + emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); IndexModule indexModule = 
new IndexModule(indexSettings, registry, new InternalEngineFactory()); // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it watcher.onIndexModule(indexModule); diff --git a/x-pack/qa/rolling-upgrade-basic/build.gradle b/x-pack/qa/rolling-upgrade-basic/build.gradle index 91a6d106c98..6d5b250b460 100644 --- a/x-pack/qa/rolling-upgrade-basic/build.gradle +++ b/x-pack/qa/rolling-upgrade-basic/build.gradle @@ -8,62 +8,9 @@ apply plugin: 'elasticsearch.standalone-test' dependencies { testCompile project(path: xpackModule('core'), configuration: 'runtime') - testCompile project(path: xpackModule('security'), configuration: 'runtime') testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') // to be moved in a later commit } -Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> - File tmpFile = new File(node.cwd, 'wait.success') - - // wait up to two minutes - final long stopTime = System.currentTimeMillis() + (2 * 60000L); - Exception lastException = null; - int lastResponseCode = 0 - - while (System.currentTimeMillis() < stopTime) { - - lastException = null; - // we use custom wait logic here as the elastic user is not available immediately and ant.get will fail when a 401 is returned - HttpURLConnection httpURLConnection = null; - try { - // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); - httpURLConnection.setRequestProperty("Authorization", "Basic " + - Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); - httpURLConnection.setRequestMethod("GET"); - httpURLConnection.setConnectTimeout(1000); - httpURLConnection.setReadTimeout(30000); // read needs to wait for nodes! 
- httpURLConnection.connect(); - lastResponseCode = httpURLConnection.getResponseCode() - if (lastResponseCode == 200) { - tmpFile.withWriter StandardCharsets.UTF_8.name(), { - it.write(httpURLConnection.getInputStream().getText(StandardCharsets.UTF_8.name())) - } - break; - } - } catch (Exception e) { - logger.debug("failed to call cluster health", e) - lastException = e - } finally { - if (httpURLConnection != null) { - httpURLConnection.disconnect(); - } - } - - // did not start, so wait a bit before trying again - Thread.sleep(500L); - } - if (tmpFile.exists() == false) { - final String message = "final attempt of calling cluster health failed [lastResponseCode=${lastResponseCode}]" - if (lastException != null) { - logger.error(message, lastException) - } else { - logger.error(message + " [no exception]") - } - } - return tmpFile.exists() -} - // This is a top level task which we will add dependencies to below. // It is a single task that can be used to backcompat tests against all versions. 
task bwcTest { @@ -82,14 +29,13 @@ for (Version version : bwcVersions.wireCompatible) { configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { if (version.before('6.3.0')) { - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" } bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - minimumMasterNodes = { 2 } + numBwcNodes = 3 + numNodes = 3 + minimumMasterNodes = { 3 } clusterName = 'rolling-upgrade-basic' - waitCondition = waitWithAuth setting 'xpack.security.enabled', 'false' setting 'xpack.monitoring.enabled', 'false' setting 'xpack.ml.enabled', 'false' @@ -102,51 +48,66 @@ for (Version version : bwcVersions.wireCompatible) { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) - - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - clusterName = 'rolling-upgrade-basic' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - waitCondition = waitWithAuth - setting 'xpack.security.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'xpack.license.self_generated.type', 'basic' - setting 'node.name', 'mixed-node-0' + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + clusterName = 'rolling-upgrade-basic' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new 
node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'xpack.security.enabled', 'false' + setting 'xpack.monitoring.enabled', 'false' + setting 'xpack.ml.enabled', 'false' + setting 'xpack.watcher.enabled', 'false' + setting 'xpack.license.self_generated.type', 'basic' + setting 'node.name', "upgraded-node-${stopNode}" + } } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oldClusterTest.nodes.get(1).transportUri() + ',' + oldClusterTest.nodes.get(2).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'true' + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" + } + + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oldClusterTest.nodes.get(2).transportUri() + ',' + oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'false' + 
finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") - clusterName = 'rolling-upgrade-basic' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } - waitCondition = waitWithAuth - setting 'xpack.security.enabled', 'false' - setting 'xpack.monitoring.enabled', 'false' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - setting 'xpack.license.self_generated.type', 'basic' - setting 'node.name', 'upgraded-node-0' - } + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oneThirdUpgradedTest.nodes.get(0).transportUri() + ',' + twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. 
+ */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { @@ -170,11 +131,6 @@ task integTest { } check.dependsOn(integTest) -dependencies { - testCompile project(path: xpackModule('core'), configuration: 'runtime') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') -} - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" // copy x-pack plugin info so it is on the classpath and security manager has the right permissions diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index 6e93041e9a0..351f33b9412 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -30,7 +30,7 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> HttpURLConnection httpURLConnection = null; try { // TODO this sucks having to hardcode number of nodes, but node.config.numNodes isn't necessarily accurate for rolling - httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=2&wait_for_status=yellow").openConnection(); + httpURLConnection = (HttpURLConnection) new URL("http://${node.httpUri()}/_cluster/health?wait_for_nodes=3&wait_for_status=yellow").openConnection(); httpURLConnection.setRequestProperty("Authorization", "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8))); httpURLConnection.setRequestMethod("GET"); @@ -128,9 +128,9 @@ subprojects { String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - minimumMasterNodes = { 2 } + numBwcNodes = 3 + numNodes = 3 + minimumMasterNodes = { 3 } clusterName = 'rolling-upgrade' waitCondition = waitWithAuth setting 'xpack.monitoring.exporters._http.type', 'http' @@ -167,78 +167,87 @@ subprojects { systemProperty 'tests.rest.suite', 'old_cluster' } - Task mixedClusterTest = tasks.create(name: "${baseName}#mixedClusterTest", type: RestIntegTestTask) - - configure(extensions.findByName("${baseName}#mixedClusterTestCluster")) { - dependsOn oldClusterTestRunner, "${baseName}#oldClusterTestCluster#node1.stop" - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> oldClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[1].dataDir } - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' - setting 'node.attr.upgraded', 'first' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' - setting 'node.name', 'mixed-node-0' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', 
new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure unicastSeed -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + unicastTransportUri = { seedNode, node, ant -> unicastSeed() } + minimumMasterNodes = { 3 } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. */ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.ssl.keystore.path', 'testnode.jks' + keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' + setting 'node.attr.upgraded', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'xpack.security.audit.outputs', 'index' + setting 'node.name', "upgraded-node-${stopNode}" + dependsOn copyTestNodeKeystore + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + if (withSystemKey) { + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', 
"${mainProject.projectDir}/src/test/resources/system_key" + } } } - Task mixedClusterTestRunner = tasks.getByName("${baseName}#mixedClusterTestRunner") - mixedClusterTestRunner.configure { + Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oldClusterTest.nodes.get(1).transportUri() + ',' + oldClusterTest.nodes.get(2).transportUri() }) + + Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") + oneThirdUpgradedTestRunner.configure { systemProperty 'tests.rest.suite', 'mixed_cluster' - finalizedBy "${baseName}#oldClusterTestCluster#node0.stop" + systemProperty 'tests.first_round', 'true' + // We only need to run these tests once so we may as well do it when we're two thirds upgraded + systemProperty 'tests.rest.blacklist', [ + 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', + 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', + 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', + ].join(',') + finalizedBy "${baseName}#oldClusterTestCluster#node1.stop" + } + + Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask) + + configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oldClusterTest.nodes.get(2).transportUri() + ',' + oneThirdUpgradedTest.nodes.get(0).transportUri() }) + + Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner") + twoThirdsUpgradedTestRunner.configure { + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 
'false' + finalizedBy "${baseName}#oldClusterTestCluster#node2.stop" } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) - configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn(mixedClusterTestRunner, "${baseName}#oldClusterTestCluster#node0.stop") - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - clusterName = 'rolling-upgrade' - unicastTransportUri = { seedNode, node, ant -> mixedClusterTest.nodes.get(0).transportUri() } - minimumMasterNodes = { 2 } - dataDir = { nodeNumber -> oldClusterTest.nodes[0].dataDir } - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.ssl.keystore.path', 'testnode.jks' - keystoreSetting 'xpack.ssl.keystore.secure_password', 'testnode' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'xpack.security.audit.outputs', 'index' - setting 'node.name', 'upgraded-node-0' - dependsOn copyTestNodeKeystore - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } - } + configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2, + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { oneThirdUpgradedTest.nodes.get(0).transportUri() + 
',' + twoThirdsUpgradedTest.nodes.get(0).transportUri() }) Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { systemProperty 'tests.rest.suite', 'upgraded_cluster' + /* + * Force stopping all the upgraded nodes after the test runner + * so they are alive during the test. + */ + finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop" + finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop" // migration tests should only run when the original/old cluster nodes where versions < 5.2.0. // this stinks but we do the check here since our rest tests do not support conditionals @@ -251,8 +260,6 @@ subprojects { systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster' } } - // only need to kill the mixed cluster tests node here because we explicitly told it to not stop nodes upon completion - finalizedBy "${baseName}#mixedClusterTestCluster#stop" } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java index 65b1a7c85dc..a3576b7b8c3 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractUpgradeTestCase.java @@ -37,12 +37,12 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase { return true; } - enum CLUSTER_TYPE { + enum ClusterType { OLD, MIXED, UPGRADED; - public static CLUSTER_TYPE parse(String value) { + public static ClusterType parse(String value) { switch (value) { case "old_cluster": return OLD; @@ -56,7 +56,7 @@ public abstract class AbstractUpgradeTestCase extends ESRestTestCase { } } - protected final CLUSTER_TYPE clusterType = 
CLUSTER_TYPE.parse(System.getProperty("tests.rest.suite")); + protected static final ClusterType CLUSTER_TYPE = ClusterType.parse(System.getProperty("tests.rest.suite")); @Override protected Settings restClientSettings() { diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java index 1f76e670854..7c81a7141a9 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexAuditUpgradeIT.java @@ -8,37 +8,47 @@ package org.elasticsearch.upgrades; import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.Version; import org.elasticsearch.client.Response; +import org.elasticsearch.common.Booleans; import org.hamcrest.Matchers; +import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.hasSize; + public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { - public void testDocsAuditedInOldCluster() throws Exception { - assumeTrue("only runs against old cluster", clusterType == CLUSTER_TYPE.OLD); + public void testAuditLogs() throws Exception { assertBusy(() -> { assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(2); + assertNumUniqueNodeNameBuckets(expectedNumUniqueNodeNameBuckets()); }); } - public void testDocsAuditedInMixedCluster() throws Exception { - assumeTrue("only runs against mixed cluster", clusterType == CLUSTER_TYPE.MIXED); - assertBusy(() -> { - assertAuditDocsExist(); - assertNumUniqueNodeNameBuckets(2); - }); - } - - public void testDocsAuditedInUpgradedCluster() throws Exception { - assumeTrue("only runs against upgraded cluster", clusterType == CLUSTER_TYPE.UPGRADED); - assertBusy(() -> { - assertAuditDocsExist(); 
- assertNumUniqueNodeNameBuckets(4); - }); + private int expectedNumUniqueNodeNameBuckets() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + // There are three nodes in the initial test cluster + return 3; + case MIXED: + if (false == masterIsNewVersion()) { + return 3; + } + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + // One of the old nodes has been removed and we've added a new node + return 4; + } + // Two of the old nodes have been removed and we've added two new nodes + return 5; + case UPGRADED: + return 6; + default: + throw new IllegalArgumentException("Unsupported cluster type [" + CLUSTER_TYPE + "]"); + } } private void assertAuditDocsExist() throws Exception { @@ -51,26 +61,40 @@ public class IndexAuditUpgradeIT extends AbstractUpgradeTestCase { private void assertNumUniqueNodeNameBuckets(int numBuckets) throws Exception { // call API that will hit all nodes - assertEquals(200, client().performRequest("GET", "/_nodes").getStatusLine().getStatusCode()); + Map nodesResponse = entityAsMap(client().performRequest("GET", "/_nodes/_all/info/version")); + logger.info("all nodes {}", nodesResponse); HttpEntity httpEntity = new StringEntity( "{\n" + - " \"aggs\" : {\n" + - " \"nodes\" : {\n" + - " \"terms\" : { \"field\" : \"node_name\" }\n" + - " }\n" + - " }\n" + - "}", ContentType.APPLICATION_JSON); + " \"aggs\" : {\n" + + " \"nodes\" : {\n" + + " \"terms\" : { \"field\" : \"node_name\" }\n" + + " }\n" + + " }\n" + + "}", ContentType.APPLICATION_JSON); Response aggResponse = client().performRequest("GET", "/.security_audit_log*/_search", Collections.singletonMap("pretty", "true"), httpEntity); Map aggResponseMap = entityAsMap(aggResponse); logger.debug("aggResponse {}", aggResponseMap); - Map aggregations = (Map) aggResponseMap.get("aggregations"); + Map aggregations = (Map) aggResponseMap.get("aggregations"); assertNotNull(aggregations); - Map nodesAgg = (Map) aggregations.get("nodes"); + Map nodesAgg = (Map) 
aggregations.get("nodes"); assertNotNull(nodesAgg); - List> buckets = (List>) nodesAgg.get("buckets"); + List buckets = (List) nodesAgg.get("buckets"); assertNotNull(buckets); - assertEquals("Found node buckets " + buckets, numBuckets, buckets.size()); + assertThat("Found node buckets " + buckets, buckets, hasSize(numBuckets)); + } + + /** + * Has the master been upgraded to the new version? + * @throws IOException + */ + private boolean masterIsNewVersion() throws IOException { + Map map = entityAsMap(client().performRequest("GET", "/_nodes/_master")); + map = (Map) map.get("nodes"); + assertThat(map.values(), hasSize(1)); + map = (Map) map.values().iterator().next(); + Version masterVersion = Version.fromString(map.get("version").toString()); + return Version.CURRENT.equals(masterVersion); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java new file mode 100644 index 00000000000..3448117cd2c --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -0,0 +1,124 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.apache.http.util.EntityUtils; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +/** + * Basic test that indexed documents survive the rolling restart. + *

+ * This test is an almost exact copy of IndexingIT in the + * oss rolling restart tests. We should work on a way to remove this + * duplication but for now we have no real way to share code. + */ +public class IndexingIT extends AbstractUpgradeTestCase { + public void testIndexing() throws IOException { + switch (CLUSTER_TYPE) { + case OLD: + break; + case MIXED: + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + break; + case UPGRADED: + Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); + client().performRequest(waitForGreen); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + if (CLUSTER_TYPE == ClusterType.OLD) { + Request createTestIndex = new Request("PUT", "/test_index"); + createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + + String recoverQuickly = "{\"settings\": {\"index.unassigned.node_left.delayed_timeout\": \"100ms\"}}"; + Request createIndexWithReplicas = new Request("PUT", "/index_with_replicas"); + createIndexWithReplicas.setJsonEntity(recoverQuickly); + client().performRequest(createIndexWithReplicas); + + Request createEmptyIndex = new Request("PUT", "/empty_index"); + // Ask for recovery to be quick + createEmptyIndex.setJsonEntity(recoverQuickly); + client().performRequest(createEmptyIndex); + + bulk("test_index", "_OLD", 5); + bulk("index_with_replicas", "_OLD", 5); + } + + int expectedCount; 
+ switch (CLUSTER_TYPE) { + case OLD: + expectedCount = 5; + break; + case MIXED: + if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { + expectedCount = 5; + } else { + expectedCount = 10; + } + break; + case UPGRADED: + expectedCount = 15; + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + + assertCount("test_index", expectedCount); + assertCount("index_with_replicas", 5); + assertCount("empty_index", 0); + + if (CLUSTER_TYPE != ClusterType.OLD) { + bulk("test_index", "_" + CLUSTER_TYPE, 5); + Request toBeDeleted = new Request("PUT", "/test_index/doc/to_be_deleted"); + toBeDeleted.addParameter("refresh", "true"); + toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); + client().performRequest(toBeDeleted); + assertCount("test_index", expectedCount + 6); + + Request delete = new Request("DELETE", "/test_index/doc/to_be_deleted"); + delete.addParameter("refresh", "true"); + client().performRequest(delete); + + assertCount("test_index", expectedCount + 5); + } + } + + private void bulk(String index, String valueSuffix, int count) throws IOException { + StringBuilder b = new StringBuilder(); + for (int i = 0; i < count; i++) { + b.append("{\"index\": {\"_index\": \"").append(index).append("\", \"_type\": \"doc\"}}\n"); + b.append("{\"f1\": \"v").append(i).append(valueSuffix).append("\", \"f2\": ").append(i).append("}\n"); + } + Request bulk = new Request("POST", "/_bulk"); + bulk.addParameter("refresh", "true"); + bulk.setJsonEntity(b.toString()); + client().performRequest(bulk); + } + + private void assertCount(String index, int count) throws IOException { + Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); + searchTestIndexRequest.addParameter("filter_path", "hits.total"); + Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); + assertEquals("{\"hits\":{\"total\":" + count + "}}", + 
EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)); + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 4fa0c9a535f..705122252e7 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -25,7 +25,7 @@ import java.util.Map; public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testGeneratingTokenInOldCluster() throws Exception { - assumeTrue("this test should only run against the old cluster", clusterType == CLUSTER_TYPE.OLD); + assumeTrue("this test should only run against the old cluster", CLUSTER_TYPE == ClusterType.OLD); final StringEntity tokenPostBody = new StringEntity("{\n" + " \"username\": \"test_user\",\n" + " \"password\": \"x-pack-test-password\",\n" + @@ -61,7 +61,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed or upgraded cluster", - clusterType == CLUSTER_TYPE.MIXED || clusterType == CLUSTER_TYPE.UPGRADED); + CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -69,7 +69,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { } public void testMixedCluster() throws Exception { - assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.MIXED); + assumeTrue("this test should only run against the mixed 
cluster", CLUSTER_TYPE == ClusterType.MIXED); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); assertOK(getResponse); @@ -117,7 +117,7 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { } public void testUpgradedCluster() throws Exception { - assumeTrue("this test should only run against the mixed cluster", clusterType == CLUSTER_TYPE.UPGRADED); + assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.UPGRADED); Response getResponse = client().performRequest("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index 93db3996a6b..3dd1f708959 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -1,166 +1,13 @@ --- -setup: - - do: - cluster.health: - # if the primary shard of an index with (number_of_replicas > 0) ends up on the new node, the replica cannot be - # allocated to the old node (see NodeVersionAllocationDecider). x-pack automatically creates indices with - # replicas, for example monitoring-data-*. 
- wait_for_status: yellow - wait_for_nodes: 2 - ---- -"Index data and search on the mixed cluster": - - do: - search: - index: test_index - - - match: { hits.total: 5 } # no new indexed data, so expect the original 5 documents from the old cluster - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_mixed", "f2": 5}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_mixed", "f2": 6}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_mixed", "f2": 7}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_mixed", "f2": 8}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_mixed", "f2": 9}' - - - do: - index: - index: test_index - type: test_type - id: d10 - body: {"f1": "v6_mixed", "f2": 10} - - - do: - index: - index: test_index - type: test_type - id: d11 - body: {"f1": "v7_mixed", "f2": 11} - - - do: - index: - index: test_index - type: test_type - id: d12 - body: {"f1": "v8_mixed", "f2": 12} - - - do: - indices.refresh: - index: test_index - - - do: - search: - index: test_index - - - match: { hits.total: 13 } # 5 docs from old cluster, 8 docs from mixed cluster - - - do: - delete: - index: test_index - type: test_type - id: d10 - - - do: - delete: - index: test_index - type: test_type - id: d11 - - - do: - delete: - index: test_index - type: test_type - id: d12 - - - do: - indices.refresh: - index: test_index - ---- -"Basic scroll mixed": - - do: - indices.create: - index: test_scroll - - do: - index: - index: test_scroll - type: test - id: 42 - body: { foo: 1 } - - - do: - index: - index: test_scroll - type: test - id: 43 - body: { foo: 2 } - - - do: - indices.refresh: {} - - - do: - search: - index: test_scroll - size: 1 - scroll: 1m - sort: foo - body: - query: - match_all: {} - - - set: {_scroll_id: scroll_id} - - match: {hits.total: 2 } - - length: {hits.hits: 1 } - - match: 
{hits.hits.0._id: "42" } - - - do: - index: - index: test_scroll - type: test - id: 44 - body: { foo: 3 } - - - do: - indices.refresh: {} - - - do: - scroll: - body: { "scroll_id": "$scroll_id", "scroll": "1m"} - - - match: {hits.total: 2 } - - length: {hits.hits: 1 } - - match: {hits.hits.0._id: "43" } - - - do: - scroll: - scroll_id: $scroll_id - scroll: 1m - - - match: {hits.total: 2 } - - length: {hits.hits: 0 } - - - do: - clear_scroll: - scroll_id: $scroll_id - ---- -"Start scroll in mixed cluster for upgraded": +"Start scroll in mixed cluster on upgraded node that we will continue after upgrade": - do: indices.create: index: upgraded_scroll wait_for_active_shards: all body: settings: - number_of_replicas: "0" - index.routing.allocation.include.upgraded: "first" + number_of_replicas: 0 + index.routing.allocation.include.upgraded: true - do: index: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml index 750bedc4c6d..cfe3ca97330 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_security.yml @@ -1,13 +1,5 @@ --- "Verify user and role in mixed cluster": - - do: - headers: - Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - - match: { timed_out: false } - - do: xpack.security.get_user: username: "native_user" @@ -36,6 +28,3 @@ username: "kibana,logstash_system" - match: { kibana.enabled: false } - match: { logstash_system.enabled: true } - - - diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index daf2f913fff..6ea8771c237 100644 
--- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -1,10 +1,3 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - --- "Test get old cluster job": - skip: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml index 8a06c91cc8a..0ec288f9097 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/40_ml_datafeed_crud.yml @@ -1,9 +1,3 @@ -setup: - - do: - cluster.health: - wait_for_status: yellow - wait_for_nodes: 2 - --- "Test old cluster datafeed": - do: diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml deleted file mode 100644 index a780709400a..00000000000 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -"Index data and search on the old cluster": - - do: - indices.create: - index: test_index - wait_for_active_shards : all - body: - settings: - index: - number_of_replicas: 1 - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_old", "f2": 0}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_old", "f2": 1}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_old", "f2": 2}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_old", "f2": 3}' - - '{"index": {"_index": "test_index", "_type": 
"test_type"}}' - - '{"f1": "v5_old", "f2": 4}' - - - do: - search: - index: test_index - - - match: { hits.total: 5 } diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 9c3443339a7..7249b4a32c7 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -1,42 +1,5 @@ --- -"Index data and search on the upgraded cluster": - - do: - cluster.health: - wait_for_status: green - wait_for_nodes: 2 - # wait for long enough that we give delayed unassigned shards to stop being delayed - timeout: 70s - level: shards - - - do: - search: - index: test_index - - - match: { hits.total: 10 } # no new indexed data, so expect the original 10 documents from the old and mixed clusters - - - do: - bulk: - refresh: true - body: - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v1_upgraded", "f2": 10}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v2_upgraded", "f2": 11}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v3_upgraded", "f2": 12}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v4_upgraded", "f2": 13}' - - '{"index": {"_index": "test_index", "_type": "test_type"}}' - - '{"f1": "v5_upgraded", "f2": 14}' - - - do: - search: - index: test_index - - - match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs - ---- -"Get indexed scroll and execute scroll": +"Continue scroll after upgrade": - do: get: index: scroll_index diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml index 
9c709748391..46ade4823a2 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_security.yml @@ -5,7 +5,7 @@ Authorization: "Basic bmF0aXZlX3VzZXI6eC1wYWNrLXRlc3QtcGFzc3dvcmQ=" cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s - match: { timed_out: false } @@ -22,4 +22,3 @@ - match: { native_role.cluster.0: "all" } - match: { native_role.indices.0.names.0: "test_index" } - match: { native_role.indices.0.privileges.0: "all" } - diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml index 9520e954d7b..91d29457289 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_ml_jobs_crud.yml @@ -2,7 +2,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we give delayed unassigned shards to stop being delayed timeout: 70s diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml index ed6a66ae1a5..6b4c963dd53 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/40_ml_datafeed_crud.yml @@ -2,7 +2,7 @@ setup: - do: cluster.health: wait_for_status: green - wait_for_nodes: 2 + wait_for_nodes: 3 # wait for long enough that we 
give delayed unassigned shards to stop being delayed timeout: 70s @@ -97,4 +97,3 @@ setup: xpack.ml.delete_job: job_id: mixed-cluster-datafeed-job - match: { acknowledged: true } - diff --git a/x-pack/test/feature-aware/build.gradle b/x-pack/test/feature-aware/build.gradle new file mode 100644 index 00000000000..217ed25a2d4 --- /dev/null +++ b/x-pack/test/feature-aware/build.gradle @@ -0,0 +1,16 @@ +apply plugin: 'elasticsearch.build' + +dependencies { + compile 'org.ow2.asm:asm:6.2' + compile "org.elasticsearch:elasticsearch:${version}" + compile "org.elasticsearch.plugin:x-pack-core:${version}" + testCompile "org.elasticsearch.test:framework:${version}" +} + +forbiddenApisMain.enabled = true + +dependencyLicenses.enabled = false + +jarHell.enabled = false + +thirdPartyAudit.enabled = false diff --git a/x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java b/x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java new file mode 100644 index 00000000000..7746692b408 --- /dev/null +++ b/x-pack/test/feature-aware/src/main/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheck.java @@ -0,0 +1,180 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.test.feature_aware; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.objectweb.asm.ClassReader; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; +import java.util.function.Consumer; + +/** + * Used in the featureAwareCheck to check for classes in X-Pack that implement customs but do not extend the appropriate marker interface. + */ +public final class FeatureAwareCheck { + + /** + * Check the class directories specified by the arguments for classes in X-Pack that implement customs but do not extend the appropriate + * marker interface that provides a mix-in implementation of {@link ClusterState.FeatureAware#getRequiredFeature()}. 
+ * + * @param args the class directories to check + * @throws IOException if an I/O exception is walking the class directories + */ + public static void main(final String[] args) throws IOException { + systemOutPrintln("checking for custom violations"); + final List violations = new ArrayList<>(); + checkDirectories(violations::add, args); + if (violations.isEmpty()) { + systemOutPrintln("no custom violations found"); + } else { + violations.forEach(violation -> + systemOutPrintln( + "class [" + violation.name + "] implements" + + " [" + violation.interfaceName + " but does not implement" + + " [" + violation.expectedInterfaceName + "]") + ); + throw new IllegalStateException( + "found custom" + (violations.size() == 1 ? "" : "s") + " in X-Pack not extending appropriate X-Pack mix-in"); + } + } + + @SuppressForbidden(reason = "System.out#println") + private static void systemOutPrintln(final String s) { + System.out.println(s); + } + + private static void checkDirectories( + final Consumer callback, + final String... classDirectories) throws IOException { + for (final String classDirectory : classDirectories) { + final Path root = pathsGet(classDirectory); + if (Files.isDirectory(root)) { + Files.walkFileTree(root, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException { + if (Files.isRegularFile(file) && file.getFileName().toString().endsWith(".class")) { + try (InputStream in = Files.newInputStream(file)) { + checkClass(in, callback); + } + } + return super.visitFile(file, attrs); + } + }); + } else { + throw new FileNotFoundException("class directory [" + classDirectory + "] should exist"); + } + } + } + + @SuppressForbidden(reason = "Paths#get") + private static Path pathsGet(final String pathString) { + return Paths.get(pathString); + } + + /** + * Represents a feature-aware violation. 
+ */ + static class FeatureAwareViolation { + + final String name; + final String interfaceName; + final String expectedInterfaceName; + + /** + * Constructs a representation of a feature-aware violation. + * + * @param name the name of the custom class + * @param interfaceName the name of the feature-aware interface + * @param expectedInterfaceName the name of the expected mix-in class + */ + FeatureAwareViolation(final String name, final String interfaceName, final String expectedInterfaceName) { + this.name = name; + this.interfaceName = interfaceName; + this.expectedInterfaceName = expectedInterfaceName; + } + + } + + /** + * Loads a class from the specified input stream and checks that if it implements a feature-aware custom then it extends the appropriate + * mix-in interface from X-Pack. If the class does not, then the specified callback is invoked. + * + * @param in the input stream + * @param callback the callback to invoke + * @throws IOException if an I/O exception occurs loading the class hierarchy + */ + static void checkClass(final InputStream in, final Consumer callback) throws IOException { + // the class format only reports declared interfaces so we have to walk the hierarchy looking for all interfaces + final List interfaces = new ArrayList<>(); + ClassReader cr = new ClassReader(in); + final String name = cr.getClassName(); + do { + interfaces.addAll(Arrays.asList(cr.getInterfaces())); + final String superName = cr.getSuperName(); + if ("java/lang/Object".equals(superName)) { + break; + } + cr = new ClassReader(superName); + } while (true); + checkClass(name, interfaces, callback); + } + + private static void checkClass( + final String name, + final List interfaces, + final Consumer callback) { + checkCustomForClass(ClusterState.Custom.class, XPackPlugin.XPackClusterStateCustom.class, name, interfaces, callback); + checkCustomForClass(MetaData.Custom.class, XPackPlugin.XPackMetaDataCustom.class, name, interfaces, callback); + 
checkCustomForClass(PersistentTaskParams.class, XPackPlugin.XPackPersistentTaskParams.class, name, interfaces, callback); + } + + private static void checkCustomForClass( + final Class interfaceToCheck, + final Class expectedInterface, + final String name, + final List interfaces, + final Consumer callback) { + final Set interfaceSet = new TreeSet<>(interfaces); + final String interfaceToCheckName = formatClassName(interfaceToCheck); + final String expectedXPackInterfaceName = formatClassName(expectedInterface); + if (interfaceSet.contains(interfaceToCheckName) + && name.equals(expectedXPackInterfaceName) == false + && interfaceSet.contains(expectedXPackInterfaceName) == false) { + assert name.startsWith("org/elasticsearch/license") || name.startsWith("org/elasticsearch/xpack"); + callback.accept(new FeatureAwareViolation(name, interfaceToCheckName, expectedXPackInterfaceName)); + } + } + + /** + * Format the specified class to a name in the ASM format replacing all dots in the class name with forward-slashes. + * + * @param clazz the class whose name to format + * @return the formatted class name + */ + static String formatClassName(final Class clazz) { + return clazz.getName().replace(".", "/"); + } + +} diff --git a/x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java b/x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java new file mode 100644 index 00000000000..2dde9efce42 --- /dev/null +++ b/x-pack/test/feature-aware/src/test/java/org/elasticsearch/xpack/test/feature_aware/FeatureAwareCheckTests.java @@ -0,0 +1,323 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.test.feature_aware; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.persistent.PersistentTaskParams; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.XPackPlugin; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.equalTo; + +public class FeatureAwareCheckTests extends ESTestCase { + + public void testClusterStateCustomViolation() throws IOException { + runCustomViolationTest( + ClusterStateCustomViolation.class, + getClass(), + ClusterState.Custom.class, + XPackPlugin.XPackClusterStateCustom.class); + } + + public void testClusterStateCustom() throws IOException { + runCustomTest(XPackClusterStateCustom.class, getClass(), ClusterState.Custom.class, XPackPlugin.XPackClusterStateCustom.class); + } + + public void testClusterStateCustomMarkerInterface() throws IOException { + // marker interfaces do not implement the marker interface but should not fail the feature aware check + runCustomTest( + XPackPlugin.XPackClusterStateCustom.class, + XPackPlugin.class, + ClusterState.Custom.class, + XPackPlugin.XPackClusterStateCustom.class); + } + + public void testMetaDataCustomViolation() throws IOException { + runCustomViolationTest(MetaDataCustomViolation.class, getClass(), MetaData.Custom.class, XPackPlugin.XPackMetaDataCustom.class); + } + + public void testMetaDataCustom() throws IOException { + runCustomTest(XPackMetaDataCustom.class, getClass(), MetaData.Custom.class, XPackPlugin.XPackMetaDataCustom.class); + } + + public void testMetaDataCustomMarkerInterface() throws IOException { + // marker 
interfaces do not implement the marker interface but should not fail the feature aware check + runCustomTest( + XPackPlugin.XPackMetaDataCustom.class, + XPackPlugin.class, + MetaData.Custom.class, + XPackPlugin.XPackMetaDataCustom.class); + } + + public void testPersistentTaskParamsViolation() throws IOException { + runCustomViolationTest( + PersistentTaskParamsViolation.class, + getClass(), + PersistentTaskParams.class, + XPackPlugin.XPackPersistentTaskParams.class); + } + + public void testPersistentTaskParams() throws IOException { + runCustomTest(XPackPersistentTaskParams.class, getClass(), PersistentTaskParams.class, XPackPlugin.XPackPersistentTaskParams.class); + } + + public void testPersistentTaskParamsMarkerInterface() throws IOException { + // marker interfaces do not implement the marker interface but should not fail the feature aware check + runCustomTest( + XPackPlugin.XPackPersistentTaskParams.class, + XPackPlugin.class, + PersistentTaskParams.class, + XPackPlugin.XPackPersistentTaskParams.class); + } + + abstract class ClusterStateCustomFeatureAware implements ClusterState.Custom { + + private final String writeableName; + + ClusterStateCustomFeatureAware(final String writeableName) { + this.writeableName = writeableName; + } + + @Override + public Diff diff(ClusterState.Custom previousState) { + return null; + } + + @Override + public String getWriteableName() { + return writeableName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + } + + class ClusterStateCustomViolation extends ClusterStateCustomFeatureAware { + + ClusterStateCustomViolation() { + super("cluster_state_custom_violation"); + } + } + + class XPackClusterStateCustom extends 
ClusterStateCustomFeatureAware implements XPackPlugin.XPackClusterStateCustom { + + XPackClusterStateCustom() { + super("x_pack_cluster_state_custom"); + } + + } + + abstract class MetaDataCustomFeatureAware implements MetaData.Custom { + + private final String writeableName; + + MetaDataCustomFeatureAware(final String writeableName) { + this.writeableName = writeableName; + } + + @Override + public EnumSet context() { + return MetaData.ALL_CONTEXTS; + } + + @Override + public Diff diff(MetaData.Custom previousState) { + return null; + } + + @Override + public String getWriteableName() { + return writeableName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + } + + class MetaDataCustomViolation extends MetaDataCustomFeatureAware { + + MetaDataCustomViolation() { + super("meta_data_custom_violation"); + } + + } + + class XPackMetaDataCustom extends MetaDataCustomFeatureAware implements XPackPlugin.XPackMetaDataCustom { + + XPackMetaDataCustom() { + super("x_pack_meta_data_custom"); + } + + } + + abstract class PersistentTaskParamsFeatureAware implements PersistentTaskParams { + + private final String writeableName; + + PersistentTaskParamsFeatureAware(final String writeableName) { + this.writeableName = writeableName; + } + + @Override + public String getWriteableName() { + return writeableName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT.minimumCompatibilityVersion(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + + } + + @Override + public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException { + return builder; + } + + } + + 
class PersistentTaskParamsViolation extends PersistentTaskParamsFeatureAware { + + PersistentTaskParamsViolation() { + super("persistent_task_params_violation"); + } + + } + + class XPackPersistentTaskParams extends PersistentTaskParamsFeatureAware implements XPackPlugin.XPackPersistentTaskParams { + + XPackPersistentTaskParams() { + super("x_pack_persistent_task_params"); + } + + } + + private class FeatureAwareViolationConsumer implements Consumer { + + private final AtomicBoolean called = new AtomicBoolean(); + private final String name; + private final String interfaceName; + private final String expectedInterfaceName; + + FeatureAwareViolationConsumer(final String name, final String interfaceName, final String expectedInterfaceName) { + this.name = name; + this.interfaceName = interfaceName; + this.expectedInterfaceName = expectedInterfaceName; + } + + @Override + public void accept(final org.elasticsearch.xpack.test.feature_aware.FeatureAwareCheck.FeatureAwareViolation featureAwareViolation) { + called.set(true); + assertThat(featureAwareViolation.name, equalTo(name)); + assertThat(featureAwareViolation.interfaceName, equalTo(interfaceName)); + assertThat(featureAwareViolation.expectedInterfaceName, equalTo(expectedInterfaceName)); + } + + } + + /** + * Runs a test on an actual class implementing a custom interface and not the expected marker interface. 
+ * + * @param clazz the custom implementation + * @param outerClazz the outer class to load the custom implementation relative to + * @param interfaceClazz the custom + * @param expectedInterfaceClazz the marker interface + * @throws IOException if an I/O error occurs reading the class + */ + private void runCustomViolationTest( + final Class clazz, + final Class outerClazz, + final Class interfaceClazz, + final Class expectedInterfaceClazz) throws IOException { + runTest(clazz, outerClazz, interfaceClazz, expectedInterfaceClazz, true); + } + + /** + * Runs a test on an actual class implementing a custom interface and the expected marker interface. + * + * @param clazz the custom implementation + * @param outerClazz the outer class to load the custom implementation relative to + * @param interfaceClazz the custom + * @param expectedInterfaceClazz the marker interface + * @throws IOException if an I/O error occurs reading the class + */ + private void runCustomTest( + final Class clazz, + final Class outerClazz, + final Class interfaceClazz, + final Class expectedInterfaceClazz) throws IOException { + runTest(clazz, outerClazz, interfaceClazz, expectedInterfaceClazz, false); + } + + /** + * Runs a test on an actual class implementing a custom interface and should implement the expected marker interface if and only if + * the specified violation parameter is false. 
+ * + * @param clazz the custom implementation + * @param outerClazz the outer class to load the custom implementation relative to + * @param interfaceClazz the custom + * @param expectedInterfaceClazz the marker interface + * @param violation whether or not the actual class is expected to fail the feature aware check + * @throws IOException if an I/O error occurs reading the class + */ + private void runTest( + final Class clazz, + final Class outerClazz, + final Class interfaceClazz, + final Class expectedInterfaceClazz, + final boolean violation) throws IOException { + final String name = clazz.getName(); + final FeatureAwareViolationConsumer callback = + new FeatureAwareViolationConsumer( + FeatureAwareCheck.formatClassName(clazz), + FeatureAwareCheck.formatClassName(interfaceClazz), + FeatureAwareCheck.formatClassName(expectedInterfaceClazz)); + FeatureAwareCheck.checkClass(outerClazz.getResourceAsStream(name.substring(1 + name.lastIndexOf(".")) + ".class"), callback); + assertThat(callback.called.get(), equalTo(violation)); + } + +}