Merge branch 'master' into index-lifecycle

Colin Goodheart-Smithe 2018-05-02 09:01:08 +01:00
commit 4ad44a76b0
GPG Key ID: F975E7BDD739B3C7
102 changed files with 3404 additions and 2424 deletions

buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy

@@ -561,7 +561,6 @@ class BuildPlugin implements Plugin<Project> {
          */
         List html4Projects = [
             ':server',
-            ':x-pack:plugin:core',
         ]
         if (false == html4Projects.contains(project.path)) {
             javadoc.options.addBooleanOption('html5', true)

client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java

@@ -48,7 +48,7 @@ public final class ClusterClient {
      */
     public ClusterUpdateSettingsResponse putSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest, Header... headers)
             throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings,
+        return restHighLevelClient.performRequestAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
             ClusterUpdateSettingsResponse::fromXContent, emptySet(), headers);
     }
@@ -60,7 +60,7 @@ public final class ClusterClient {
      */
     public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest,
                                  ActionListener<ClusterUpdateSettingsResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, Request::clusterPutSettings,
+        restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings,
             ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
     }
 }
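Note for readers skimming the diff: the change here is purely a rename of the class that hosts the static converter methods; call sites keep passing them as method references, so only the reference's declaring class changes. A minimal, self-contained sketch of that dispatch shape (all names below are hypothetical stand-ins, not the client's real API):

import java.util.function.Function;

final class ConverterSketch {
    // Stand-in for the low-level request object the converters build.
    static final class LowLevelRequest {
        final String endpoint;
        LowLevelRequest(String endpoint) { this.endpoint = endpoint; }
    }

    // Stand-in for RequestConverters.clusterPutSettings: a pure static translation step.
    static LowLevelRequest clusterPutSettings(String settingsJson) {
        return new LowLevelRequest("/_cluster/settings");
    }

    // Stand-in for performRequestAndParseEntity: the converter arrives as a
    // method reference, so renaming its declaring class does not change the shape.
    static <Req> LowLevelRequest perform(Req apiRequest, Function<Req, LowLevelRequest> converter) {
        return converter.apply(apiRequest);
    }

    public static void main(String[] args) {
        LowLevelRequest r = perform("{\"transient\":{}}", ConverterSketch::clusterPutSettings);
        System.out.println(r.endpoint); // prints /_cluster/settings
    }
}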

client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java

@@ -74,8 +74,8 @@ public final class IndicesClient {
      * Delete Index API on elastic.co</a>
      */
     public DeleteIndexResponse delete(DeleteIndexRequest deleteIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex,
+            DeleteIndexResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -85,8 +85,8 @@ public final class IndicesClient {
      * Delete Index API on elastic.co</a>
      */
     public void deleteAsync(DeleteIndexRequest deleteIndexRequest, ActionListener<DeleteIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, Request::deleteIndex, DeleteIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(deleteIndexRequest, RequestConverters::deleteIndex,
+            DeleteIndexResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
@@ -96,8 +96,8 @@ public final class IndicesClient {
      * Create Index API on elastic.co</a>
      */
     public CreateIndexResponse create(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, RequestConverters::createIndex,
+            CreateIndexResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -107,8 +107,8 @@ public final class IndicesClient {
      * Create Index API on elastic.co</a>
      */
     public void createAsync(CreateIndexRequest createIndexRequest, ActionListener<CreateIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, RequestConverters::createIndex,
+            CreateIndexResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
@@ -118,8 +118,8 @@ public final class IndicesClient {
      * Put Mapping API on elastic.co</a>
      */
     public PutMappingResponse putMapping(PutMappingRequest putMappingRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(putMappingRequest, RequestConverters::putMapping,
+            PutMappingResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -130,8 +130,8 @@ public final class IndicesClient {
      */
     public void putMappingAsync(PutMappingRequest putMappingRequest, ActionListener<PutMappingResponse> listener,
                                 Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, Request::putMapping, PutMappingResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(putMappingRequest, RequestConverters::putMapping,
+            PutMappingResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
@@ -142,7 +142,7 @@ public final class IndicesClient {
      * Index Aliases API on elastic.co</a>
      */
     public IndicesAliasesResponse updateAliases(IndicesAliasesRequest indicesAliasesRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, Request::updateAliases,
+        return restHighLevelClient.performRequestAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases,
             IndicesAliasesResponse::fromXContent, emptySet(), headers);
     }
@@ -155,7 +155,7 @@ public final class IndicesClient {
      */
     public void updateAliasesAsync(IndicesAliasesRequest indicesAliasesRequest, ActionListener<IndicesAliasesResponse> listener,
                                    Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, Request::updateAliases,
+        restHighLevelClient.performRequestAsyncAndParseEntity(indicesAliasesRequest, RequestConverters::updateAliases,
             IndicesAliasesResponse::fromXContent, listener, emptySet(), headers);
     }
@@ -166,8 +166,8 @@ public final class IndicesClient {
      * Open Index API on elastic.co</a>
      */
     public OpenIndexResponse open(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, RequestConverters::openIndex,
+            OpenIndexResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -177,8 +177,8 @@ public final class IndicesClient {
      * Open Index API on elastic.co</a>
      */
     public void openAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, RequestConverters::openIndex,
+            OpenIndexResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
@@ -188,8 +188,8 @@ public final class IndicesClient {
      * Close Index API on elastic.co</a>
      */
     public CloseIndexResponse close(CloseIndexRequest closeIndexRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(closeIndexRequest, RequestConverters::closeIndex,
+            CloseIndexResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -199,8 +199,8 @@ public final class IndicesClient {
      * Close Index API on elastic.co</a>
      */
     public void closeAsync(CloseIndexRequest closeIndexRequest, ActionListener<CloseIndexResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, Request::closeIndex, CloseIndexResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(closeIndexRequest, RequestConverters::closeIndex,
+            CloseIndexResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
@@ -210,8 +210,8 @@ public final class IndicesClient {
      * Indices Aliases API on elastic.co</a>
      */
     public boolean existsAlias(GetAliasesRequest getAliasesRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequest(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse,
-            emptySet(), headers);
+        return restHighLevelClient.performRequest(getAliasesRequest, RequestConverters::existsAlias,
+            RestHighLevelClient::convertExistsResponse, emptySet(), headers);
     }

     /**
@@ -221,8 +221,8 @@ public final class IndicesClient {
      * Indices Aliases API on elastic.co</a>
      */
     public void existsAliasAsync(GetAliasesRequest getAliasesRequest, ActionListener<Boolean> listener, Header... headers) {
-        restHighLevelClient.performRequestAsync(getAliasesRequest, Request::existsAlias, RestHighLevelClient::convertExistsResponse,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsync(getAliasesRequest, RequestConverters::existsAlias,
+            RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers);
     }

     /**
@@ -231,7 +231,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
      */
     public RefreshResponse refresh(RefreshRequest refreshRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent,
             emptySet(), headers);
     }
@@ -241,7 +241,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-refresh.html"> Refresh API on elastic.co</a>
      */
     public void refreshAsync(RefreshRequest refreshRequest, ActionListener<RefreshResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, Request::refresh, RefreshResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(refreshRequest, RequestConverters::refresh, RefreshResponse::fromXContent,
             listener, emptySet(), headers);
     }
@@ -251,7 +251,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
      */
     public FlushResponse flush(FlushRequest flushRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(flushRequest, Request::flush, FlushResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent,
             emptySet(), headers);
     }
@@ -261,7 +261,7 @@ public final class IndicesClient {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-flush.html"> Flush API on elastic.co</a>
      */
     public void flushAsync(FlushRequest flushRequest, ActionListener<FlushResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, Request::flush, FlushResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(flushRequest, RequestConverters::flush, FlushResponse::fromXContent,
             listener, emptySet(), headers);
     }
@@ -272,8 +272,8 @@ public final class IndicesClient {
      * Force Merge API on elastic.co</a>
      */
     public ForceMergeResponse forceMerge(ForceMergeRequest forceMergeRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(forceMergeRequest, RequestConverters::forceMerge,
+            ForceMergeResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -283,8 +283,8 @@ public final class IndicesClient {
      * Force Merge API on elastic.co</a>
      */
     public void forceMergeAsync(ForceMergeRequest forceMergeRequest, ActionListener<ForceMergeResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, Request::forceMerge, ForceMergeResponse::fromXContent,
-            listener, emptySet(), headers);
+        restHighLevelClient.performRequestAsyncAndParseEntity(forceMergeRequest, RequestConverters::forceMerge,
+            ForceMergeResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
@@ -294,7 +294,7 @@ public final class IndicesClient {
      * Clear Cache API on elastic.co</a>
      */
     public ClearIndicesCacheResponse clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, Request::clearCache,
+        return restHighLevelClient.performRequestAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache,
             ClearIndicesCacheResponse::fromXContent, emptySet(), headers);
     }
@@ -306,7 +306,7 @@ public final class IndicesClient {
      */
     public void clearCacheAsync(ClearIndicesCacheRequest clearIndicesCacheRequest, ActionListener<ClearIndicesCacheResponse> listener,
                                 Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, Request::clearCache,
+        restHighLevelClient.performRequestAsyncAndParseEntity(clearIndicesCacheRequest, RequestConverters::clearCache,
             ClearIndicesCacheResponse::fromXContent, listener, emptySet(), headers);
     }
@@ -319,7 +319,7 @@ public final class IndicesClient {
     public boolean exists(GetIndexRequest request, Header... headers) throws IOException {
         return restHighLevelClient.performRequest(
             request,
-            Request::indicesExist,
+            RequestConverters::indicesExist,
             RestHighLevelClient::convertExistsResponse,
             Collections.emptySet(),
             headers
@@ -335,7 +335,7 @@ public final class IndicesClient {
     public void existsAsync(GetIndexRequest request, ActionListener<Boolean> listener, Header... headers) {
         restHighLevelClient.performRequestAsync(
             request,
-            Request::indicesExist,
+            RequestConverters::indicesExist,
             RestHighLevelClient::convertExistsResponse,
             listener,
             Collections.emptySet(),
@@ -350,7 +350,7 @@ public final class IndicesClient {
      * Shrink Index API on elastic.co</a>
      */
     public ResizeResponse shrink(ResizeRequest resizeRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent,
             emptySet(), headers);
     }
@@ -361,7 +361,7 @@ public final class IndicesClient {
      * Shrink Index API on elastic.co</a>
      */
     public void shrinkAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::shrink, ResizeResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::shrink, ResizeResponse::fromXContent,
             listener, emptySet(), headers);
     }
@@ -372,7 +372,7 @@ public final class IndicesClient {
      * Split Index API on elastic.co</a>
      */
     public ResizeResponse split(ResizeRequest resizeRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent,
+        return restHighLevelClient.performRequestAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent,
             emptySet(), headers);
     }
@@ -383,7 +383,7 @@ public final class IndicesClient {
      * Split Index API on elastic.co</a>
      */
     public void splitAsync(ResizeRequest resizeRequest, ActionListener<ResizeResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, Request::split, ResizeResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(resizeRequest, RequestConverters::split, ResizeResponse::fromXContent,
             listener, emptySet(), headers);
     }
@@ -394,8 +394,8 @@ public final class IndicesClient {
      * Rollover Index API on elastic.co</a>
      */
     public RolloverResponse rollover(RolloverRequest rolloverRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent,
-            emptySet(), headers);
+        return restHighLevelClient.performRequestAndParseEntity(rolloverRequest, RequestConverters::rollover,
+            RolloverResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -405,7 +405,7 @@ public final class IndicesClient {
      * Rollover Index API on elastic.co</a>
      */
     public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener<RolloverResponse> listener, Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, Request::rollover, RolloverResponse::fromXContent,
+        restHighLevelClient.performRequestAsyncAndParseEntity(rolloverRequest, RequestConverters::rollover, RolloverResponse::fromXContent,
             listener, emptySet(), headers);
     }
@@ -416,7 +416,7 @@ public final class IndicesClient {
      * API on elastic.co</a>
      */
     public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException {
-        return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, Request::indexPutSettings,
+        return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings,
             UpdateSettingsResponse::fromXContent, emptySet(), headers);
     }
@@ -428,7 +428,7 @@ public final class IndicesClient {
      */
     public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener<UpdateSettingsResponse> listener,
                                  Header... headers) {
-        restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, Request::indexPutSettings,
+        restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, RequestConverters::indexPutSettings,
             UpdateSettingsResponse::fromXContent, listener, emptySet(), headers);
     }
 }
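Throughout this file, every endpoint keeps the same two-method pattern: a blocking variant that returns the parsed response, and an Async variant that reports through an ActionListener. A rough sketch of that pairing, with java.util.concurrent standing in for the client's internal async machinery (the real client does not use CompletableFuture; this is only illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

final class SyncAsyncPairSketch {
    // Blocking form: convert the request, execute it, parse and return the response.
    static String refresh(String index) {
        return "refreshed " + index;
    }

    // Async form: same work, but the result is delivered to a callback instead of returned.
    static void refreshAsync(String index, Consumer<String> listener) {
        CompletableFuture.supplyAsync(() -> refresh(index)).thenAccept(listener);
    }

    public static void main(String[] args) throws InterruptedException {
        refreshAsync("logs", System.out::println);
        Thread.sleep(200); // crude wait so the demo prints before the JVM exits
    }
}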

client/rest-high-level/src/main/java/org/elasticsearch/client/{Request.java → RequestConverters.java}

@@ -89,117 +89,85 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Objects;
 import java.util.StringJoiner;

-public final class Request {
+final class RequestConverters {
     static final XContentType REQUEST_BODY_CONTENT_TYPE = XContentType.JSON;

-    private final String method;
-    private final String endpoint;
-    private final Map<String, String> parameters;
-    private final HttpEntity entity;
-
-    public Request(String method, String endpoint, Map<String, String> parameters, HttpEntity entity) {
-        this.method = Objects.requireNonNull(method, "method cannot be null");
-        this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null");
-        this.parameters = Objects.requireNonNull(parameters, "parameters cannot be null");
-        this.entity = entity;
-    }
-
-    public String getMethod() {
-        return method;
-    }
-
-    public String getEndpoint() {
-        return endpoint;
-    }
-
-    public Map<String, String> getParameters() {
-        return parameters;
-    }
-
-    public HttpEntity getEntity() {
-        return entity;
-    }
-
-    @Override
-    public String toString() {
-        return "Request{" +
-                "method='" + method + '\'' +
-                ", endpoint='" + endpoint + '\'' +
-                ", params=" + parameters +
-                ", hasBody=" + (entity != null) +
-                '}';
-    }
+    private RequestConverters() {
+        // Contains only status utility methods
+    }

     static Request delete(DeleteRequest deleteRequest) {
         String endpoint = endpoint(deleteRequest.index(), deleteRequest.type(), deleteRequest.id());
-        Params parameters = Params.builder();
+        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
+        Params parameters = new Params(request);
         parameters.withRouting(deleteRequest.routing());
         parameters.withTimeout(deleteRequest.timeout());
         parameters.withVersion(deleteRequest.version());
         parameters.withVersionType(deleteRequest.versionType());
         parameters.withRefreshPolicy(deleteRequest.getRefreshPolicy());
         parameters.withWaitForActiveShards(deleteRequest.waitForActiveShards());
-        return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
+        return request;
     }

     static Request deleteIndex(DeleteIndexRequest deleteIndexRequest) {
         String endpoint = endpoint(deleteIndexRequest.indices());
-        Params parameters = Params.builder();
+        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
+        Params parameters = new Params(request);
         parameters.withTimeout(deleteIndexRequest.timeout());
         parameters.withMasterTimeout(deleteIndexRequest.masterNodeTimeout());
         parameters.withIndicesOptions(deleteIndexRequest.indicesOptions());
-        return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
+        return request;
     }

     static Request openIndex(OpenIndexRequest openIndexRequest) {
         String endpoint = endpoint(openIndexRequest.indices(), "_open");
-        Params parameters = Params.builder();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+        Params parameters = new Params(request);
         parameters.withTimeout(openIndexRequest.timeout());
         parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout());
         parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards());
         parameters.withIndicesOptions(openIndexRequest.indicesOptions());
-        return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
+        return request;
     }

     static Request closeIndex(CloseIndexRequest closeIndexRequest) {
         String endpoint = endpoint(closeIndexRequest.indices(), "_close");
-        Params parameters = Params.builder();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+        Params parameters = new Params(request);
         parameters.withTimeout(closeIndexRequest.timeout());
         parameters.withMasterTimeout(closeIndexRequest.masterNodeTimeout());
         parameters.withIndicesOptions(closeIndexRequest.indicesOptions());
-        return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
+        return request;
     }

     static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
         String endpoint = endpoint(createIndexRequest.indices());
-        Params parameters = Params.builder();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+        Params parameters = new Params(request);
         parameters.withTimeout(createIndexRequest.timeout());
         parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
         parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
-        HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
+        request.setEntity(createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }

     static Request updateAliases(IndicesAliasesRequest indicesAliasesRequest) throws IOException {
-        Params parameters = Params.builder();
+        Request request = new Request(HttpPost.METHOD_NAME, "/_aliases");
+        Params parameters = new Params(request);
         parameters.withTimeout(indicesAliasesRequest.timeout());
         parameters.withMasterTimeout(indicesAliasesRequest.masterNodeTimeout());
-        HttpEntity entity = createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPost.METHOD_NAME, "/_aliases", parameters.getParams(), entity);
+        request.setEntity(createEntity(indicesAliasesRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }

     static Request putMapping(PutMappingRequest putMappingRequest) throws IOException {
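The hunk above shows the pattern the rest of the file follows: instead of accumulating method, endpoint, parameters, and entity and then building an immutable Request at the end, a mutable Request is created first and Params writes parameters straight onto it. A compressed sketch of that inversion (the Params internals are inferred from the call sites in this commit, not shown in it, so treat them as an assumption):

import java.util.LinkedHashMap;
import java.util.Map;

final class MutableRequestSketch {
    // Simplified stand-in for the low-level client's Request class.
    static final class Request {
        final String method, endpoint;
        final Map<String, String> parameters = new LinkedHashMap<>();
        Object entity;
        Request(String method, String endpoint) { this.method = method; this.endpoint = endpoint; }
        void addParameter(String key, String value) { parameters.put(key, value); }
        void setEntity(Object entity) { this.entity = entity; }
    }

    // Params now wraps a live Request and forwards parameters onto it,
    // rather than collecting them in its own map for a later constructor call.
    static final class Params {
        private final Request request;
        Params(Request request) { this.request = request; }
        Params putParam(String key, String value) {
            if (value != null) request.addParameter(key, value);
            return this;
        }
    }

    public static void main(String[] args) {
        Request request = new Request("POST", "/index/_open");
        new Params(request).putParam("timeout", "30s").putParam("master_timeout", null);
        System.out.println(request.parameters); // prints {timeout=30s}
    }
}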
@ -208,63 +176,69 @@ public final class Request {
throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API"); throw new IllegalArgumentException("concreteIndex cannot be set on PutMapping requests made over the REST API");
} }
String endpoint = endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()); Request request = new Request(HttpPut.METHOD_NAME, endpoint(putMappingRequest.indices(), "_mapping", putMappingRequest.type()));
Params parameters = Params.builder(); Params parameters = new Params(request);
parameters.withTimeout(putMappingRequest.timeout()); parameters.withTimeout(putMappingRequest.timeout());
parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout()); parameters.withMasterTimeout(putMappingRequest.masterNodeTimeout());
HttpEntity entity = createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE); request.setEntity(createEntity(putMappingRequest, REQUEST_BODY_CONTENT_TYPE));
return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); return request;
} }
static Request refresh(RefreshRequest refreshRequest) { static Request refresh(RefreshRequest refreshRequest) {
String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices(); String[] indices = refreshRequest.indices() == null ? Strings.EMPTY_ARRAY : refreshRequest.indices();
String endpoint = endpoint(indices, "_refresh"); Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_refresh"));
Params parameters = Params.builder();
Params parameters = new Params(request);
parameters.withIndicesOptions(refreshRequest.indicesOptions()); parameters.withIndicesOptions(refreshRequest.indicesOptions());
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); return request;
} }
static Request flush(FlushRequest flushRequest) { static Request flush(FlushRequest flushRequest) {
String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices(); String[] indices = flushRequest.indices() == null ? Strings.EMPTY_ARRAY : flushRequest.indices();
String endpoint = endpoint(indices, "_flush"); Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush"));
Params parameters = Params.builder();
Params parameters = new Params(request);
parameters.withIndicesOptions(flushRequest.indicesOptions()); parameters.withIndicesOptions(flushRequest.indicesOptions());
parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing())); parameters.putParam("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));
parameters.putParam("force", Boolean.toString(flushRequest.force())); parameters.putParam("force", Boolean.toString(flushRequest.force()));
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); return request;
} }
static Request forceMerge(ForceMergeRequest forceMergeRequest) { static Request forceMerge(ForceMergeRequest forceMergeRequest) {
String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices(); String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
String endpoint = endpoint(indices, "_forcemerge"); Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge"));
Params parameters = Params.builder();
Params parameters = new Params(request);
parameters.withIndicesOptions(forceMergeRequest.indicesOptions()); parameters.withIndicesOptions(forceMergeRequest.indicesOptions());
parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments())); parameters.putParam("max_num_segments", Integer.toString(forceMergeRequest.maxNumSegments()));
parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes())); parameters.putParam("only_expunge_deletes", Boolean.toString(forceMergeRequest.onlyExpungeDeletes()));
parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush())); parameters.putParam("flush", Boolean.toString(forceMergeRequest.flush()));
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); return request;
} }
static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) { static Request clearCache(ClearIndicesCacheRequest clearIndicesCacheRequest) {
String[] indices = clearIndicesCacheRequest.indices() == null ? Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices(); String[] indices = clearIndicesCacheRequest.indices() == null ? Strings.EMPTY_ARRAY :clearIndicesCacheRequest.indices();
String endpoint = endpoint(indices, "_cache/clear"); Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_cache/clear"));
Params parameters = Params.builder();
Params parameters = new Params(request);
parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions()); parameters.withIndicesOptions(clearIndicesCacheRequest.indicesOptions());
parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache())); parameters.putParam("query", Boolean.toString(clearIndicesCacheRequest.queryCache()));
parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache())); parameters.putParam("fielddata", Boolean.toString(clearIndicesCacheRequest.fieldDataCache()));
parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache())); parameters.putParam("request", Boolean.toString(clearIndicesCacheRequest.requestCache()));
parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields())); parameters.putParam("fields", String.join(",", clearIndicesCacheRequest.fields()));
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null); return request;
} }
static Request info() { static Request info() {
return new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null); return new Request(HttpGet.METHOD_NAME, "/");
} }
static Request bulk(BulkRequest bulkRequest) throws IOException { static Request bulk(BulkRequest bulkRequest) throws IOException {
Params parameters = Params.builder(); Request request = new Request(HttpPost.METHOD_NAME, "/_bulk");
Params parameters = new Params(request);
parameters.withTimeout(bulkRequest.timeout()); parameters.withTimeout(bulkRequest.timeout());
parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy());
@ -273,14 +247,14 @@ public final class Request {
// and this content-type is supported by the Bulk API. // and this content-type is supported by the Bulk API.
XContentType bulkContentType = null; XContentType bulkContentType = null;
for (int i = 0; i < bulkRequest.numberOfActions(); i++) { for (int i = 0; i < bulkRequest.numberOfActions(); i++) {
DocWriteRequest<?> request = bulkRequest.requests().get(i); DocWriteRequest<?> action = bulkRequest.requests().get(i);
DocWriteRequest.OpType opType = request.opType(); DocWriteRequest.OpType opType = action.opType();
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
bulkContentType = enforceSameContentType((IndexRequest) request, bulkContentType); bulkContentType = enforceSameContentType((IndexRequest) action, bulkContentType);
} else if (opType == DocWriteRequest.OpType.UPDATE) { } else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = (UpdateRequest) request; UpdateRequest updateRequest = (UpdateRequest) action;
if (updateRequest.doc() != null) { if (updateRequest.doc() != null) {
bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType); bulkContentType = enforceSameContentType(updateRequest.doc(), bulkContentType);
} }
@ -298,30 +272,30 @@ public final class Request {
final ContentType requestContentType = createContentType(bulkContentType); final ContentType requestContentType = createContentType(bulkContentType);
ByteArrayOutputStream content = new ByteArrayOutputStream(); ByteArrayOutputStream content = new ByteArrayOutputStream();
for (DocWriteRequest<?> request : bulkRequest.requests()) { for (DocWriteRequest<?> action : bulkRequest.requests()) {
DocWriteRequest.OpType opType = request.opType(); DocWriteRequest.OpType opType = action.opType();
try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) { try (XContentBuilder metadata = XContentBuilder.builder(bulkContentType.xContent())) {
metadata.startObject(); metadata.startObject();
{ {
metadata.startObject(opType.getLowercase()); metadata.startObject(opType.getLowercase());
if (Strings.hasLength(request.index())) { if (Strings.hasLength(action.index())) {
metadata.field("_index", request.index()); metadata.field("_index", action.index());
} }
if (Strings.hasLength(request.type())) { if (Strings.hasLength(action.type())) {
metadata.field("_type", request.type()); metadata.field("_type", action.type());
} }
if (Strings.hasLength(request.id())) { if (Strings.hasLength(action.id())) {
metadata.field("_id", request.id()); metadata.field("_id", action.id());
} }
if (Strings.hasLength(request.routing())) { if (Strings.hasLength(action.routing())) {
metadata.field("routing", request.routing()); metadata.field("routing", action.routing());
} }
if (request.version() != Versions.MATCH_ANY) { if (action.version() != Versions.MATCH_ANY) {
metadata.field("version", request.version()); metadata.field("version", action.version());
} }
VersionType versionType = request.versionType(); VersionType versionType = action.versionType();
if (versionType != VersionType.INTERNAL) { if (versionType != VersionType.INTERNAL) {
if (versionType == VersionType.EXTERNAL) { if (versionType == VersionType.EXTERNAL) {
metadata.field("version_type", "external"); metadata.field("version_type", "external");
@ -333,12 +307,12 @@ public final class Request {
} }
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
IndexRequest indexRequest = (IndexRequest) request; IndexRequest indexRequest = (IndexRequest) action;
if (Strings.hasLength(indexRequest.getPipeline())) { if (Strings.hasLength(indexRequest.getPipeline())) {
metadata.field("pipeline", indexRequest.getPipeline()); metadata.field("pipeline", indexRequest.getPipeline());
} }
} else if (opType == DocWriteRequest.OpType.UPDATE) { } else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = (UpdateRequest) request; UpdateRequest updateRequest = (UpdateRequest) action;
if (updateRequest.retryOnConflict() > 0) { if (updateRequest.retryOnConflict() > 0) {
metadata.field("retry_on_conflict", updateRequest.retryOnConflict()); metadata.field("retry_on_conflict", updateRequest.retryOnConflict());
} }
@ -357,7 +331,7 @@ public final class Request {
BytesRef source = null; BytesRef source = null;
if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) { if (opType == DocWriteRequest.OpType.INDEX || opType == DocWriteRequest.OpType.CREATE) {
IndexRequest indexRequest = (IndexRequest) request; IndexRequest indexRequest = (IndexRequest) action;
BytesReference indexSource = indexRequest.source(); BytesReference indexSource = indexRequest.source();
XContentType indexXContentType = indexRequest.getContentType(); XContentType indexXContentType = indexRequest.getContentType();
@ -369,7 +343,7 @@ public final class Request {
} }
} }
} else if (opType == DocWriteRequest.OpType.UPDATE) { } else if (opType == DocWriteRequest.OpType.UPDATE) {
source = XContentHelper.toXContent((UpdateRequest) request, bulkContentType, false).toBytesRef(); source = XContentHelper.toXContent((UpdateRequest) action, bulkContentType, false).toBytesRef();
} }
if (source != null) { if (source != null) {
@ -377,20 +351,22 @@ public final class Request {
content.write(separator); content.write(separator);
} }
} }
request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType));
HttpEntity entity = new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType); return request;
return new Request(HttpPost.METHOD_NAME, "/_bulk", parameters.getParams(), entity);
} }
static Request exists(GetRequest getRequest) { static Request exists(GetRequest getRequest) {
Request request = get(getRequest); return getStyleRequest(HttpHead.METHOD_NAME, getRequest);
return new Request(HttpHead.METHOD_NAME, request.endpoint, request.parameters, null);
} }
static Request get(GetRequest getRequest) { static Request get(GetRequest getRequest) {
String endpoint = endpoint(getRequest.index(), getRequest.type(), getRequest.id()); return getStyleRequest(HttpGet.METHOD_NAME, getRequest);
}
Params parameters = Params.builder(); private static Request getStyleRequest(String method, GetRequest getRequest) {
Request request = new Request(method, endpoint(getRequest.index(), getRequest.type(), getRequest.id()));
Params parameters = new Params(request);
parameters.withPreference(getRequest.preference()); parameters.withPreference(getRequest.preference());
parameters.withRouting(getRequest.routing()); parameters.withRouting(getRequest.routing());
parameters.withRefresh(getRequest.refresh()); parameters.withRefresh(getRequest.refresh());
@ -400,25 +376,28 @@ public final class Request {
parameters.withVersionType(getRequest.versionType()); parameters.withVersionType(getRequest.versionType());
parameters.withFetchSourceContext(getRequest.fetchSourceContext()); parameters.withFetchSourceContext(getRequest.fetchSourceContext());
return new Request(HttpGet.METHOD_NAME, endpoint, parameters.getParams(), null); return request;
} }
static Request multiGet(MultiGetRequest multiGetRequest) throws IOException { static Request multiGet(MultiGetRequest multiGetRequest) throws IOException {
Params parameters = Params.builder(); Request request = new Request(HttpPost.METHOD_NAME, "/_mget");
Params parameters = new Params(request);
parameters.withPreference(multiGetRequest.preference()); parameters.withPreference(multiGetRequest.preference());
parameters.withRealtime(multiGetRequest.realtime()); parameters.withRealtime(multiGetRequest.realtime());
parameters.withRefresh(multiGetRequest.refresh()); parameters.withRefresh(multiGetRequest.refresh());
HttpEntity entity = createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE);
return new Request(HttpPost.METHOD_NAME, "/_mget", parameters.getParams(), entity); request.setEntity(createEntity(multiGetRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
} }
static Request index(IndexRequest indexRequest) { static Request index(IndexRequest indexRequest) {
String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME; String method = Strings.hasLength(indexRequest.id()) ? HttpPut.METHOD_NAME : HttpPost.METHOD_NAME;
boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE); boolean isCreate = (indexRequest.opType() == DocWriteRequest.OpType.CREATE);
String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null); String endpoint = endpoint(indexRequest.index(), indexRequest.type(), indexRequest.id(), isCreate ? "_create" : null);
Request request = new Request(method, endpoint);
Params parameters = Params.builder(); Params parameters = new Params(request);
parameters.withRouting(indexRequest.routing()); parameters.withRouting(indexRequest.routing());
parameters.withTimeout(indexRequest.timeout()); parameters.withTimeout(indexRequest.timeout());
parameters.withVersion(indexRequest.version()); parameters.withVersion(indexRequest.version());
@ -429,19 +408,19 @@ public final class Request {
BytesRef source = indexRequest.source().toBytesRef(); BytesRef source = indexRequest.source().toBytesRef();
ContentType contentType = createContentType(indexRequest.getContentType()); ContentType contentType = createContentType(indexRequest.getContentType());
HttpEntity entity = new ByteArrayEntity(source.bytes, source.offset, source.length, contentType); request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType));
return request;
return new Request(method, endpoint, parameters.getParams(), entity);
} }
static Request ping() { static Request ping() {
return new Request(HttpHead.METHOD_NAME, "/", Collections.emptyMap(), null); return new Request(HttpHead.METHOD_NAME, "/");
} }
static Request update(UpdateRequest updateRequest) throws IOException { static Request update(UpdateRequest updateRequest) throws IOException {
String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update"); String endpoint = endpoint(updateRequest.index(), updateRequest.type(), updateRequest.id(), "_update");
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
Params parameters = Params.builder(); Params parameters = new Params(request);
parameters.withRouting(updateRequest.routing()); parameters.withRouting(updateRequest.routing());
parameters.withTimeout(updateRequest.timeout()); parameters.withTimeout(updateRequest.timeout());
parameters.withRefreshPolicy(updateRequest.getRefreshPolicy()); parameters.withRefreshPolicy(updateRequest.getRefreshPolicy());
@ -472,14 +451,14 @@ public final class Request {
if (xContentType == null) { if (xContentType == null) {
xContentType = Requests.INDEX_CONTENT_TYPE; xContentType = Requests.INDEX_CONTENT_TYPE;
} }
request.setEntity(createEntity(updateRequest, xContentType));
HttpEntity entity = createEntity(updateRequest, xContentType); return request;
return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), entity);
} }
static Request search(SearchRequest searchRequest) throws IOException { static Request search(SearchRequest searchRequest) throws IOException {
String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search"); Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), "_search"));
Params params = Params.builder();
Params params = new Params(request);
params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
params.withRouting(searchRequest.routing()); params.withRouting(searchRequest.routing());
params.withPreference(searchRequest.preference()); params.withPreference(searchRequest.preference());
@ -495,65 +474,73 @@ public final class Request {
if (searchRequest.scroll() != null) { if (searchRequest.scroll() != null) {
params.putParam("scroll", searchRequest.scroll().keepAlive()); params.putParam("scroll", searchRequest.scroll().keepAlive());
} }
HttpEntity entity = null;
if (searchRequest.source() != null) { if (searchRequest.source() != null) {
entity = createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE); request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE));
} }
return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity); return request;
} }
static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOException {
HttpEntity entity = createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE); Request request = new Request(HttpPost.METHOD_NAME, "/_search/scroll");
return new Request(HttpPost.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity); request.setEntity(createEntity(searchScrollRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
} }
static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException { static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException {
HttpEntity entity = createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE); Request request = new Request(HttpDelete.METHOD_NAME, "/_search/scroll");
return new Request(HttpDelete.METHOD_NAME, "/_search/scroll", Collections.emptyMap(), entity); request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE));
return request;
} }
static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException { static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOException {
Params params = Params.builder(); Request request = new Request(HttpPost.METHOD_NAME, "/_msearch");
Params params = new Params(request);
params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true");
if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) { if (multiSearchRequest.maxConcurrentSearchRequests() != MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests())); params.putParam("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
} }
XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent();
byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent);
HttpEntity entity = new ByteArrayEntity(source, createContentType(xContent.type())); request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type())));
return new Request(HttpPost.METHOD_NAME, "/_msearch", params.getParams(), entity); return request;
} }
static Request existsAlias(GetAliasesRequest getAliasesRequest) { static Request existsAlias(GetAliasesRequest getAliasesRequest) {
Params params = Params.builder();
params.withIndicesOptions(getAliasesRequest.indicesOptions());
params.withLocal(getAliasesRequest.local());
if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) && if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) &&
(getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) { (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) {
throw new IllegalArgumentException("existsAlias requires at least an alias or an index"); throw new IllegalArgumentException("existsAlias requires at least an alias or an index");
} }
String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices(); String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases(); String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
String endpoint = endpoint(indices, "_alias", aliases);
return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); Request request = new Request(HttpHead.METHOD_NAME, endpoint(indices, "_alias", aliases));
Params params = new Params(request);
params.withIndicesOptions(getAliasesRequest.indicesOptions());
params.withLocal(getAliasesRequest.local());
return request;
} }
static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) { static Request fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest) {
Params params = Params.builder(); Request request = new Request(HttpGet.METHOD_NAME, endpoint(fieldCapabilitiesRequest.indices(), "_field_caps"));
Params params = new Params(request);
params.withFields(fieldCapabilitiesRequest.fields()); params.withFields(fieldCapabilitiesRequest.fields());
params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions()); params.withIndicesOptions(fieldCapabilitiesRequest.indicesOptions());
return request;
String[] indices = fieldCapabilitiesRequest.indices();
String endpoint = endpoint(indices, "_field_caps");
return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), null);
} }
     static Request rankEval(RankEvalRequest rankEvalRequest) throws IOException {
-        String endpoint = endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval");
-        Params params = Params.builder();
+        Request request = new Request(HttpGet.METHOD_NAME, endpoint(rankEvalRequest.indices(), Strings.EMPTY_ARRAY, "_rank_eval"));
+
+        Params params = new Params(request);
         params.withIndicesOptions(rankEvalRequest.indicesOptions());
-        HttpEntity entity = createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpGet.METHOD_NAME, endpoint, params.getParams(), entity);
+
+        request.setEntity(createEntity(rankEvalRequest.getRankEvalSpec(), REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
     static Request split(ResizeRequest resizeRequest) throws IOException {
@@ -571,64 +558,76 @@ public final class Request {
     }
     private static Request resize(ResizeRequest resizeRequest) throws IOException {
-        Params params = Params.builder();
-        params.withTimeout(resizeRequest.timeout());
-        params.withMasterTimeout(resizeRequest.masterNodeTimeout());
-        params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
         String endpoint = new EndpointBuilder().addPathPart(resizeRequest.getSourceIndex())
                 .addPathPartAsIs("_" + resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT))
                 .addPathPart(resizeRequest.getTargetIndexRequest().index()).build();
-        HttpEntity entity = createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, endpoint, params.getParams(), entity);
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
+
+        Params params = new Params(request);
+        params.withTimeout(resizeRequest.timeout());
+        params.withMasterTimeout(resizeRequest.masterNodeTimeout());
+        params.withWaitForActiveShards(resizeRequest.getTargetIndexRequest().waitForActiveShards());
+
+        request.setEntity(createEntity(resizeRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
     static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSettingsRequest) throws IOException {
-        Params parameters = Params.builder();
+        Request request = new Request(HttpPut.METHOD_NAME, "/_cluster/settings");
+
+        Params parameters = new Params(request);
         parameters.withTimeout(clusterUpdateSettingsRequest.timeout());
         parameters.withMasterTimeout(clusterUpdateSettingsRequest.masterNodeTimeout());
-        HttpEntity entity = createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, "/_cluster/settings", parameters.getParams(), entity);
+
+        request.setEntity(createEntity(clusterUpdateSettingsRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
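All of the converters above now share one shape: construct a Request from an HTTP method and endpoint, attach optional query parameters through the Params wrapper, then set the body entity if the API takes one. A minimal sketch of that shape for a hypothetical ExampleRequest (the endpoint string and request type are illustrative, not part of this change):

    static Request exampleConverter(ExampleRequest exampleRequest) throws IOException {
        // 1. method + endpoint
        Request request = new Request(HttpPost.METHOD_NAME, "/_example");
        // 2. optional query parameters, written straight onto the request
        Params params = new Params(request);
        params.withTimeout(exampleRequest.timeout());
        // 3. optional body
        request.setEntity(createEntity(exampleRequest, REQUEST_BODY_CONTENT_TYPE));
        return request;
    }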
     static Request rollover(RolloverRequest rolloverRequest) throws IOException {
-        Params params = Params.builder();
+        String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
+                .addPathPart(rolloverRequest.getNewIndexName()).build();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+
+        Params params = new Params(request);
         params.withTimeout(rolloverRequest.timeout());
         params.withMasterTimeout(rolloverRequest.masterNodeTimeout());
         params.withWaitForActiveShards(rolloverRequest.getCreateIndexRequest().waitForActiveShards());
         if (rolloverRequest.isDryRun()) {
             params.putParam("dry_run", Boolean.TRUE.toString());
         }
-        String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
-                .addPathPart(rolloverRequest.getNewIndexName()).build();
-        HttpEntity entity = createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPost.METHOD_NAME, endpoint, params.getParams(), entity);
+
+        request.setEntity(createEntity(rolloverRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
-    static Request indicesExist(GetIndexRequest request) {
+    static Request indicesExist(GetIndexRequest getIndexRequest) {
         // this can be called with no indices as argument by transport client, not via REST though
-        if (request.indices() == null || request.indices().length == 0) {
+        if (getIndexRequest.indices() == null || getIndexRequest.indices().length == 0) {
             throw new IllegalArgumentException("indices are mandatory");
         }
-        String endpoint = endpoint(request.indices(), "");
-        Params params = Params.builder();
-        params.withLocal(request.local());
-        params.withHuman(request.humanReadable());
-        params.withIndicesOptions(request.indicesOptions());
-        params.withIncludeDefaults(request.includeDefaults());
-        return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null);
+        String endpoint = endpoint(getIndexRequest.indices(), "");
+        Request request = new Request(HttpHead.METHOD_NAME, endpoint);
+
+        Params params = new Params(request);
+        params.withLocal(getIndexRequest.local());
+        params.withHuman(getIndexRequest.humanReadable());
+        params.withIndicesOptions(getIndexRequest.indicesOptions());
+        params.withIncludeDefaults(getIndexRequest.includeDefaults());
+        return request;
     }
     static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException {
-        Params parameters = Params.builder();
+        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
+        Request request = new Request(HttpPut.METHOD_NAME, endpoint(indices, "_settings"));
+
+        Params parameters = new Params(request);
         parameters.withTimeout(updateSettingsRequest.timeout());
         parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout());
         parameters.withIndicesOptions(updateSettingsRequest.indicesOptions());
         parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting());
-        String[] indices = updateSettingsRequest.indices() == null ? Strings.EMPTY_ARRAY : updateSettingsRequest.indices();
-        String endpoint = endpoint(indices, "_settings");
-        HttpEntity entity = createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE);
-        return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
+        request.setEntity(createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE));
+        return request;
     }
     private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException {
@@ -678,19 +677,19 @@ public final class Request {
     }
     /**
-     * Utility class to build request's parameters map and centralize all parameter names.
+     * Utility class to help with common parameter names and patterns. Wraps
+     * a {@link Request} and adds the parameters to it directly.
      */
     static class Params {
-        private final Map<String, String> params = new HashMap<>();
+        private final Request request;

-        private Params() {
+        Params(Request request) {
+            this.request = request;
         }

-        Params putParam(String key, String value) {
+        Params putParam(String name, String value) {
             if (Strings.hasLength(value)) {
-                if (params.putIfAbsent(key, value) != null) {
-                    throw new IllegalArgumentException("Request parameter [" + key + "] is already registered");
-                }
+                request.addParameter(name, value);
             }
             return this;
         }
@@ -854,14 +853,6 @@ public final class Request {
             }
             return this;
         }
-
-        Map<String, String> getParams() {
-            return Collections.unmodifiableMap(params);
-        }
-
-        static Params builder() {
-            return new Params();
-        }
     }
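Note the behavioral shift in Params: the old builder accumulated a private map and rejected duplicate keys itself, while the new wrapper writes each value through to Request.addParameter immediately, so duplicate handling now lives with the Request. The chaining style is unchanged; a usage sketch (endpoint and values are illustrative):

    Request request = new Request(HttpGet.METHOD_NAME, "/_example");
    new Params(request)
            .putParam("timeout", "30s")
            .putParam("master_timeout", "30s");
    // request now carries both parameters; no getParams()/builder() round-trip needed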
     /**
@@ -258,7 +258,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
      */
     public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -267,14 +267,14 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
      */
     public final void bulkAsync(BulkRequest bulkRequest, ActionListener<BulkResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(bulkRequest, RequestConverters::bulk, BulkResponse::fromXContent, listener, emptySet(), headers);
     }

     /**
      * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise
      */
     public final boolean ping(Header... headers) throws IOException {
-        return performRequest(new MainRequest(), (request) -> Request.ping(), RestHighLevelClient::convertExistsResponse,
+        return performRequest(new MainRequest(), (request) -> RequestConverters.ping(), RestHighLevelClient::convertExistsResponse,
                 emptySet(), headers);
     }
@@ -282,8 +282,8 @@ public class RestHighLevelClient implements Closeable {
      * Get the cluster info otherwise provided when sending an HTTP request to port 9200
      */
     public final MainResponse info(Header... headers) throws IOException {
-        return performRequestAndParseEntity(new MainRequest(), (request) -> Request.info(), MainResponse::fromXContent, emptySet(),
-                headers);
+        return performRequestAndParseEntity(new MainRequest(), (request) -> RequestConverters.info(),
+                MainResponse::fromXContent, emptySet(), headers);
     }
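Caller-side, none of these signatures change apart from the converter they reference. An illustrative sketch of the two cluster-level calls above (the client construction shown here is assumed, not part of this diff; host and port are placeholders):

    RestHighLevelClient client = new RestHighLevelClient(
            RestClient.builder(new HttpHost("localhost", 9200, "http")));
    boolean reachable = client.ping();   // true if the cluster answered the HEAD request
    MainResponse info = client.info();   // cluster name, node name, version, ...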
     /**
@@ -292,7 +292,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final GetResponse get(GetRequest getRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, singleton(404), headers);
+        return performRequestAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, singleton(404), headers);
     }

     /**
@@ -301,7 +301,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers);
+        performRequestAsyncAndParseEntity(getRequest, RequestConverters::get, GetResponse::fromXContent, listener,
+                singleton(404), headers);
     }

     /**
@@ -310,7 +311,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
      */
     public final MultiGetResponse multiGet(MultiGetRequest multiGetRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, singleton(404), headers);
+        return performRequestAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent,
+                singleton(404), headers);
     }

     /**
@@ -319,7 +321,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html">Multi Get API on elastic.co</a>
      */
     public final void multiGetAsync(MultiGetRequest multiGetRequest, ActionListener<MultiGetResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(multiGetRequest, Request::multiGet, MultiGetResponse::fromXContent, listener,
+        performRequestAsyncAndParseEntity(multiGetRequest, RequestConverters::multiGet, MultiGetResponse::fromXContent, listener,
                 singleton(404), headers);
     }
@@ -329,7 +331,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final boolean exists(GetRequest getRequest, Header... headers) throws IOException {
-        return performRequest(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers);
+        return performRequest(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers);
     }

     /**
@@ -338,7 +340,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
      */
     public final void existsAsync(GetRequest getRequest, ActionListener<Boolean> listener, Header... headers) {
-        performRequestAsync(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers);
+        performRequestAsync(getRequest, RequestConverters::exists, RestHighLevelClient::convertExistsResponse, listener,
+                emptySet(), headers);
     }
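The *Async variants all follow the listener pattern sketched here against the get/exists pair above (index, type, and id are placeholders):

    client.getAsync(new GetRequest("posts", "doc", "1"), new ActionListener<GetResponse>() {
        @Override
        public void onResponse(GetResponse getResponse) {
            // called on success, including a tolerated 404 with isExists() == false
        }

        @Override
        public void onFailure(Exception e) {
            // called on transport problems or non-tolerated status codes
        }
    });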
     /**
@@ -347,7 +350,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
      */
     public final IndexResponse index(IndexRequest indexRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -356,7 +359,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html">Index API on elastic.co</a>
      */
     public final void indexAsync(IndexRequest indexRequest, ActionListener<IndexResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(indexRequest, RequestConverters::index, IndexResponse::fromXContent, listener,
+                emptySet(), headers);
     }

     /**
@@ -365,7 +369,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
      */
     public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -374,7 +378,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html">Update API on elastic.co</a>
      */
     public final void updateAsync(UpdateRequest updateRequest, ActionListener<UpdateResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(updateRequest, RequestConverters::update, UpdateResponse::fromXContent, listener,
+                emptySet(), headers);
     }

     /**
@@ -383,8 +388,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
      */
     public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, Collections.singleton(404),
-                headers);
+        return performRequestAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent,
+                singleton(404), headers);
     }

     /**
@@ -393,7 +398,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html">Delete API on elastic.co</a>
      */
     public final void deleteAsync(DeleteRequest deleteRequest, ActionListener<DeleteResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, listener,
+        performRequestAsyncAndParseEntity(deleteRequest, RequestConverters::delete, DeleteResponse::fromXContent, listener,
                 Collections.singleton(404), headers);
     }
@@ -403,7 +408,7 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
      */
     public final SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, emptySet(), headers);
     }

     /**
@@ -412,7 +417,8 @@ public class RestHighLevelClient implements Closeable {
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html">Search API on elastic.co</a>
      */
     public final void searchAsync(SearchRequest searchRequest, ActionListener<SearchResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers);
+        performRequestAsyncAndParseEntity(searchRequest, RequestConverters::search, SearchResponse::fromXContent, listener,
+                emptySet(), headers);
     }

     /**
@@ -422,7 +428,7 @@ public class RestHighLevelClient implements Closeable {
      * elastic.co</a>
      */
     public final MultiSearchResponse multiSearch(MultiSearchRequest multiSearchRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(multiSearchRequest, Request::multiSearch, MultiSearchResponse::fromXContext,
+        return performRequestAndParseEntity(multiSearchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext,
                 emptySet(), headers);
     }
@@ -433,7 +439,7 @@ public class RestHighLevelClient implements Closeable {
      * elastic.co</a>
      */
     public final void multiSearchAsync(MultiSearchRequest searchRequest, ActionListener<MultiSearchResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(searchRequest, Request::multiSearch, MultiSearchResponse::fromXContext, listener,
+        performRequestAsyncAndParseEntity(searchRequest, RequestConverters::multiSearch, MultiSearchResponse::fromXContext, listener,
                 emptySet(), headers);
     }
@@ -444,7 +450,8 @@ public class RestHighLevelClient implements Closeable {
      * API on elastic.co</a>
      */
     public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent,
+                emptySet(), headers);
     }

     /**
@@ -455,7 +462,7 @@ public class RestHighLevelClient implements Closeable {
      */
     public final void searchScrollAsync(SearchScrollRequest searchScrollRequest,
                                         ActionListener<SearchResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent,
+        performRequestAsyncAndParseEntity(searchScrollRequest, RequestConverters::searchScroll, SearchResponse::fromXContent,
                 listener, emptySet(), headers);
     }
@@ -466,7 +473,7 @@ public class RestHighLevelClient implements Closeable {
      * Clear Scroll API on elastic.co</a>
      */
     public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent,
+        return performRequestAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent,
                 emptySet(), headers);
     }
@@ -478,7 +485,7 @@ public class RestHighLevelClient implements Closeable {
      */
     public final void clearScrollAsync(ClearScrollRequest clearScrollRequest,
                                        ActionListener<ClearScrollResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent,
+        performRequestAsyncAndParseEntity(clearScrollRequest, RequestConverters::clearScroll, ClearScrollResponse::fromXContent,
                 listener, emptySet(), headers);
     }
@@ -489,7 +496,8 @@ public class RestHighLevelClient implements Closeable {
      * on elastic.co</a>
      */
     public final RankEvalResponse rankEval(RankEvalRequest rankEvalRequest, Header... headers) throws IOException {
-        return performRequestAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, emptySet(), headers);
+        return performRequestAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent,
+                emptySet(), headers);
     }

     /**
@@ -499,8 +507,8 @@ public class RestHighLevelClient implements Closeable {
      * on elastic.co</a>
      */
     public final void rankEvalAsync(RankEvalRequest rankEvalRequest, ActionListener<RankEvalResponse> listener, Header... headers) {
-        performRequestAsyncAndParseEntity(rankEvalRequest, Request::rankEval, RankEvalResponse::fromXContent, listener, emptySet(),
-                headers);
+        performRequestAsyncAndParseEntity(rankEvalRequest, RequestConverters::rankEval, RankEvalResponse::fromXContent, listener,
+                emptySet(), headers);
     }

     /**
@@ -511,7 +519,7 @@ public class RestHighLevelClient implements Closeable {
      */
     public final FieldCapabilitiesResponse fieldCaps(FieldCapabilitiesRequest fieldCapabilitiesRequest,
                                                      Header... headers) throws IOException {
-        return performRequestAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
+        return performRequestAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps,
                 FieldCapabilitiesResponse::fromXContent, emptySet(), headers);
     }
@@ -524,7 +532,7 @@ public class RestHighLevelClient implements Closeable {
     public final void fieldCapsAsync(FieldCapabilitiesRequest fieldCapabilitiesRequest,
                                      ActionListener<FieldCapabilitiesResponse> listener,
                                      Header... headers) {
-        performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, Request::fieldCaps,
+        performRequestAsyncAndParseEntity(fieldCapabilitiesRequest, RequestConverters::fieldCaps,
                 FieldCapabilitiesResponse::fromXContent, listener, emptySet(), headers);
     }
@@ -544,9 +552,10 @@ public class RestHighLevelClient implements Closeable {
             throw validationException;
         }
         Request req = requestConverter.apply(request);
+        req.setHeaders(headers);
         Response response;
         try {
-            response = client.performRequest(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), headers);
+            response = client.performRequest(req);
         } catch (ResponseException e) {
             if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) {
                 try {
@@ -593,9 +602,10 @@ public class RestHighLevelClient implements Closeable {
             listener.onFailure(e);
             return;
         }
+        req.setHeaders(headers);
         ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
-        client.performRequestAsync(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), responseListener, headers);
+        client.performRequestAsync(req, responseListener);
     }

     final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
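Both internal entry points now hand the converted Request to the low-level client whole, with headers attached to the Request up front instead of passed as trailing varargs. Condensed from the two hunks above (validation and response handling elided):

    Request req = requestConverter.apply(request);       // ActionRequest -> Request
    req.setHeaders(headers);                             // headers ride on the Request now
    Response response = client.performRequest(req);      // synchronous path
    client.performRequestAsync(req, responseListener);   // asynchronous path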
@@ -20,7 +20,6 @@
 package org.elasticsearch.client;

 import org.apache.http.Header;
-import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
 import org.apache.http.ProtocolVersion;
 import org.apache.http.RequestLine;
@@ -52,14 +51,9 @@ import java.util.Collections;
 import java.util.List;
 import java.util.stream.Collectors;

-import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyMapOf;
-import static org.mockito.Matchers.anyObject;
-import static org.mockito.Matchers.anyVararg;
-import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -79,14 +73,15 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
             final RestClient restClient = mock(RestClient.class);
             restHighLevelClient = new CustomRestClient(restClient);

-            doAnswer(mock -> mockPerformRequest((Header) mock.getArguments()[4]))
+            doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0]))
                     .when(restClient)
-                    .performRequest(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class), anyObject(), anyVararg());
+                    .performRequest(any(Request.class));

-            doAnswer(mock -> mockPerformRequestAsync((Header) mock.getArguments()[5], (ResponseListener) mock.getArguments()[4]))
+            doAnswer(inv -> mockPerformRequestAsync(
+                        ((Request) inv.getArguments()[0]).getHeaders()[0],
+                        (ResponseListener) inv.getArguments()[1]))
                     .when(restClient)
-                    .performRequestAsync(eq(HttpGet.METHOD_NAME), eq(ENDPOINT), anyMapOf(String.class, String.class),
-                            any(HttpEntity.class), any(ResponseListener.class), anyVararg());
+                    .performRequestAsync(any(Request.class), any(ResponseListener.class));
         }
     }
@@ -193,7 +188,7 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
         }

         Request toRequest(MainRequest mainRequest) throws IOException {
-            return new Request(HttpGet.METHOD_NAME, ENDPOINT, emptyMap(), null);
+            return new Request(HttpGet.METHOD_NAME, ENDPOINT);
         }

         MainResponse toResponse(Response response) throws IOException {
@@ -82,6 +82,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.client.RequestConverters.EndpointBuilder;
+import org.elasticsearch.client.RequestConverters.Params;
 import org.elasticsearch.index.RandomCreateIndexGenerator;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.query.TermQueryBuilder;
@@ -124,8 +126,8 @@ import java.util.function.Function;
 import java.util.function.Supplier;

 import static java.util.Collections.singletonMap;
-import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
-import static org.elasticsearch.client.Request.enforceSameContentType;
+import static org.elasticsearch.client.RequestConverters.REQUEST_BODY_CONTENT_TYPE;
+import static org.elasticsearch.client.RequestConverters.enforceSameContentType;
 import static org.elasticsearch.index.RandomCreateIndexGenerator.randomAliases;
 import static org.elasticsearch.index.RandomCreateIndexGenerator.randomCreateIndexRequest;
 import static org.elasticsearch.index.RandomCreateIndexGenerator.randomIndexSettings;
@@ -137,40 +139,9 @@ import static org.hamcrest.Matchers.hasEntry;
 import static org.hamcrest.Matchers.hasKey;
 import static org.hamcrest.Matchers.nullValue;

-public class RequestTests extends ESTestCase {
-
-    public void testConstructor() {
-        final String method = randomFrom("GET", "PUT", "POST", "HEAD", "DELETE");
-        final String endpoint = randomAlphaOfLengthBetween(1, 10);
-        final Map<String, String> parameters = singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));
-        final HttpEntity entity = randomBoolean() ? new StringEntity(randomAlphaOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null;
-
-        NullPointerException e = expectThrows(NullPointerException.class, () -> new Request(null, endpoint, parameters, entity));
-        assertEquals("method cannot be null", e.getMessage());
-
-        e = expectThrows(NullPointerException.class, () -> new Request(method, null, parameters, entity));
-        assertEquals("endpoint cannot be null", e.getMessage());
-
-        e = expectThrows(NullPointerException.class, () -> new Request(method, endpoint, null, entity));
-        assertEquals("parameters cannot be null", e.getMessage());
-
-        final Request request = new Request(method, endpoint, parameters, entity);
-        assertEquals(method, request.getMethod());
-        assertEquals(endpoint, request.getEndpoint());
-        assertEquals(parameters, request.getParameters());
-        assertEquals(entity, request.getEntity());
-
-        final Constructor<?>[] constructors = Request.class.getConstructors();
-        assertEquals("Expected only 1 constructor", 1, constructors.length);
-        assertTrue("Request constructor is not public", Modifier.isPublic(constructors[0].getModifiers()));
-    }
-
-    public void testClassVisibility() {
-        assertTrue("Request class is not public", Modifier.isPublic(Request.class.getModifiers()));
-    }
+public class RequestConvertersTests extends ESTestCase {

     public void testPing() {
-        Request request = Request.ping();
+        Request request = RequestConverters.ping();
         assertEquals("/", request.getEndpoint());
         assertEquals(0, request.getParameters().size());
         assertNull(request.getEntity());
@@ -178,7 +149,7 @@ public class RequestTests extends ESTestCase {
     }

     public void testInfo() {
-        Request request = Request.info();
+        Request request = RequestConverters.info();
         assertEquals("/", request.getEndpoint());
         assertEquals(0, request.getParameters().size());
         assertNull(request.getEntity());
@@ -186,7 +157,7 @@ public class RequestTests extends ESTestCase {
     }

     public void testGet() {
-        getAndExistsTest(Request::get, HttpGet.METHOD_NAME);
+        getAndExistsTest(RequestConverters::get, HttpGet.METHOD_NAME);
     }
     public void testMultiGet() throws IOException {
@@ -232,7 +203,7 @@ public class RequestTests extends ESTestCase {
             multiGetRequest.add(item);
         }

-        Request request = Request.multiGet(multiGetRequest);
+        Request request = RequestConverters.multiGet(multiGetRequest);
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals("/_mget", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
@@ -260,7 +231,7 @@ public class RequestTests extends ESTestCase {
             }
         }

-        Request request = Request.delete(deleteRequest);
+        Request request = RequestConverters.delete(deleteRequest);
         assertEquals("/" + index + "/" + type + "/" + id, request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
@@ -268,7 +239,7 @@ public class RequestTests extends ESTestCase {
     }

     public void testExists() {
-        getAndExistsTest(Request::exists, HttpHead.METHOD_NAME);
+        getAndExistsTest(RequestConverters::exists, HttpHead.METHOD_NAME);
     }
     public void testIndicesExist() {
@@ -282,7 +253,7 @@ public class RequestTests extends ESTestCase {
         setRandomHumanReadable(getIndexRequest, expectedParams);
         setRandomIncludeDefaults(getIndexRequest, expectedParams);

-        final Request request = Request.indicesExist(getIndexRequest);
+        final Request request = RequestConverters.indicesExist(getIndexRequest);

         assertEquals(HttpHead.METHOD_NAME, request.getMethod());
         assertEquals("/" + String.join(",", indices), request.getEndpoint());
@@ -291,8 +262,8 @@ public class RequestTests extends ESTestCase {
     }

     public void testIndicesExistEmptyIndices() {
-        expectThrows(IllegalArgumentException.class, () -> Request.indicesExist(new GetIndexRequest()));
-        expectThrows(IllegalArgumentException.class, () -> Request.indicesExist(new GetIndexRequest().indices((String[])null)));
+        expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest()));
+        expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[])null)));
     }

     private static void getAndExistsTest(Function<GetRequest, Request> requestConverter, String method) {
@@ -361,7 +332,7 @@ public class RequestTests extends ESTestCase {
         setRandomMasterTimeout(createIndexRequest, expectedParams);
         setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);

-        Request request = Request.createIndex(createIndexRequest);
+        Request request = RequestConverters.createIndex(createIndexRequest);
         assertEquals("/" + createIndexRequest.index(), request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPut.METHOD_NAME, request.getMethod());
@@ -382,7 +353,7 @@ public class RequestTests extends ESTestCase {
         setRandomTimeout(indicesAliasesRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
         setRandomMasterTimeout(indicesAliasesRequest, expectedParams);

-        Request request = Request.updateAliases(indicesAliasesRequest);
+        Request request = RequestConverters.updateAliases(indicesAliasesRequest);
         assertEquals("/_aliases", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertToXContentBody(indicesAliasesRequest, request.getEntity());
@@ -402,7 +373,7 @@ public class RequestTests extends ESTestCase {
         setRandomTimeout(putMappingRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
         setRandomMasterTimeout(putMappingRequest, expectedParams);

-        Request request = Request.putMapping(putMappingRequest);
+        Request request = RequestConverters.putMapping(putMappingRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         String index = String.join(",", indices);
         if (Strings.hasLength(index)) {
@@ -427,7 +398,7 @@ public class RequestTests extends ESTestCase {
         setRandomIndicesOptions(deleteIndexRequest::indicesOptions, deleteIndexRequest::indicesOptions, expectedParams);

-        Request request = Request.deleteIndex(deleteIndexRequest);
+        Request request = RequestConverters.deleteIndex(deleteIndexRequest);
         assertEquals("/" + String.join(",", indices), request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
@@ -451,7 +422,7 @@ public class RequestTests extends ESTestCase {
         setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams);
         setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams);

-        Request request = Request.openIndex(openIndexRequest);
+        Request request = RequestConverters.openIndex(openIndexRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open");
         assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
         assertThat(expectedParams, equalTo(request.getParameters()));
@@ -474,7 +445,7 @@ public class RequestTests extends ESTestCase {
         setRandomMasterTimeout(closeIndexRequest, expectedParams);
         setRandomIndicesOptions(closeIndexRequest::indicesOptions, closeIndexRequest::indicesOptions, expectedParams);

-        Request request = Request.closeIndex(closeIndexRequest);
+        Request request = RequestConverters.closeIndex(closeIndexRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_close");
         assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
         assertThat(expectedParams, equalTo(request.getParameters()));
@@ -542,7 +513,7 @@ public class RequestTests extends ESTestCase {
             indexRequest.source(builder);
         }

-        Request request = Request.index(indexRequest);
+        Request request = RequestConverters.index(indexRequest);

         if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {
             assertEquals("/" + index + "/" + type + "/" + id + "/_create", request.getEndpoint());
         } else if (id != null) {
@@ -572,7 +543,7 @@ public class RequestTests extends ESTestCase {
         }
         Map<String, String> expectedParams = new HashMap<>();
         setRandomIndicesOptions(refreshRequest::indicesOptions, refreshRequest::indicesOptions, expectedParams);

-        Request request = Request.refresh(refreshRequest);
+        Request request = RequestConverters.refresh(refreshRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -604,7 +575,7 @@ public class RequestTests extends ESTestCase {
         }
         expectedParams.put("wait_if_ongoing", Boolean.toString(flushRequest.waitIfOngoing()));

-        Request request = Request.flush(flushRequest);
+        Request request = RequestConverters.flush(flushRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -641,7 +612,7 @@ public class RequestTests extends ESTestCase {
         }
         expectedParams.put("flush", Boolean.toString(forceMergeRequest.flush()));

-        Request request = Request.forceMerge(forceMergeRequest);
+        Request request = RequestConverters.forceMerge(forceMergeRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -681,7 +652,7 @@ public class RequestTests extends ESTestCase {
             expectedParams.put("fields", String.join(",", clearIndicesCacheRequest.fields()));
         }

-        Request request = Request.clearCache(clearIndicesCacheRequest);
+        Request request = RequestConverters.clearCache(clearIndicesCacheRequest);
         StringJoiner endpoint = new StringJoiner("/", "/", "");
         if (indices != null && indices.length > 0) {
             endpoint.add(String.join(",", indices));
@@ -754,7 +725,7 @@ public class RequestTests extends ESTestCase {
             randomizeFetchSourceContextParams(updateRequest::fetchSource, expectedParams);
         }

-        Request request = Request.update(updateRequest);
+        Request request = RequestConverters.update(updateRequest);
         assertEquals("/" + index + "/" + type + "/" + id + "/_update", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
@@ -791,7 +762,7 @@ public class RequestTests extends ESTestCase {
             UpdateRequest updateRequest = new UpdateRequest();
             updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));
             updateRequest.upsert(new IndexRequest().source(singletonMap("field", "upsert"), XContentType.YAML));
-            Request.update(updateRequest);
+            RequestConverters.update(updateRequest);
         });
         assertEquals("Update request cannot have different content types for doc [JSON] and upsert [YAML] documents",
                 exception.getMessage());
@@ -859,7 +830,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(docWriteRequest);
         }

-        Request request = Request.bulk(bulkRequest);
+        Request request = RequestConverters.bulk(bulkRequest);
         assertEquals("/_bulk", request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
@@ -914,7 +885,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(new UpdateRequest("index", "type", "1").script(mockScript("test")));
             bulkRequest.add(new DeleteRequest("index", "type", "2"));

-            Request request = Request.bulk(bulkRequest);
+            Request request = RequestConverters.bulk(bulkRequest);
             assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
         }
         {
@@ -924,7 +895,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), xContentType));
             bulkRequest.add(new DeleteRequest("index", "type", "2"));

-            Request request = Request.bulk(bulkRequest);
+            Request request = RequestConverters.bulk(bulkRequest);
             assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
         }
         {
@@ -936,14 +907,14 @@ public class RequestTests extends ESTestCase {
                 updateRequest.upsert(new IndexRequest().source(singletonMap("field", "value"), xContentType));
             }

-            Request request = Request.bulk(new BulkRequest().add(updateRequest));
+            Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest));
             assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue());
         }
         {
             BulkRequest bulkRequest = new BulkRequest();
             bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE));
             bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON));
-            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
+            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals("Mismatching content-type found for request with content-type [JSON], " +
                     "previous requests have content-type [SMILE]", exception.getMessage());
         }
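Together these blocks pin down the bulk content-type rules: entries may be JSON or SMILE, every entry must agree, and any other type is rejected (the following hunk covers that case). An illustrative well-formed request under those rules (index, type, ids, and field values are placeholders):

    BulkRequest bulkRequest = new BulkRequest();
    bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "a"), XContentType.JSON));
    bulkRequest.add(new IndexRequest("index", "type", "2").source(singletonMap("field", "b"), XContentType.JSON));
    Request request = RequestConverters.bulk(bulkRequest);
    // the entity now carries the JSON media type, as the first block asserts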
@@ -957,7 +928,7 @@ public class RequestTests extends ESTestCase {
                     .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON))
                     .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))
             );
-            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
+            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals("Mismatching content-type found for request with content-type [SMILE], " +
                     "previous requests have content-type [JSON]", exception.getMessage());
         }
@@ -970,7 +941,7 @@ public class RequestTests extends ESTestCase {
             bulkRequest.add(new DeleteRequest("index", "type", "3"));
             bulkRequest.add(new IndexRequest("index", "type", "4").source(singletonMap("field", "value"), XContentType.JSON));
             bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), xContentType));
-            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> Request.bulk(bulkRequest));
+            IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest));
             assertEquals("Unsupported content-type found for request with content-type [" + xContentType
                     + "], only JSON and SMILE are supported", exception.getMessage());
         }
@@ -978,7 +949,7 @@ public class RequestTests extends ESTestCase {

     public void testSearchNullSource() throws IOException {
         SearchRequest searchRequest = new SearchRequest();
-        Request request = Request.search(searchRequest);
+        Request request = RequestConverters.search(searchRequest);
         assertEquals(HttpPost.METHOD_NAME, request.getMethod());
         assertEquals("/_search", request.getEndpoint());
         assertNull(request.getEntity());
@ -1073,7 +1044,7 @@ public class RequestTests extends ESTestCase {
searchRequest.source(searchSourceBuilder); searchRequest.source(searchSourceBuilder);
} }
Request request = Request.search(searchRequest); Request request = RequestConverters.search(searchRequest);
StringJoiner endpoint = new StringJoiner("/", "/", ""); StringJoiner endpoint = new StringJoiner("/", "/", "");
String index = String.join(",", indices); String index = String.join(",", indices);
if (Strings.hasLength(index)) { if (Strings.hasLength(index)) {
@ -1127,7 +1098,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests())); expectedParams.put("max_concurrent_searches", Integer.toString(multiSearchRequest.maxConcurrentSearchRequests()));
} }
Request request = Request.multiSearch(multiSearchRequest); Request request = RequestConverters.multiSearch(multiSearchRequest);
assertEquals("/_msearch", request.getEndpoint()); assertEquals("/_msearch", request.getEndpoint());
assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertEquals(expectedParams, request.getParameters()); assertEquals(expectedParams, request.getParameters());
@ -1152,7 +1123,7 @@ public class RequestTests extends ESTestCase {
if (randomBoolean()) { if (randomBoolean()) {
searchScrollRequest.scroll(randomPositiveTimeValue()); searchScrollRequest.scroll(randomPositiveTimeValue());
} }
Request request = Request.searchScroll(searchScrollRequest); Request request = RequestConverters.searchScroll(searchScrollRequest);
assertEquals(HttpPost.METHOD_NAME, request.getMethod()); assertEquals(HttpPost.METHOD_NAME, request.getMethod());
assertEquals("/_search/scroll", request.getEndpoint()); assertEquals("/_search/scroll", request.getEndpoint());
assertEquals(0, request.getParameters().size()); assertEquals(0, request.getParameters().size());
@ -1166,7 +1137,7 @@ public class RequestTests extends ESTestCase {
for (int i = 0; i < numScrolls; i++) { for (int i = 0; i < numScrolls; i++) {
clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10)); clearScrollRequest.addScrollId(randomAlphaOfLengthBetween(5, 10));
} }
Request request = Request.clearScroll(clearScrollRequest); Request request = RequestConverters.clearScroll(clearScrollRequest);
assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals(HttpDelete.METHOD_NAME, request.getMethod());
assertEquals("/_search/scroll", request.getEndpoint()); assertEquals("/_search/scroll", request.getEndpoint());
assertEquals(0, request.getParameters().size()); assertEquals(0, request.getParameters().size());
@ -1191,7 +1162,7 @@ public class RequestTests extends ESTestCase {
setRandomLocal(getAliasesRequest, expectedParams); setRandomLocal(getAliasesRequest, expectedParams);
setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams); setRandomIndicesOptions(getAliasesRequest::indicesOptions, getAliasesRequest::indicesOptions, expectedParams);
Request request = Request.existsAlias(getAliasesRequest); Request request = RequestConverters.existsAlias(getAliasesRequest);
StringJoiner expectedEndpoint = new StringJoiner("/", "/", ""); StringJoiner expectedEndpoint = new StringJoiner("/", "/", "");
if (indices != null && indices.length > 0) { if (indices != null && indices.length > 0) {
expectedEndpoint.add(String.join(",", indices)); expectedEndpoint.add(String.join(",", indices));
@ -1209,13 +1180,15 @@ public class RequestTests extends ESTestCase {
public void testExistsAliasNoAliasNoIndex() { public void testExistsAliasNoAliasNoIndex() {
{ {
GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); GetAliasesRequest getAliasesRequest = new GetAliasesRequest();
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest)); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
RequestConverters.existsAlias(getAliasesRequest));
assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
} }
{ {
GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null); GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null);
getAliasesRequest.indices((String[])null); getAliasesRequest.indices((String[])null);
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.existsAlias(getAliasesRequest)); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
RequestConverters.existsAlias(getAliasesRequest));
assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage());
} }
} }
@ -1234,7 +1207,7 @@ public class RequestTests extends ESTestCase {
fieldCapabilitiesRequest::indicesOptions,
indicesOptionsParams);
Request request = Request.fieldCaps(fieldCapabilitiesRequest);
Request request = RequestConverters.fieldCaps(fieldCapabilitiesRequest);
// Verify that the resulting REST request looks as expected.
StringJoiner endpoint = new StringJoiner("/", "/", "");
@ -1270,7 +1243,7 @@ public class RequestTests extends ESTestCase {
Map<String, String> expectedParams = new HashMap<>();
setRandomIndicesOptions(rankEvalRequest::indicesOptions, rankEvalRequest::indicesOptions, expectedParams);
Request request = Request.rankEval(rankEvalRequest);
Request request = RequestConverters.rankEval(rankEvalRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
String index = String.join(",", indices);
if (Strings.hasLength(index)) {
@ -1284,25 +1257,25 @@ public class RequestTests extends ESTestCase {
}
public void testSplit() throws IOException {
resizeTest(ResizeType.SPLIT, Request::split);
resizeTest(ResizeType.SPLIT, RequestConverters::split);
}
public void testSplitWrongResizeType() {
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
resizeRequest.setResizeType(ResizeType.SHRINK);
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.split(resizeRequest));
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.split(resizeRequest));
assertEquals("Wrong resize type [SHRINK] for indices split request", iae.getMessage());
}
public void testShrinkWrongResizeType() {
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
resizeRequest.setResizeType(ResizeType.SPLIT);
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Request.shrink(resizeRequest));
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> RequestConverters.shrink(resizeRequest));
assertEquals("Wrong resize type [SPLIT] for indices shrink request", iae.getMessage());
}
public void testShrink() throws IOException {
resizeTest(ResizeType.SHRINK, Request::shrink);
resizeTest(ResizeType.SHRINK, RequestConverters::shrink);
}
private static void resizeTest(ResizeType resizeType, CheckedFunction<ResizeRequest, Request, IOException> function)
@ -1341,7 +1314,7 @@ public class RequestTests extends ESTestCase {
setRandomMasterTimeout(request, expectedParams);
setRandomTimeout(request::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
Request expectedRequest = Request.clusterPutSettings(request);
Request expectedRequest = RequestConverters.clusterPutSettings(request);
assertEquals("/_cluster/settings", expectedRequest.getEndpoint());
assertEquals(HttpPut.METHOD_NAME, expectedRequest.getMethod());
assertEquals(expectedParams, expectedRequest.getParameters());
@ -1374,7 +1347,7 @@ public class RequestTests extends ESTestCase {
}
setRandomWaitForActiveShards(rolloverRequest.getCreateIndexRequest()::waitForActiveShards, expectedParams);
Request request = Request.rollover(rolloverRequest);
Request request = RequestConverters.rollover(rolloverRequest);
if (rolloverRequest.getNewIndexName() == null) {
assertEquals("/" + rolloverRequest.getAlias() + "/_rollover", request.getEndpoint());
} else {
@ -1399,7 +1372,7 @@ public class RequestTests extends ESTestCase {
}
}
Request request = Request.indexPutSettings(updateSettingsRequest);
Request request = RequestConverters.indexPutSettings(updateSettingsRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
if (indices != null && indices.length > 0) {
endpoint.add(String.join(",", indices));
@ -1417,143 +1390,115 @@ public class RequestTests extends ESTestCase {
assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity)));
}
public void testParams() {
final int nbParams = randomIntBetween(0, 10);
Request.Params params = Request.Params.builder();
Map<String, String> expectedParams = new HashMap<>();
for (int i = 0; i < nbParams; i++) {
String paramName = "p_" + i;
String paramValue = randomAlphaOfLength(5);
params.putParam(paramName, paramValue);
expectedParams.put(paramName, paramValue);
}
Map<String, String> requestParams = params.getParams();
assertEquals(nbParams, requestParams.size());
assertEquals(expectedParams, requestParams);
}
public void testParamsNoDuplicates() {
Request.Params params = Request.Params.builder();
params.putParam("test", "1");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> params.putParam("test", "2"));
assertEquals("Request parameter [test] is already registered", e.getMessage());
Map<String, String> requestParams = params.getParams();
assertEquals(1L, requestParams.size());
assertEquals("1", requestParams.values().iterator().next());
}
public void testEndpointBuilder() {
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder();
EndpointBuilder endpointBuilder = new EndpointBuilder();
assertEquals("/", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart(Strings.EMPTY_ARRAY);
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart(Strings.EMPTY_ARRAY);
assertEquals("/", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("");
assertEquals("/", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a", "b");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b");
assertEquals("/a/b", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a").addPathPart("b")
.addPathPartAsIs("_create");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b")
.addPathPartAsIs("_create");
assertEquals("/a/b/_create", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a", "b", "c")
.addPathPartAsIs("_create");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c")
.addPathPartAsIs("_create");
assertEquals("/a/b/c/_create", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("a").addPathPartAsIs("_create");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPartAsIs("_create");
assertEquals("/a/_create", endpointBuilder.build());
}
}
public void testEndpointBuilderEncodeParts() {
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("-#index1,index#2", "type", "id");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("-#index1,index#2", "type", "id");
assertEquals("/-%23index1,index%232/type/id", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type#2", "id");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type#2", "id");
assertEquals("/index/type%232/id", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "this/is/the/id");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "this/is/the/id");
assertEquals("/index/type/this%2Fis%2Fthe%2Fid", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "this|is|the|id");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "this|is|the|id");
assertEquals("/index/type/this%7Cis%7Cthe%7Cid", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("index", "type", "id#1");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("index", "type", "id#1");
assertEquals("/index/type/id%231", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("<logstash-{now/M}>", "_search");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("<logstash-{now/M}>", "_search");
assertEquals("/%3Clogstash-%7Bnow%2FM%7D%3E/_search", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("中文");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("中文");
assertEquals("/中文", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo bar");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo bar");
assertEquals("/foo%20bar", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo+bar");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo+bar");
assertEquals("/foo+bar", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo+bar");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo+bar");
assertEquals("/foo+bar", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo/bar");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo/bar");
assertEquals("/foo%2Fbar", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("foo^bar");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("foo^bar");
assertEquals("/foo%5Ebar", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder().addPathPart("cluster1:index1,index2")
.addPathPartAsIs("_search");
EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2")
.addPathPartAsIs("_search");
assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build());
}
{
Request.EndpointBuilder endpointBuilder = new Request.EndpointBuilder()
.addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear");
EndpointBuilder endpointBuilder = new EndpointBuilder()
.addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear");
assertEquals("/index1,index2/cache/clear", endpointBuilder.build());
}
}
public void testEndpoint() {
assertEquals("/index/type/id", Request.endpoint("index", "type", "id"));
assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id"));
assertEquals("/index/type/id/_endpoint", Request.endpoint("index", "type", "id", "_endpoint"));
assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint"));
assertEquals("/index1,index2", Request.endpoint(new String[]{"index1", "index2"}));
assertEquals("/index1,index2", RequestConverters.endpoint(new String[]{"index1", "index2"}));
assertEquals("/index1,index2/_endpoint", Request.endpoint(new String[]{"index1", "index2"}, "_endpoint"));
assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint"));
assertEquals("/index1,index2/type1,type2/_endpoint", Request.endpoint(new String[]{"index1", "index2"},
new String[]{"type1", "type2"}, "_endpoint"));
assertEquals("/index1,index2/type1,type2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"},
new String[]{"type1", "type2"}, "_endpoint"));
assertEquals("/index1,index2/_endpoint/suffix1,suffix2", Request.endpoint(new String[]{"index1", "index2"},
"_endpoint", new String[]{"suffix1", "suffix2"}));
assertEquals("/index1,index2/_endpoint/suffix1,suffix2", RequestConverters.endpoint(new String[]{"index1", "index2"},
"_endpoint", new String[]{"suffix1", "suffix2"}));
}
public void testCreateContentType() {
final XContentType xContentType = randomFrom(XContentType.values());
assertEquals(xContentType.mediaTypeWithoutParameters(), Request.createContentType(xContentType).getMimeType());
assertEquals(xContentType.mediaTypeWithoutParameters(), RequestConverters.createContentType(xContentType).getMimeType());
}
public void testEnforceSameContentType() {
View File
@ -94,14 +94,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.client.RestClientTestUtil.randomHeaders;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.mockito.Matchers.anyMapOf;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Matchers.anyString;
import static org.mockito.Matchers.anyVararg;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isNotNull;
import static org.mockito.Matchers.isNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@ -134,31 +127,22 @@ public class RestHighLevelClientTests extends ESTestCase {
Header[] headers = randomHeaders(random(), "Header");
Response response = mock(Response.class);
when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.OK));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
when(restClient.performRequest(any(Request.class))).thenReturn(response);
assertTrue(restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testPing404NotFound() throws IOException {
Header[] headers = randomHeaders(random(), "Header");
Response response = mock(Response.class);
when(response.getStatusLine()).thenReturn(newStatusLine(RestStatus.NOT_FOUND));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
when(restClient.performRequest(any(Request.class))).thenReturn(response);
assertFalse(restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testPingSocketTimeout() throws IOException {
Header[] headers = randomHeaders(random(), "Header");
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(new SocketTimeoutException());
when(restClient.performRequest(any(Request.class))).thenThrow(new SocketTimeoutException());
expectThrows(SocketTimeoutException.class, () -> restHighLevelClient.ping(headers));
verify(restClient).performRequest(eq(HttpHead.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testInfo() throws IOException {
@ -168,8 +152,6 @@ public class RestHighLevelClientTests extends ESTestCase {
mockResponse(testInfo);
MainResponse receivedInfo = restHighLevelClient.info(headers);
assertEquals(testInfo, receivedInfo);
verify(restClient).performRequest(eq(HttpGet.METHOD_NAME), eq("/"), eq(Collections.emptyMap()),
isNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testSearchScroll() throws IOException {
@ -185,8 +167,6 @@ public class RestHighLevelClientTests extends ESTestCase {
assertEquals(5, searchResponse.getTotalShards());
assertEquals(5, searchResponse.getSuccessfulShards());
assertEquals(100, searchResponse.getTook().getMillis());
verify(restClient).performRequest(eq(HttpPost.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()),
isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
public void testClearScroll() throws IOException {
@ -198,17 +178,14 @@ public class RestHighLevelClientTests extends ESTestCase {
ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, headers);
assertEquals(mockClearScrollResponse.isSucceeded(), clearScrollResponse.isSucceeded());
assertEquals(mockClearScrollResponse.getNumFreed(), clearScrollResponse.getNumFreed());
verify(restClient).performRequest(eq(HttpDelete.METHOD_NAME), eq("/_search/scroll"), eq(Collections.emptyMap()),
isNotNull(HttpEntity.class), argThat(new HeadersVarargMatcher(headers)));
}
private void mockResponse(ToXContent toXContent) throws IOException {
Response response = mock(Response.class);
ContentType contentType = ContentType.parse(Request.REQUEST_BODY_CONTENT_TYPE.mediaType());
ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType());
String requestBody = toXContent(toXContent, Request.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString();
String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString();
when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType));
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(response);
when(restClient.performRequest(any(Request.class))).thenReturn(response);
}
public void testRequestValidation() {
@ -336,13 +313,11 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnSuccess() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenReturn(mockResponse);
when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse);
{
Integer result = restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> response.getStatusLine().getStatusCode(), Collections.emptySet());
@ -358,14 +333,12 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
@ -376,16 +349,14 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithEntity() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}",
ContentType.APPLICATION_JSON));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
@ -396,15 +367,13 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
@ -416,15 +385,13 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
RestStatus restStatus = randomFrom(RestStatus.values());
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus));
httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> response.getStatusLine().getStatusCode(), Collections.emptySet()));
@ -436,13 +403,11 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
//although we got an exception, we turn it into a successful response because the status code was provided among ignores
assertEquals(Integer.valueOf(404), restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> response.getStatusLine().getStatusCode(), Collections.singleton(404)));
@ -450,13 +415,11 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> {throw new IllegalStateException();}, Collections.singleton(404)));
@ -467,15 +430,13 @@ public class RestHighLevelClientTests extends ESTestCase {
public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException {
MainRequest mainRequest = new MainRequest();
CheckedFunction<MainRequest, Request, IOException> requestConverter = request ->
new Request(HttpGet.METHOD_NAME, "/", Collections.emptyMap(), null);
CheckedFunction<MainRequest, Request, IOException> requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/");
HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND));
httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}",
ContentType.APPLICATION_JSON));
Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse);
ResponseException responseException = new ResponseException(mockResponse);
when(restClient.performRequest(anyString(), anyString(), anyMapOf(String.class, String.class),
anyObject(), anyVararg())).thenThrow(responseException);
when(restClient.performRequest(any(Request.class))).thenThrow(responseException);
ElasticsearchException elasticsearchException = expectThrows(ElasticsearchException.class,
() -> restHighLevelClient.performRequest(mainRequest, requestConverter,
response -> {throw new IllegalStateException();}, Collections.singleton(404)));
@ -696,23 +657,6 @@ public class RestHighLevelClientTests extends ESTestCase {
}
}
private static class HeadersVarargMatcher extends ArgumentMatcher<Header[]> implements VarargMatcher {
private Header[] expectedHeaders;
HeadersVarargMatcher(Header... expectedHeaders) {
this.expectedHeaders = expectedHeaders;
}
@Override
public boolean matches(Object varargArgument) {
if (varargArgument instanceof Header[]) {
Header[] actualHeaders = (Header[]) varargArgument;
return new ArrayEquals(expectedHeaders).matches(actualHeaders);
}
return false;
}
}
private static StatusLine newStatusLine(RestStatus restStatus) {
return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name());
}
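The test changes above collapse the old five-matcher Mockito stubbing into a single matcher on the new Request type. A minimal, self-contained sketch of the new pattern; the class and method names here are illustrative, not part of the diff:

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Sketch only: one any(Request.class) matcher replaces the old
// anyString()/anyMapOf()/anyObject()/anyVararg() chain when stubbing
// RestClient.performRequest(Request).
public class PerformRequestStubbingSketch {
    Response stub() throws Exception {
        RestClient restClient = mock(RestClient.class);
        Response response = mock(Response.class);
        when(restClient.performRequest(any(Request.class))).thenReturn(response);
        return restClient.performRequest(new Request("HEAD", "/"));
    }
}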
View File
@ -0,0 +1,202 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import static java.util.Collections.unmodifiableMap;
/**
* HTTP Request to Elasticsearch.
*/
public final class Request {
private static final Header[] NO_HEADERS = new Header[0];
private final String method;
private final String endpoint;
private final Map<String, String> parameters = new HashMap<>();
private HttpEntity entity;
private Header[] headers = NO_HEADERS;
private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory =
HttpAsyncResponseConsumerFactory.DEFAULT;
/**
* Create the {@linkplain Request}.
* @param method the HTTP method
* @param endpoint the path of the request (without scheme, host, port, or prefix)
*/
public Request(String method, String endpoint) {
this.method = Objects.requireNonNull(method, "method cannot be null");
this.endpoint = Objects.requireNonNull(endpoint, "endpoint cannot be null");
}
/**
* The HTTP method.
*/
public String getMethod() {
return method;
}
/**
* The path of the request (without scheme, host, port, or prefix).
*/
public String getEndpoint() {
return endpoint;
}
/**
* Add a query string parameter.
* @param name the name of the url parameter. Must not be null.
* @param value the value of the url parameter. If {@code null} then
* the parameter is sent as {@code name} rather than {@code name=value}
* @throws IllegalArgumentException if a parameter with that name has
* already been set
*/
public void addParameter(String name, String value) {
Objects.requireNonNull(name, "url parameter name cannot be null");
// .putIfAbsent(name, value) except we are in Java 7 which doesn't have that.
if (parameters.containsKey(name)) {
throw new IllegalArgumentException("url parameter [" + name + "] has already been set to [" + parameters.get(name) + "]");
} else {
parameters.put(name, value);
}
}
/**
* Query string parameters. The returned map is an unmodifiable view of the
* map in the request so calls to {@link #addParameter(String, String)}
* will change it.
*/
public Map<String, String> getParameters() {
return unmodifiableMap(parameters);
}
/**
* Set the body of the request. If not set or set to {@code null} then no
* body is sent with the request.
*/
public void setEntity(HttpEntity entity) {
this.entity = entity;
}
/**
* The body of the request. If {@code null} then no body
* is sent with the request.
*/
public HttpEntity getEntity() {
return entity;
}
/**
* Set the headers to attach to the request.
*/
public void setHeaders(Header... headers) {
Objects.requireNonNull(headers, "headers cannot be null");
for (Header header : headers) {
Objects.requireNonNull(header, "header cannot be null");
}
this.headers = headers;
}
/**
* Headers to attach to the request.
*/
public Header[] getHeaders() {
return headers;
}
/**
* Set the {@link HttpAsyncResponseConsumerFactory} used to create one
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
* response body gets streamed from a non-blocking HTTP connection on the
* client side.
*/
public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
this.httpAsyncResponseConsumerFactory =
Objects.requireNonNull(httpAsyncResponseConsumerFactory, "httpAsyncResponseConsumerFactory cannot be null");
}
/**
* The {@link HttpAsyncResponseConsumerFactory} used to create one
* {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
* response body gets streamed from a non-blocking HTTP connection on the
* client side.
*/
public HttpAsyncResponseConsumerFactory getHttpAsyncResponseConsumerFactory() {
return httpAsyncResponseConsumerFactory;
}
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append("Request{");
b.append("method='").append(method).append('\'');
b.append(", endpoint='").append(endpoint).append('\'');
if (false == parameters.isEmpty()) {
b.append(", params=").append(parameters);
}
if (entity != null) {
b.append(", entity=").append(entity);
}
if (headers.length > 0) {
b.append(", headers=");
for (int h = 0; h < headers.length; h++) {
if (h != 0) {
b.append(',');
}
b.append(headers[h].toString());
}
}
if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory);
}
return b.append('}').toString();
}
@Override
public boolean equals(Object obj) {
if (obj == null || (obj.getClass() != getClass())) {
return false;
}
if (obj == this) {
return true;
}
Request other = (Request) obj;
return method.equals(other.method)
&& endpoint.equals(other.endpoint)
&& parameters.equals(other.parameters)
&& Objects.equals(entity, other.entity)
&& Arrays.equals(headers, other.headers)
&& httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
}
@Override
public int hashCode() {
return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory);
}
}
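A short usage sketch of the class above; the HTTP method, endpoint, and body are made-up values for illustration only:

import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;

public class RequestUsageSketch {
    Request build() {
        // Hypothetical endpoint and body, for illustration.
        Request request = new Request("PUT", "/my-index/_doc/1");
        request.addParameter("refresh", "true");
        request.setEntity(new NStringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON));
        // Registering the same parameter name again would throw
        // IllegalArgumentException, per addParameter above.
        return request;
    }
}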
View File
@ -143,6 +143,61 @@ public class RestClient implements Closeable {
this.blacklist.clear();
}
/**
* Sends a request to the Elasticsearch cluster that the client points to.
* Blocks until the request is completed and returns its response or fails
* by throwing an exception. Selects a host out of the provided ones in a
* round-robin fashion. Failing hosts are marked dead and retried after a
* certain amount of time (minimum 1 minute, maximum 30 minutes), depending
* on how many times they previously failed (the more failures, the later
* they will be retried). In case of failures all of the alive nodes (or
* dead nodes that deserve a retry) are retried until one responds or none
* of them does, in which case an {@link IOException} will be thrown.
*
* This method works by performing an asynchronous call and waiting
* for the result. If the asynchronous call throws an exception we wrap
* it and rethrow it so that the stack trace attached to the exception
* contains the call site. While we attempt to preserve the original
* exception, this isn't always possible, and we likely haven't covered all
* of the cases. You can get the original exception from
* {@link Exception#getCause()}.
*
* @param request the request to perform
* @return the response returned by Elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(Request request) throws IOException {
SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
performRequestAsyncNoCatch(request, listener);
return listener.get();
}
/**
* Sends a request to the Elasticsearch cluster that the client points to.
* The request is executed asynchronously and the provided
* {@link ResponseListener} gets notified upon request completion or
* failure. Selects a host out of the provided ones in a round-robin
* fashion. Failing hosts are marked dead and retried after a certain
* amount of time (minimum 1 minute, maximum 30 minutes), depending on how
* many times they previously failed (the more failures, the later they
* will be retried). In case of failures all of the alive nodes (or dead
* nodes that deserve a retry) are retried until one responds or none of
* them does, in which case an {@link IOException} will be thrown.
*
* @param request the request to perform
* @param responseListener the {@link ResponseListener} to notify when the
* request is completed or fails
*/
public void performRequestAsync(Request request, ResponseListener responseListener) {
try {
performRequestAsyncNoCatch(request, responseListener);
} catch (Exception e) {
responseListener.onFailure(e);
}
}
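A sketch of calling the two new entry points, assuming an already-built RestClient instance passed in by the caller:

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;

public class PerformRequestUsageSketch {
    void demo(RestClient restClient) throws Exception {
        // Synchronous: blocks until a response arrives or an exception is thrown.
        Response syncResponse = restClient.performRequest(new Request("GET", "/"));

        // Asynchronous: the listener is notified on completion or failure.
        restClient.performRequestAsync(new Request("GET", "/"), new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                // handle the response
            }

            @Override
            public void onFailure(Exception exception) {
                // handle the failure
            }
        });
    }
}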
/**
* Sends a request to the Elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
@ -157,7 +212,9 @@ public class RestClient implements Closeable {
* @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
Request request = new Request(method, endpoint);
request.setHeaders(headers);
return performRequest(request);
}
/**
@ -174,7 +231,10 @@ public class RestClient implements Closeable {
* @throws ResponseException in case Elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
return performRequest(method, endpoint, params, (HttpEntity)null, headers);
Request request = new Request(method, endpoint);
addParameters(request, params);
request.setHeaders(headers);
return performRequest(request);
}
/**
@ -195,7 +255,11 @@ public class RestClient implements Closeable {
*/
public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, Header... headers) throws IOException {
return performRequest(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, headers);
Request request = new Request(method, endpoint);
addParameters(request, params);
request.setEntity(entity);
request.setHeaders(headers);
return performRequest(request);
}
/** /**
@ -229,10 +293,12 @@ public class RestClient implements Closeable {
public Response performRequest(String method, String endpoint, Map<String, String> params, public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
Header... headers) throws IOException { Header... headers) throws IOException {
SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis); Request request = new Request(method, endpoint);
performRequestAsyncNoCatch(method, endpoint, params, entity, httpAsyncResponseConsumerFactory, addParameters(request, params);
listener, headers); request.setEntity(entity);
return listener.get(); request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
request.setHeaders(headers);
return performRequest(request);
} }
/** /**
@ -246,7 +312,15 @@ public class RestClient implements Closeable {
* @param headers the optional request headers * @param headers the optional request headers
*/ */
public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) { public void performRequestAsync(String method, String endpoint, ResponseListener responseListener, Header... headers) {
performRequestAsync(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers); Request request;
try {
request = new Request(method, endpoint);
request.setHeaders(headers);
} catch (Exception e) {
responseListener.onFailure(e);
return;
}
performRequestAsync(request, responseListener);
} }
/** /**
@ -262,7 +336,16 @@ public class RestClient implements Closeable {
*/ */
public void performRequestAsync(String method, String endpoint, Map<String, String> params, public void performRequestAsync(String method, String endpoint, Map<String, String> params,
ResponseListener responseListener, Header... headers) { ResponseListener responseListener, Header... headers) {
performRequestAsync(method, endpoint, params, null, responseListener, headers); Request request;
try {
request = new Request(method, endpoint);
addParameters(request, params);
request.setHeaders(headers);
} catch (Exception e) {
responseListener.onFailure(e);
return;
}
performRequestAsync(request, responseListener);
} }
/** /**
@ -281,7 +364,17 @@ public class RestClient implements Closeable {
*/ */
public void performRequestAsync(String method, String endpoint, Map<String, String> params, public void performRequestAsync(String method, String endpoint, Map<String, String> params,
HttpEntity entity, ResponseListener responseListener, Header... headers) { HttpEntity entity, ResponseListener responseListener, Header... headers) {
performRequestAsync(method, endpoint, params, entity, HttpAsyncResponseConsumerFactory.DEFAULT, responseListener, headers); Request request;
try {
request = new Request(method, endpoint);
addParameters(request, params);
request.setEntity(entity);
request.setHeaders(headers);
} catch (Exception e) {
responseListener.onFailure(e);
return;
}
performRequestAsync(request, responseListener);
} }
/** /**
@ -305,24 +398,27 @@ public class RestClient implements Closeable {
public void performRequestAsync(String method, String endpoint, Map<String, String> params, public void performRequestAsync(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
ResponseListener responseListener, Header... headers) { ResponseListener responseListener, Header... headers) {
Request request;
try { try {
performRequestAsyncNoCatch(method, endpoint, params, entity, httpAsyncResponseConsumerFactory, request = new Request(method, endpoint);
responseListener, headers); addParameters(request, params);
request.setEntity(entity);
request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
request.setHeaders(headers);
} catch (Exception e) { } catch (Exception e) {
responseListener.onFailure(e); responseListener.onFailure(e);
return;
} }
performRequestAsync(request, responseListener);
} }
void performRequestAsyncNoCatch(String method, String endpoint, Map<String, String> params, void performRequestAsyncNoCatch(Request request, ResponseListener listener) {
HttpEntity entity, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory, Map<String, String> requestParams = new HashMap<>(request.getParameters());
ResponseListener responseListener, Header... headers) {
Objects.requireNonNull(params, "params must not be null");
Map<String, String> requestParams = new HashMap<>(params);
//ignore is a special parameter supported by the clients, shouldn't be sent to es //ignore is a special parameter supported by the clients, shouldn't be sent to es
String ignoreString = requestParams.remove("ignore"); String ignoreString = requestParams.remove("ignore");
Set<Integer> ignoreErrorCodes; Set<Integer> ignoreErrorCodes;
if (ignoreString == null) { if (ignoreString == null) {
if (HttpHead.METHOD_NAME.equals(method)) { if (HttpHead.METHOD_NAME.equals(request.getMethod())) {
//404 never causes error if returned for a HEAD request //404 never causes error if returned for a HEAD request
ignoreErrorCodes = Collections.singleton(404); ignoreErrorCodes = Collections.singleton(404);
} else { } else {
@ -331,7 +427,7 @@ public class RestClient implements Closeable {
} else { } else {
String[] ignoresArray = ignoreString.split(","); String[] ignoresArray = ignoreString.split(",");
ignoreErrorCodes = new HashSet<>(); ignoreErrorCodes = new HashSet<>();
if (HttpHead.METHOD_NAME.equals(method)) { if (HttpHead.METHOD_NAME.equals(request.getMethod())) {
//404 never causes error if returned for a HEAD request //404 never causes error if returned for a HEAD request
ignoreErrorCodes.add(404); ignoreErrorCodes.add(404);
} }
@ -343,13 +439,13 @@ public class RestClient implements Closeable {
} }
} }
} }
URI uri = buildUri(pathPrefix, endpoint, requestParams); URI uri = buildUri(pathPrefix, request.getEndpoint(), requestParams);
HttpRequestBase request = createHttpRequest(method, uri, entity); HttpRequestBase httpRequest = createHttpRequest(request.getMethod(), uri, request.getEntity());
setHeaders(request, headers); setHeaders(httpRequest, request.getHeaders());
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener); FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener);
long startTime = System.nanoTime(); long startTime = System.nanoTime();
performRequestAsync(startTime, nextHost(), request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes,
failureTrackingResponseListener); request.getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener);
} }
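    // A hedged sketch of the client-only "ignore" parameter handled above: the
    // listed status codes are delivered as a Response instead of surfacing as a
    // ResponseException. The index and id are made up for illustration, and
    // "client" is assumed to be a RestClient built elsewhere.
    //
    //   Request request = new Request("GET", "/my_index/_doc/1");
    //   request.addParameter("ignore", "404"); // a missing document is not an error here
    //   Response response = client.performRequest(request);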
private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request, private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,
@ -428,11 +524,9 @@ public class RestClient implements Closeable {
} }
private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
Objects.requireNonNull(requestHeaders, "request headers must not be null");
// request headers override default headers, so we don't add default headers if they exist as request headers // request headers override default headers, so we don't add default headers if they exist as request headers
final Set<String> requestNames = new HashSet<>(requestHeaders.length); final Set<String> requestNames = new HashSet<>(requestHeaders.length);
for (Header requestHeader : requestHeaders) { for (Header requestHeader : requestHeaders) {
Objects.requireNonNull(requestHeader, "request header must not be null");
httpRequest.addHeader(requestHeader); httpRequest.addHeader(requestHeader);
requestNames.add(requestHeader.getName()); requestNames.add(requestHeader.getName());
} }
@ -766,4 +860,15 @@ public class RestClient implements Closeable {
this.authCache = authCache; this.authCache = authCache;
} }
} }
/**
* Add all parameters from a map to a {@link Request}. This only exists
* to support methods that exist for backwards compatibility.
*/
private static void addParameters(Request request, Map<String, String> parameters) {
Objects.requireNonNull(parameters, "parameters cannot be null");
for (Map.Entry<String, String> entry : parameters.entrySet()) {
request.addParameter(entry.getKey(), entry.getValue());
}
}
} }

View File

@ -0,0 +1,137 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import java.util.HashMap;
import java.util.Map;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class RequestTests extends RestClientTestCase {
public void testConstructor() {
final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
try {
new Request(null, endpoint);
fail("expected failure");
} catch (NullPointerException e) {
assertEquals("method cannot be null", e.getMessage());
}
try {
new Request(method, null);
fail("expected failure");
} catch (NullPointerException e) {
assertEquals("endpoint cannot be null", e.getMessage());
}
final Request request = new Request(method, endpoint);
assertEquals(method, request.getMethod());
assertEquals(endpoint, request.getEndpoint());
}
public void testAddParameters() {
final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
int parametersCount = between(1, 3);
final Map<String, String> parameters = new HashMap<>(parametersCount);
while (parameters.size() < parametersCount) {
parameters.put(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5));
}
Request request = new Request(method, endpoint);
try {
request.addParameter(null, "value");
fail("expected failure");
} catch (NullPointerException e) {
assertEquals("url parameter name cannot be null", e.getMessage());
}
for (Map.Entry<String, String> entry : parameters.entrySet()) {
request.addParameter(entry.getKey(), entry.getValue());
}
assertEquals(parameters, request.getParameters());
// Test that adding parameters with a null value is ok.
request.addParameter("is_null", null);
parameters.put("is_null", null);
assertEquals(parameters, request.getParameters());
// Test that adding a duplicate parameter fails
String firstValue = randomBoolean() ? null : "value";
request.addParameter("name", firstValue);
try {
request.addParameter("name", randomBoolean() ? firstValue : "second_value");
fail("expected failure");
} catch (IllegalArgumentException e) {
assertEquals("url parameter [name] has already been set to [" + firstValue + "]", e.getMessage());
}
}
public void testSetEntity() {
final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
final HttpEntity entity =
randomBoolean() ? new StringEntity(randomAsciiLettersOfLengthBetween(1, 100), ContentType.TEXT_PLAIN) : null;
Request request = new Request(method, endpoint);
request.setEntity(entity);
assertEquals(entity, request.getEntity());
}
public void testSetHeaders() {
final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
Request request = new Request(method, endpoint);
try {
request.setHeaders((Header[]) null);
fail("expected failure");
} catch (NullPointerException e) {
assertEquals("headers cannot be null", e.getMessage());
}
try {
request.setHeaders(new Header [] {null});
fail("expected failure");
} catch (NullPointerException e) {
assertEquals("header cannot be null", e.getMessage());
}
Header[] headers = new Header[between(0, 5)];
for (int i = 0; i < headers.length; i++) {
headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
}
request.setHeaders(headers);
assertArrayEquals(headers, request.getHeaders());
}
// TODO equals and hashcode
}

View File

@ -138,7 +138,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom()); final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom());
Response response; Response response;
try { try {
response = restClient.performRequest(method, "/" + statusCode); response = restClient.performRequest(new Request(method, "/" + statusCode));
} catch(ResponseException responseException) { } catch(ResponseException responseException) {
response = responseException.getResponse(); response = responseException.getResponse();
} }
@ -156,7 +156,7 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
final String method = RestClientTestUtil.randomHttpMethod(getRandom()); final String method = RestClientTestUtil.randomHttpMethod(getRandom());
//we don't test status codes that are subject to retries as they interfere with hosts being stopped //we don't test status codes that are subject to retries as they interfere with hosts being stopped
final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom()); final int statusCode = randomBoolean() ? randomOkStatusCode(getRandom()) : randomErrorNoRetryStatusCode(getRandom());
restClient.performRequestAsync(method, "/" + statusCode, new ResponseListener() { restClient.performRequestAsync(new Request(method, "/" + statusCode), new ResponseListener() {
@Override @Override
public void onSuccess(Response response) { public void onSuccess(Response response) {
responses.add(new TestResponse(method, statusCode, response)); responses.add(new TestResponse(method, statusCode, response));

View File

@ -62,6 +62,7 @@ import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.Future; import java.util.concurrent.Future;
@ -280,13 +281,17 @@ public class RestClientSingleHostTests extends RestClientTestCase {
StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON); StringEntity entity = new StringEntity(body, ContentType.APPLICATION_JSON);
for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
for (int okStatusCode : getOkStatusCodes()) { for (int okStatusCode : getOkStatusCodes()) {
Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.<String, String>emptyMap(), entity); Request request = new Request(method, "/" + okStatusCode);
request.setEntity(entity);
Response response = restClient.performRequest(request);
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
} }
for (int errorStatusCode : getAllErrorStatusCodes()) { for (int errorStatusCode : getAllErrorStatusCodes()) {
Request request = new Request(method, "/" + errorStatusCode);
request.setEntity(entity);
try { try {
restClient.performRequest(method, "/" + errorStatusCode, Collections.<String, String>emptyMap(), entity); restClient.performRequest(request);
fail("request should have failed"); fail("request should have failed");
} catch(ResponseException e) { } catch(ResponseException e) {
Response response = e.getResponse(); Response response = e.getResponse();
@ -297,8 +302,10 @@ public class RestClientSingleHostTests extends RestClientTestCase {
} }
} }
for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
Request request = new Request(method, "/" + randomStatusCode(getRandom()));
request.setEntity(entity);
try { try {
restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.<String, String>emptyMap(), entity); restClient.performRequest(request);
fail("request should have failed"); fail("request should have failed");
} catch(UnsupportedOperationException e) { } catch(UnsupportedOperationException e) {
assertThat(e.getMessage(), equalTo(method + " with body is not supported")); assertThat(e.getMessage(), equalTo(method + " with body is not supported"));
@ -306,7 +313,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
} }
} }
public void testNullHeaders() throws IOException { /**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
*/
@Deprecated
public void testPerformRequestOldStyleNullHeaders() throws IOException {
String method = randomHttpMethod(getRandom()); String method = randomHttpMethod(getRandom());
int statusCode = randomStatusCode(getRandom()); int statusCode = randomStatusCode(getRandom());
try { try {
@ -323,20 +334,24 @@ public class RestClientSingleHostTests extends RestClientTestCase {
} }
} }
public void testNullParams() throws IOException { /**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
*/
@Deprecated
public void testPerformRequestOldStyleWithNullParams() throws IOException {
String method = randomHttpMethod(getRandom()); String method = randomHttpMethod(getRandom());
int statusCode = randomStatusCode(getRandom()); int statusCode = randomStatusCode(getRandom());
try { try {
restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null); restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null);
fail("request should have failed"); fail("request should have failed");
} catch(NullPointerException e) { } catch(NullPointerException e) {
assertEquals("params must not be null", e.getMessage()); assertEquals("parameters cannot be null", e.getMessage());
} }
try { try {
restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null); restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
fail("request should have failed"); fail("request should have failed");
} catch(NullPointerException e) { } catch(NullPointerException e) {
assertEquals("params must not be null", e.getMessage()); assertEquals("parameters cannot be null", e.getMessage());
} }
} }
@ -348,9 +363,11 @@ public class RestClientSingleHostTests extends RestClientTestCase {
for (String method : getHttpMethods()) { for (String method : getHttpMethods()) {
final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
final int statusCode = randomStatusCode(getRandom()); final int statusCode = randomStatusCode(getRandom());
Request request = new Request(method, "/" + statusCode);
request.setHeaders(requestHeaders);
Response esResponse; Response esResponse;
try { try {
esResponse = restClient.performRequest(method, "/" + statusCode, requestHeaders); esResponse = restClient.performRequest(request);
} catch(ResponseException e) { } catch(ResponseException e) {
esResponse = e.getResponse(); esResponse = e.getResponse();
} }
@ -361,16 +378,15 @@ public class RestClientSingleHostTests extends RestClientTestCase {
private HttpUriRequest performRandomRequest(String method) throws Exception { private HttpUriRequest performRandomRequest(String method) throws Exception {
String uriAsString = "/" + randomStatusCode(getRandom()); String uriAsString = "/" + randomStatusCode(getRandom());
Request request = new Request(method, uriAsString);
URIBuilder uriBuilder = new URIBuilder(uriAsString); URIBuilder uriBuilder = new URIBuilder(uriAsString);
final Map<String, String> params = new HashMap<>(); if (randomBoolean()) {
boolean hasParams = randomBoolean();
if (hasParams) {
int numParams = randomIntBetween(1, 3); int numParams = randomIntBetween(1, 3);
for (int i = 0; i < numParams; i++) { for (int i = 0; i < numParams; i++) {
String paramKey = "param-" + i; String name = "param-" + i;
String paramValue = randomAsciiOfLengthBetween(3, 10); String value = randomAsciiAlphanumOfLengthBetween(3, 10);
params.put(paramKey, paramValue); request.addParameter(name, value);
uriBuilder.addParameter(paramKey, paramValue); uriBuilder.addParameter(name, value);
} }
} }
if (randomBoolean()) { if (randomBoolean()) {
@ -379,81 +395,82 @@ public class RestClientSingleHostTests extends RestClientTestCase {
if (randomBoolean()) { if (randomBoolean()) {
ignore += "," + Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes())); ignore += "," + Integer.toString(randomFrom(RestClientTestUtil.getAllErrorStatusCodes()));
} }
params.put("ignore", ignore); request.addParameter("ignore", ignore);
} }
URI uri = uriBuilder.build(); URI uri = uriBuilder.build();
HttpUriRequest request; HttpUriRequest expectedRequest;
switch(method) { switch(method) {
case "DELETE": case "DELETE":
request = new HttpDeleteWithEntity(uri); expectedRequest = new HttpDeleteWithEntity(uri);
break; break;
case "GET": case "GET":
request = new HttpGetWithEntity(uri); expectedRequest = new HttpGetWithEntity(uri);
break; break;
case "HEAD": case "HEAD":
request = new HttpHead(uri); expectedRequest = new HttpHead(uri);
break; break;
case "OPTIONS": case "OPTIONS":
request = new HttpOptions(uri); expectedRequest = new HttpOptions(uri);
break; break;
case "PATCH": case "PATCH":
request = new HttpPatch(uri); expectedRequest = new HttpPatch(uri);
break; break;
case "POST": case "POST":
request = new HttpPost(uri); expectedRequest = new HttpPost(uri);
break; break;
case "PUT": case "PUT":
request = new HttpPut(uri); expectedRequest = new HttpPut(uri);
break; break;
case "TRACE": case "TRACE":
request = new HttpTrace(uri); expectedRequest = new HttpTrace(uri);
break; break;
default: default:
throw new UnsupportedOperationException("method not supported: " + method); throw new UnsupportedOperationException("method not supported: " + method);
} }
HttpEntity entity = null; if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) {
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean(); HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON);
if (hasBody) { ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity);
entity = new StringEntity(randomAsciiOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); request.setEntity(entity);
((HttpEntityEnclosingRequest) request).setEntity(entity);
} }
Header[] headers = new Header[0];
final Set<String> uniqueNames = new HashSet<>(); final Set<String> uniqueNames = new HashSet<>();
if (randomBoolean()) { if (randomBoolean()) {
headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
request.setHeaders(headers);
for (Header header : headers) { for (Header header : headers) {
request.addHeader(header); expectedRequest.addHeader(header);
uniqueNames.add(header.getName()); uniqueNames.add(header.getName());
} }
} }
for (Header defaultHeader : defaultHeaders) { for (Header defaultHeader : defaultHeaders) {
// request level headers override default headers // request level headers override default headers
if (uniqueNames.contains(defaultHeader.getName()) == false) { if (uniqueNames.contains(defaultHeader.getName()) == false) {
request.addHeader(defaultHeader); expectedRequest.addHeader(defaultHeader);
} }
} }
try { try {
if (hasParams == false && hasBody == false && randomBoolean()) { restClient.performRequest(request);
restClient.performRequest(method, uriAsString, headers);
} else if (hasBody == false && randomBoolean()) {
restClient.performRequest(method, uriAsString, params, headers);
} else {
restClient.performRequest(method, uriAsString, params, entity, headers);
}
} catch(ResponseException e) { } catch(ResponseException e) {
//all good //all good
} }
return request; return expectedRequest;
} }
/**
* @deprecated prefer {@link RestClient#performRequest(Request)}.
*/
@Deprecated
private Response performRequest(String method, String endpoint, Header... headers) throws IOException { private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
return performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers); return performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
} }
/**
* @deprecated prefer {@link RestClient#performRequest(Request)}.
*/
@Deprecated
private Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException { private Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
int methodSelector; int methodSelector;
if (params.isEmpty()) { if (params.isEmpty()) {

View File

@ -52,6 +52,30 @@ public class RestClientTests extends RestClientTestCase {
} }
public void testPerformAsyncWithUnsupportedMethod() throws Exception { public void testPerformAsyncWithUnsupportedMethod() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() {
@Override
public void onSuccess(Response response) {
fail("should have failed because of unsupported method");
}
@Override
public void onFailure(Exception exception) {
assertThat(exception, instanceOf(UnsupportedOperationException.class));
assertEquals("http method not supported: unsupported", exception.getMessage());
latch.countDown();
}
});
latch.await();
}
}
/**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithUnsupportedMethod()}.
*/
@Deprecated
public void testPerformAsyncOldStyleWithUnsupportedMethod() throws Exception {
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() { restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() {
@ -71,7 +95,11 @@ public class RestClientTests extends RestClientTestCase {
} }
} }
public void testPerformAsyncWithNullParams() throws Exception { /**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
*/
@Deprecated
public void testPerformOldStyleAsyncWithNullParams() throws Exception {
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() { restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() {
@ -83,7 +111,7 @@ public class RestClientTests extends RestClientTestCase {
@Override @Override
public void onFailure(Exception exception) { public void onFailure(Exception exception) {
assertThat(exception, instanceOf(NullPointerException.class)); assertThat(exception, instanceOf(NullPointerException.class));
assertEquals("params must not be null", exception.getMessage()); assertEquals("parameters cannot be null", exception.getMessage());
latch.countDown(); latch.countDown();
} }
}); });
@ -91,7 +119,11 @@ public class RestClientTests extends RestClientTestCase {
} }
} }
public void testPerformAsyncWithNullHeaders() throws Exception { /**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
*/
@Deprecated
public void testPerformOldStyleAsyncWithNullHeaders() throws Exception {
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
ResponseListener listener = new ResponseListener() { ResponseListener listener = new ResponseListener() {
@ -103,7 +135,7 @@ public class RestClientTests extends RestClientTestCase {
@Override @Override
public void onFailure(Exception exception) { public void onFailure(Exception exception) {
assertThat(exception, instanceOf(NullPointerException.class)); assertThat(exception, instanceOf(NullPointerException.class));
assertEquals("request header must not be null", exception.getMessage()); assertEquals("header cannot be null", exception.getMessage());
latch.countDown(); latch.countDown();
} }
}; };
@ -113,6 +145,30 @@ public class RestClientTests extends RestClientTestCase {
} }
public void testPerformAsyncWithWrongEndpoint() throws Exception { public void testPerformAsyncWithWrongEndpoint() throws Exception {
final CountDownLatch latch = new CountDownLatch(1);
try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() {
@Override
public void onSuccess(Response response) {
fail("should have failed because of wrong endpoint");
}
@Override
public void onFailure(Exception exception) {
assertThat(exception, instanceOf(IllegalArgumentException.class));
assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
latch.countDown();
}
});
latch.await();
}
}
/**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link #testPerformAsyncWithWrongEndpoint()}.
*/
@Deprecated
public void testPerformAsyncOldStyleWithWrongEndpoint() throws Exception {
final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch latch = new CountDownLatch(1);
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
restClient.performRequestAsync("GET", "::http:///", new ResponseListener() { restClient.performRequestAsync("GET", "::http:///", new ResponseListener() {
@ -175,6 +231,10 @@ public class RestClientTests extends RestClientTestCase {
} }
} }
/**
* @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testConstructor()}.
*/
@Deprecated
public void testNullPath() throws IOException { public void testNullPath() throws IOException {
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
for (String method : getHttpMethods()) { for (String method : getHttpMethods()) {
@ -182,7 +242,7 @@ public class RestClientTests extends RestClientTestCase {
restClient.performRequest(method, null); restClient.performRequest(method, null);
fail("path set to null should fail!"); fail("path set to null should fail!");
} catch (NullPointerException e) { } catch (NullPointerException e) {
assertEquals("path must not be null", e.getMessage()); assertEquals("endpoint cannot be null", e.getMessage());
} }
} }
} }

View File

@ -27,7 +27,9 @@ import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials; import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider; import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig; import org.apache.http.client.config.RequestConfig;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.entity.ContentType; import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider; import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.impl.nio.reactor.IOReactorConfig; import org.apache.http.impl.nio.reactor.IOReactorConfig;
@ -37,6 +39,7 @@ import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.SSLContexts; import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils; import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener; import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
@ -134,107 +137,61 @@ public class RestClientDocumentation {
} }
{ {
//tag::rest-client-verb-endpoint //tag::rest-client-sync
Response response = restClient.performRequest("GET", "/"); // <1> Request request = new Request(
//end::rest-client-verb-endpoint "GET", // <1>
"/"); // <2>
Response response = restClient.performRequest(request);
//end::rest-client-sync
} }
{ {
//tag::rest-client-headers //tag::rest-client-async
Response response = restClient.performRequest("GET", "/", new BasicHeader("header", "value")); Request request = new Request(
//end::rest-client-headers "GET", // <1>
} "/"); // <2>
{ restClient.performRequestAsync(request, new ResponseListener() {
//tag::rest-client-verb-endpoint-params
Map<String, String> params = Collections.singletonMap("pretty", "true");
Response response = restClient.performRequest("GET", "/", params); // <1>
//end::rest-client-verb-endpoint-params
}
{
//tag::rest-client-verb-endpoint-params-body
Map<String, String> params = Collections.emptyMap();
String jsonString = "{" +
"\"user\":\"kimchy\"," +
"\"postDate\":\"2013-01-30\"," +
"\"message\":\"trying out Elasticsearch\"" +
"}";
HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON);
Response response = restClient.performRequest("PUT", "/posts/doc/1", params, entity); // <1>
//end::rest-client-verb-endpoint-params-body
}
{
//tag::rest-client-response-consumer
Map<String, String> params = Collections.emptyMap();
HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024);
Response response = restClient.performRequest("GET", "/posts/_search", params, null, consumerFactory); // <1>
//end::rest-client-response-consumer
}
{
//tag::rest-client-verb-endpoint-async
ResponseListener responseListener = new ResponseListener() {
@Override @Override
public void onSuccess(Response response) { public void onSuccess(Response response) {
// <1> // <3>
} }
@Override @Override
public void onFailure(Exception exception) { public void onFailure(Exception exception) {
// <2> // <4>
} }
}; });
restClient.performRequestAsync("GET", "/", responseListener); // <3> //end::rest-client-async
//end::rest-client-verb-endpoint-async
//tag::rest-client-headers-async
Header[] headers = {
new BasicHeader("header1", "value1"),
new BasicHeader("header2", "value2")
};
restClient.performRequestAsync("GET", "/", responseListener, headers);
//end::rest-client-headers-async
//tag::rest-client-verb-endpoint-params-async
Map<String, String> params = Collections.singletonMap("pretty", "true");
restClient.performRequestAsync("GET", "/", params, responseListener); // <1>
//end::rest-client-verb-endpoint-params-async
//tag::rest-client-verb-endpoint-params-body-async
String jsonString = "{" +
"\"user\":\"kimchy\"," +
"\"postDate\":\"2013-01-30\"," +
"\"message\":\"trying out Elasticsearch\"" +
"}";
HttpEntity entity = new NStringEntity(jsonString, ContentType.APPLICATION_JSON);
restClient.performRequestAsync("PUT", "/posts/doc/1", params, entity, responseListener); // <1>
//end::rest-client-verb-endpoint-params-body-async
//tag::rest-client-response-consumer-async
HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory =
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024);
restClient.performRequestAsync("GET", "/posts/_search", params, null, consumerFactory, responseListener); // <1>
//end::rest-client-response-consumer-async
} }
{ {
//tag::rest-client-response2 Request request = new Request("GET", "/");
Response response = restClient.performRequest("GET", "/"); //tag::rest-client-parameters
RequestLine requestLine = response.getRequestLine(); // <1> request.addParameter("pretty", "true");
HttpHost host = response.getHost(); // <2> //end::rest-client-parameters
int statusCode = response.getStatusLine().getStatusCode(); // <3> //tag::rest-client-body
Header[] headers = response.getHeaders(); // <4> request.setEntity(new StringEntity(
String responseBody = EntityUtils.toString(response.getEntity()); // <5> "{\"json\":\"text\"}",
//end::rest-client-response2 ContentType.APPLICATION_JSON));
//end::rest-client-body
//tag::rest-client-headers
request.setHeaders(
new BasicHeader("Accept", "text/plain"),
new BasicHeader("Cache-Control", "no-cache"));
//end::rest-client-headers
//tag::rest-client-response-consumer
request.setHttpAsyncResponseConsumerFactory(
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
//end::rest-client-response-consumer
} }
{ {
HttpEntity[] documents = new HttpEntity[10]; HttpEntity[] documents = new HttpEntity[10];
//tag::rest-client-async-example //tag::rest-client-async-example
final CountDownLatch latch = new CountDownLatch(documents.length); final CountDownLatch latch = new CountDownLatch(documents.length);
for (int i = 0; i < documents.length; i++) { for (int i = 0; i < documents.length; i++) {
Request request = new Request("PUT", "/posts/doc/" + i);
//let's assume that the documents are stored in an HttpEntity array
request.setEntity(documents[i]);
restClient.performRequestAsync( restClient.performRequestAsync(
"PUT", request,
"/posts/doc/" + i,
Collections.<String, String>emptyMap(),
//let's assume that the documents are stored in an HttpEntity array
documents[i],
new ResponseListener() { new ResponseListener() {
@Override @Override
public void onSuccess(Response response) { public void onSuccess(Response response) {
@ -253,7 +210,16 @@ public class RestClientDocumentation {
latch.await(); latch.await();
//end::rest-client-async-example //end::rest-client-async-example
} }
{
//tag::rest-client-response2
Response response = restClient.performRequest("GET", "/");
RequestLine requestLine = response.getRequestLine(); // <1>
HttpHost host = response.getHost(); // <2>
int statusCode = response.getStatusLine().getStatusCode(); // <3>
Header[] headers = response.getHeaders(); // <4>
String responseBody = EntityUtils.toString(response.getEntity()); // <5>
//end::rest-client-response2
}
} }
@SuppressWarnings("unused") @SuppressWarnings("unused")

View File

@ -1,12 +1,26 @@
[[es-release-notes]]
= {es} Release Notes
[partintro]
--
// Use these for links to issue and pulls. Note issues and pulls redirect one to // Use these for links to issue and pulls. Note issues and pulls redirect one to
// each other on Github, so don't worry too much on using the right prefix. // each other on Github, so don't worry too much on using the right prefix.
:issue: https://github.com/elastic/elasticsearch/issues/ :issue: https://github.com/elastic/elasticsearch/issues/
:pull: https://github.com/elastic/elasticsearch/pull/ :pull: https://github.com/elastic/elasticsearch/pull/
= Elasticsearch Release Notes This section summarizes the changes in each release.
== Elasticsearch 7.0.0 * <<release-notes-7.0.0>>
* <<release-notes-6.4.0>>
--
[[release-notes-7.0.0]]
== {es} 7.0.0
[float]
[[breaking-7.0.0]]
=== Breaking Changes === Breaking Changes
<<write-thread-pool-fallback, Removed `thread_pool.bulk.*` settings and <<write-thread-pool-fallback, Removed `thread_pool.bulk.*` settings and
@ -16,35 +30,67 @@
<<remove-field-caps-body, In field capabilities APIs, removed support for providing fields in the request body>> ({pull}30185[#30185]) <<remove-field-caps-body, In field capabilities APIs, removed support for providing fields in the request body>> ({pull}30185[#30185])
=== Breaking Java Changes Machine Learning::
* The `max_running_jobs` node property is removed in this release. Use the
`xpack.ml.max_open_jobs` setting instead. For more information, see <<ml-settings>>.
=== Deprecations Monitoring::
* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1`
to disable monitoring data collection. Use `xpack.monitoring.collection.enabled`
and set it to `false` (its default), which was added in 6.3.0.
=== New Features Security::
* The fields returned as part of the mappings section by get index, get
mappings, get field mappings, and field capabilities API are now only the
ones that the user is authorized to access in case field level security is enabled.
=== Enhancements //[float]
//=== Breaking Java Changes
//[float]
//=== Deprecations
//[float]
//=== New Features
//[float]
//=== Enhancements
[float]
=== Bug Fixes === Bug Fixes
Fail snapshot operations early when creating or deleting a snapshot on a repository that has been Fail snapshot operations early when creating or deleting a snapshot on a repository that has been
written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140])
=== Regressions //[float]
//=== Regressions
=== Known Issues //[float]
//=== Known Issues
== Elasticsearch version 6.4.0 [[release-notes-6.4.0]]
== {es} 6.4.0
=== New Features //[float]
//=== New Features
[float]
=== Enhancements === Enhancements
{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])
Added new "Request" object flavored request methods. Prefer these instead of the
multi-argument versions. ({pull}29623[#29623])
[float]
=== Bug Fixes === Bug Fixes
Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])
=== Regressions
=== Known Issues
//[float]
//=== Regressions
//[float]
//=== Known Issues

View File

@ -218,93 +218,74 @@ http://hc.apache.org/httpcomponents-asyncclient-dev/httpasyncclient/apidocs/org/
[[java-rest-low-usage-requests]] [[java-rest-low-usage-requests]]
=== Performing requests === Performing requests
Once the `RestClient` has been created, requests can be sent by calling one of Once the `RestClient` has been created, requests can be sent by calling either
the available `performRequest` or `performRequestAsync` method variants. `performRequest` or `performRequestAsync`. `performRequest` is synchronous and
The `performRequest` methods are synchronous and return the `Response` directly, will block the calling thread and return the `Response` when the request is
meaning that the client will block and wait for a response to be returned. successful or throw an exception if it fails. `performRequestAsync` is
The `performRequestAsync` variants return `void` and accept an extra asynchronous and accepts a `ResponseListener` argument that it calls with a
`ResponseListener` as an argument instead, meaning that they are executed `Response` when the request is successful or with an `Exception` if it4 fails.
asynchronously. The provided listener will be notified upon request completion
or failure. This is synchronous:
["source","java",subs="attributes,callouts,macros"] ["source","java",subs="attributes,callouts,macros"]
-------------------------------------------------- --------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint] include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-sync]
-------------------------------------------------- --------------------------------------------------
<1> Send a request by providing only the verb and the endpoint, minimum set <1> The HTTP method (`GET`, `POST`, `HEAD`, etc)
of required arguments <2> The endpoint on the server
And this is asynchronous:
["source","java",subs="attributes,callouts,macros"] ["source","java",subs="attributes,callouts,macros"]
-------------------------------------------------- --------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params] include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async]
-------------------------------------------------- --------------------------------------------------
<1> Send a request by providing the verb, the endpoint, and some querystring <1> The HTTP method (`GET`, `POST`, `HEAD`, etc)
parameter <2> The endpoint on the server
<3> Handle the response
<4> Handle the failure
You can add request parameters to the request object:
["source","java",subs="attributes,callouts,macros"] ["source","java",subs="attributes,callouts,macros"]
-------------------------------------------------- --------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-body] include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-parameters]
--------------------------------------------------
You can set the body of the request to any `HttpEntity`:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body]
-------------------------------------------------- --------------------------------------------------
<1> Send a request by providing the verb, the endpoint, optional querystring
parameters and the request body enclosed in an `org.apache.http.HttpEntity`
object
IMPORTANT: The `ContentType` specified for the `HttpEntity` is important IMPORTANT: The `ContentType` specified for the `HttpEntity` is important
because it will be used to set the `Content-Type` header so that Elasticsearch because it will be used to set the `Content-Type` header so that Elasticsearch
can properly parse the content. can properly parse the content.
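For example, a hedged sketch (the endpoint and JSON body are made up for
illustration):

["source","java"]
--------------------------------------------------
Request request = new Request("PUT", "/posts/doc/1");
request.setEntity(new NStringEntity(
        "{\"user\":\"kimchy\"}",
        ContentType.APPLICATION_JSON)); // drives the Content-Type header
--------------------------------------------------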
And you can set a list of headers to send with the request:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers]
--------------------------------------------------
You can also customize the response consumer used to buffer the asynchronous
responses. The default consumer will buffer up to 100MB of response on the
JVM heap. If the response is larger then the request will fail. You could,
for example, lower the maximum size which might be useful if you are running
in a heap constrained environment:
["source","java",subs="attributes,callouts,macros"] ["source","java",subs="attributes,callouts,macros"]
-------------------------------------------------- --------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer] include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer]
-------------------------------------------------- --------------------------------------------------
<1> Send a request by providing the verb, the endpoint, optional querystring
parameters, optional request body and the optional factory that is used to
create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`]
callback instance per request attempt. Controls how the response body gets
streamed from a non-blocking HTTP connection on the client side. When not
provided, the default implementation is used which buffers the whole response
body in heap memory, up to 100 MB.
["source","java",subs="attributes,callouts,macros"] ==== Multiple parallel asynchronous actions
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-async]
--------------------------------------------------
<1> Define what needs to happen when the request is successfully performed
<2> Define what needs to happen when the request fails, meaning whenever
there's a connection error or a response with error status code is returned.
<3> Send an async request by providing only the verb, the endpoint, and the
response listener to be notified once the request is completed, minimum set
of required arguments
["source","java",subs="attributes,callouts,macros"] The client is quite happy to execute many actions in parallel. The following
-------------------------------------------------- example indexes many documents in parallel. In a real-world scenario you'd
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-async] probably want to use the `_bulk` API instead, but the example is illustrative.
--------------------------------------------------
<1> Send an async request by providing the verb, the endpoint, some querystring
parameter and the response listener to be notified once the request is completed
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-verb-endpoint-params-body-async]
--------------------------------------------------
<1> Send an async request by providing the verb, the endpoint, optional
querystring parameters, the request body enclosed in an
`org.apache.http.HttpEntity` object and the response listener to be
notified once the request is completed
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer-async]
--------------------------------------------------
<1> Send an async request by providing the verb, the endpoint, optional
querystring parameters, optional request body and the optional factory that is
used to create an http://hc.apache.org/httpcomponents-core-ga/httpcore-nio/apidocs/org/apache/http/nio/protocol/HttpAsyncResponseConsumer.html[`org.apache.http.nio.protocol.HttpAsyncResponseConsumer`]
callback instance per request attempt. Controls how the response body gets
streamed from a non-blocking HTTP connection on the client side. When not
provided, the default implementation is used which buffers the whole response
body in heap memory, up to 100 MB.
The following is a basic example of how async requests can be sent:
["source","java",subs="attributes,callouts,macros"] ["source","java",subs="attributes,callouts,macros"]
-------------------------------------------------- --------------------------------------------------
@ -314,19 +295,6 @@ include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-async-examp
<2> Handle the returned exception, due to communication error or a response <2> Handle the returned exception, due to communication error or a response
with status code that indicates an error with status code that indicates an error
Each of the above listed method supports sending headers along with the
request through a `Header` varargs argument as in the following examples:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers]
--------------------------------------------------
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers-async]
--------------------------------------------------
[[java-rest-low-usage-responses]] [[java-rest-low-usage-responses]]
=== Reading responses === Reading responses
@ -396,4 +364,3 @@ still yields the same response as it did. Enable trace logging for the `tracer`
package to have such log lines printed out. Do note that this type of logging is package to have such log lines printed out. Do note that this type of logging is
expensive and should not be enabled at all times in production environments, expensive and should not be enabled at all times in production environments,
but rather temporarily used only when needed. but rather temporarily used only when needed.

View File

@ -5,4 +5,4 @@ include::testing.asciidoc[]
include::glossary.asciidoc[] include::glossary.asciidoc[]
include::release-notes.asciidoc[] include::{docdir}/../CHANGELOG.asciidoc[]

View File

@ -121,8 +121,13 @@ POST my_source_index/_shrink/my_target_index
NOTE: Mappings may not be specified in the `_shrink` request. NOTE: Mappings may not be specified in the `_shrink` request.
NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
index are not copied during a shrink operation. and `index.sort` settings, index settings on the source index are not copied
during a shrink operation. With the exception of non-copyable settings, settings
from the source index can be copied to the target index by adding the URL
parameter `copy_settings=true` to the request.
deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
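For example, a hedged sketch using the low-level Java REST client (the index
names reuse the ones above):

["source","java"]
--------------------------------------------------
Request request = new Request("POST",
    "/my_source_index/_shrink/my_target_index");
request.addParameter("copy_settings", "true"); // opt in to copying settings
Response response = restClient.performRequest(request);
--------------------------------------------------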
[float] [float]
=== Monitoring the shrink process === Monitoring the shrink process

View File

@ -177,8 +177,13 @@ POST my_source_index/_split/my_target_index
NOTE: Mappings may not be specified in the `_split` request.

-NOTE: By default, with the exception of `index.analysis`, `index.similarity`, and `index.sort` settings, index settings on the source
-index are not copied during a shrink operation.
+NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
+and `index.sort` settings, index settings on the source index are not copied
+during a split operation. With the exception of non-copyable settings, settings
+from the source index can be copied to the target index by adding the URL
+parameter `copy_settings=true` to the request.
+
+deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
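The same parameter applies to a split request, for example (the shard count is
illustrative):

[source,js]
--------------------------------------------------
POST my_source_index/_split/my_target_index?copy_settings=true
{
  "settings": {
    "index.number_of_shards": 4
  }
}
--------------------------------------------------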
[float]
=== Monitoring the split process
@ -61,7 +61,6 @@ backwards compatibility. Backwards support for the `suggest` metric was
deprecated in 6.3.0 and now removed in 7.0.0.
[[remove-field-caps-body]]
-==== In the fields capabilities API, `fields` can no longer be provided in the request body.
In the past, `fields` could be provided either as a parameter, or as part of the request
body. Specifying `fields` in the request body as opposed to a parameter was deprecated
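For example, the parameter form of such a request looks like this (the field
names are illustrative):

[source,js]
--------------------------------------------------
GET _field_caps?fields=rating,title
--------------------------------------------------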
@ -186,8 +186,7 @@ process. It does not support field name prefixes, wildcard characters,
or other "advanced" features. For this reason, chances of it failing are or other "advanced" features. For this reason, chances of it failing are
very small / non existent, and it provides an excellent behavior when it very small / non existent, and it provides an excellent behavior when it
comes to just analyze and run that text as a query behavior (which is comes to just analyze and run that text as a query behavior (which is
usually what a text search box does). Also, the `phrase_prefix` type can usually what a text search box does). Also, the <<query-dsl-match-query-phrase-prefix,`match_phrase_prefix`>>
provide a great "as you type" behavior to automatically load search type can provide a great "as you type" behavior to automatically load search results.
results.
************************************************** **************************************************
@ -23,7 +23,7 @@ POST twitter/_search
},
"suggest" : {
  "my-suggestion" : {
-   "text" : "trying out Elasticsearch",
+   "text" : "tring out Elasticsearch",
    "term" : {
      "field" : "message"
    }
@ -38,8 +38,7 @@ Elasticsearch on Windows. MSIs may be downloaded from the Elasticsearch website.
`docker`::
Images are available for running Elasticsearch as Docker containers. They may be
-downloaded from the Elastic Docker Registry. The default image ships with
-{xpack-ref}/index.html[X-Pack] pre-installed.
+downloaded from the Elastic Docker Registry.
+
{ref}/docker.html[Install {es} with Docker]
@ -2,9 +2,11 @@
=== Install Elasticsearch with Debian Package
The Debian package for Elasticsearch can be <<install-deb,downloaded from our website>>
or from our <<deb-repo,APT repository>>. It can be used to install
Elasticsearch on any Debian-based system such as Debian and Ubuntu.
+include::license.asciidoc[]
The latest stable version of Elasticsearch can be found on the
link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can
be found on the link:/downloads/past-releases[Past Releases page].
@ -125,6 +127,10 @@ sudo dpkg -i elasticsearch-{version}.deb
--------------------------------------------
<1> Compares the SHA of the downloaded Debian package and the published checksum, which should output
`elasticsearch-{version}.deb: OK`.
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.deb
endif::[]
@ -0,0 +1,6 @@
This package is free to use under the Elastic license. It contains open source
and free commercial features and access to paid commercial features.
{stack-ov}/license-management.html[Start a 30-day trial] to try out all of the
paid commercial features. See the
https://www.elastic.co/subscriptions[Subscriptions] page for information about
Elastic license levels.
@ -9,6 +9,8 @@ and Oracle Enterprise.
NOTE: RPM install is not supported on distributions with old versions of RPM,
such as SLES 11 and CentOS 5. Please see <<zip-targz>> instead.
+include::license.asciidoc[]
The latest stable version of Elasticsearch can be found on the
link:/downloads/elasticsearch[Download Elasticsearch] page. Other versions can
be found on the link:/downloads/past-releases[Past Releases page].
@ -110,6 +112,10 @@ sudo rpm --install elasticsearch-{version}.rpm
--------------------------------------------
<1> Compares the SHA of the downloaded RPM and the published checksum, which should output
`elasticsearch-{version}.rpm: OK`.
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.rpm
endif::[]
@ -10,6 +10,8 @@ the included `elasticsearch.exe` executable.
TIP: Elasticsearch has historically been installed on Windows using the <<zip-windows, .zip>> archive.
You can continue using the `.zip` approach if you prefer.
+include::license.asciidoc[]
The latest stable version of Elasticsearch can be found on the
link:/downloads/elasticsearch[Download Elasticsearch] page.
Other versions can be found on the
@ -32,6 +34,10 @@ ifeval::["{release-state}"!="unreleased"]
Download the `.msi` package for Elasticsearch v{version} from https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.msi
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.msi
endif::[]
[[install-msi-gui]]
@ -5,6 +5,8 @@ Elasticsearch is provided as a `.zip` and as a `.tar.gz` package. These
packages can be used to install Elasticsearch on any system and are the
easiest package format to use when trying out Elasticsearch.
+include::license.asciidoc[]
The latest stable version of Elasticsearch can be found on the
link:/downloads/elasticsearch[Download Elasticsearch] page.
Other versions can be found on the
@ -40,6 +42,10 @@ cd elasticsearch-{version}/ <2>
`elasticsearch-{version}.zip: OK`.
<2> This directory is known as `$ES_HOME`.
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.zip
endif::[]
@ -68,6 +74,10 @@ cd elasticsearch-{version}/ <2>
`elasticsearch-{version}.tar.gz: OK`.
<2> This directory is known as `$ES_HOME`.
+Alternatively, you can download the following package, which includes only
+Apache 2.0 licensed code:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.tar.gz
endif::[]
ifdef::include-xpack[]
@ -9,6 +9,8 @@ TIP: Elasticsearch has historically been installed on Windows using the `.zip` a
An <<windows, MSI installer package>> is available that provides the easiest getting started
experience for Windows. You can continue using the `.zip` approach if you prefer.
+include::license.asciidoc[]
The latest stable version of Elasticsearch can be found on the
link:/downloads/elasticsearch[Download Elasticsearch] page.
Other versions can be found on the
@ -31,6 +33,10 @@ ifeval::["{release-state}"!="unreleased"]
Download the `.zip` archive for Elasticsearch v{version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}.zip
+Alternatively, you can download the following package, which contains only
+features that are available under the Apache 2.0 license:
+https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-oss-{version}.zip
Unzip it with your favourite unzip tool. This will create a folder called
+elasticsearch-{version}+, which we will refer to as `%ES_HOME%`. In a terminal
window, `cd` to the `%ES_HOME%` directory, for instance:
@ -91,25 +91,20 @@ already have local shard copies.
+
--
When all nodes have joined the cluster and recovered their primary shards,
-reenable allocation.
+reenable allocation by restoring `cluster.routing.allocation.enable` to its
+default:

[source,js]
------------------------------------------------------
PUT _cluster/settings
{
-  "transient": {
-    "cluster.routing.allocation.enable": "all"
+  "persistent": {
+    "cluster.routing.allocation.enable": null
  }
}
------------------------------------------------------
// CONSOLE

-NOTE: Because <<_precedence_of_settings, transient
-settings take precedence over persistent settings>>, this overrides the
-persistent setting used to disable shard allocation in the first step. If you
-don't explicitly reenable shard allocation after a full cluster restart, the
-persistent setting is used and shard allocation remains disabled.

Once allocation is reenabled, the cluster starts allocating replica shards to
the data nodes. At this point it is safe to resume indexing and searching,
but your cluster will recover more quickly if you can wait until all primary
@ -72,21 +72,15 @@ GET _cat/nodes
+
--
-NOTE: Because <<_precedence_of_settings, transient
-settings take precedence over persistent settings>>, this overrides the
-persistent setting used to disable shard allocation in the first step. If you
-don't explicitly reenable shard allocation after a full cluster restart, the
-persistent setting is used and shard allocation remains disabled.
-Once the node has joined the cluster, reenable shard allocation to start using
-the node:
+Once the node has joined the cluster, remove the `cluster.routing.allocation.enable`
+setting to enable shard allocation and start using the node:

[source,js]
--------------------------------------------------
PUT _cluster/settings
{
-  "transient": {
-    "cluster.routing.allocation.enable": "all"
+  "persistent": {
+    "cluster.routing.allocation.enable": null
  }
}
--------------------------------------------------
@ -84,7 +84,10 @@ dependencies {
}
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
-  // we can't get the pid files in windows so we skip reindex-from-old
+  logger.warn("Disabling reindex-from-old tests because we can't get the pid file on windows")
+  integTestRunner.systemProperty "tests.fromOld", "false"
+} else if (rootProject.rootDir.toString().contains(" ")) {
+  logger.warn("Disabling reindex-from-old tests because Elasticsearch 1.7 won't start with spaces in the path")
  integTestRunner.systemProperty "tests.fromOld", "false"
} else {
  integTestRunner.systemProperty "tests.fromOld", "true"
@ -79,7 +79,11 @@ final class RemoteRequestBuilders {
}
params.put("size", Integer.toString(searchRequest.source().size()));
if (searchRequest.source().version() == null || searchRequest.source().version() == true) {
-    // false is the only value that makes it false. Null defaults to true....
+    /*
+     * Passing `null` here just adds the `version` request parameter
+     * without any value. This way of requesting the version works
+     * for all supported versions of Elasticsearch.
+     */
    params.put("version", null);
}
if (searchRequest.source().sorts() != null) {
@ -16,7 +16,6 @@
 * specific language governing permissions and limitations
 * under the License.
 */

-import org.elasticsearch.gradle.test.AntFixture

esplugin {
  description 'The Azure Repository plugin adds support for Azure storage repositories.'
@ -43,28 +42,12 @@ thirdPartyAudit.excludes = [
'org.slf4j.LoggerFactory',
]

-forbiddenApisTest {
-  // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
-  bundledSignatures -= 'jdk-non-portable'
-  bundledSignatures += 'jdk-internal'
-}
-
-/** A task to start the fixture which emulates an Azure Storage service **/
-task azureStorageFixture(type: AntFixture) {
-  dependsOn compileTestJava
-  env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
-  executable = new File(project.runtimeJavaHome, 'bin/java')
-  args 'org.elasticsearch.repositories.azure.AzureStorageFixture', baseDir, 'container_test'
-}
+check {
+  // also execute the QA tests when testing the plugin
+  dependsOn 'qa:microsoft-azure-storage:check'
+}

integTestCluster {
-  dependsOn azureStorageFixture
-  keystoreSetting 'azure.client.integration_test.account', "azure_integration_test_account"
-  /* The key is "azure_integration_test_key" encoded using base64 */
-  keystoreSetting 'azure.client.integration_test.key', "YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk="
-  // Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used
-  // in a hacky way to change the protocol and endpoint. We must fix that.
-  setting 'azure.client.integration_test.endpoint_suffix',
-      "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${ -> azureStorageFixture.addressAndPort }"
+  keystoreSetting 'azure.client.integration_test.account', 'azure_account'
+  keystoreSetting 'azure.client.integration_test.key', 'azure_key'
}
@ -0,0 +1,85 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.MavenFilteringHack
import org.elasticsearch.gradle.test.AntFixture
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.rest-test'
dependencies {
testCompile project(path: ':plugins:repository-azure', configuration: 'runtime')
}
integTestCluster {
plugin ':plugins:repository-azure'
}
forbiddenApisTest {
// we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
bundledSignatures -= 'jdk-non-portable'
bundledSignatures += 'jdk-internal'
}
boolean useFixture = false
String azureAccount = System.getenv("azure_storage_account")
String azureKey = System.getenv("azure_storage_key")
String azureContainer = System.getenv("azure_storage_container")
String azureBasePath = System.getenv("azure_storage_base_path")
if (!azureAccount && !azureKey && !azureContainer && !azureBasePath) {
azureAccount = 'azure_integration_test_account'
azureKey = 'YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk=' // The key is "azure_integration_test_key" encoded using base64
azureContainer = 'container_test'
azureBasePath = 'integration_test'
useFixture = true
}
/** A task to start the fixture which emulates an Azure Storage service **/
task azureStorageFixture(type: AntFixture) {
dependsOn compileTestJava
env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }"
executable = new File(project.runtimeJavaHome, 'bin/java')
args 'org.elasticsearch.repositories.azure.AzureStorageFixture', baseDir, azureContainer
}
Map<String, Object> expansions = [
'container': azureContainer,
'base_path': azureBasePath
]
processTestResources {
inputs.properties(expansions)
MavenFilteringHack.filter(it, expansions)
}
integTestCluster {
keystoreSetting 'azure.client.integration_test.account', azureAccount
keystoreSetting 'azure.client.integration_test.key', azureKey
if (useFixture) {
dependsOn azureStorageFixture
// Use a closure on the string to delay evaluation until tests are executed. The endpoint_suffix is used
// in a hacky way to change the protocol and endpoint. We must fix that.
setting 'azure.client.integration_test.endpoint_suffix',
"ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${ -> azureStorageFixture.addressAndPort }"
} else {
println "Using an external service to test the repository-azure plugin"
}
}
@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.azure;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
public class AzureStorageRepositoryClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
public AzureStorageRepositoryClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
super(testCandidate);
}
@ParametersFactory
public static Iterable<Object[]> parameters() throws Exception {
return ESClientYamlSuiteTestCase.createParameters();
}
@Override
protected Settings restClientSettings() {
// Give more time to repository-azure to complete the snapshot operations
return Settings.builder().put(super.restClientSettings())
.put(ESRestTestCase.CLIENT_RETRY_TIMEOUT, "60s")
.put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "60s")
.build();
}
}
@ -0,0 +1,174 @@
# Integration tests for repository-azure
---
"Snapshot/Restore with repository-azure":
# Register repository
- do:
snapshot.create_repository:
repository: repository
body:
type: azure
settings:
container: ${container}
client: "integration_test"
base_path: ${base_path}
- match: { acknowledged: true }
# Get repository
- do:
snapshot.get_repository:
repository: repository
- match: { repository.settings.container: ${container} }
- match: { repository.settings.client : "integration_test" }
- match: { repository.settings.base_path : ${base_path} }
# Index documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 1
- snapshot: one
- index:
_index: docs
_type: doc
_id: 2
- snapshot: one
- index:
_index: docs
_type: doc
_id: 3
- snapshot: one
- do:
count:
index: docs
- match: {count: 3}
# Create a first snapshot
- do:
snapshot.create:
repository: repository
snapshot: snapshot-one
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-one }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.include_global_state: true }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.status:
repository: repository
snapshot: snapshot-one
- is_true: snapshots
- match: { snapshots.0.snapshot: snapshot-one }
- match: { snapshots.0.state : SUCCESS }
# Index more documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 4
- snapshot: two
- index:
_index: docs
_type: doc
_id: 5
- snapshot: two
- index:
_index: docs
_type: doc
_id: 6
- snapshot: two
- index:
_index: docs
_type: doc
_id: 7
- snapshot: two
- do:
count:
index: docs
- match: {count: 7}
# Create a second snapshot
- do:
snapshot.create:
repository: repository
snapshot: snapshot-two
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-two }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.get:
repository: repository
snapshot: snapshot-one,snapshot-two
- is_true: snapshots
- match: { snapshots.0.state : SUCCESS }
- match: { snapshots.1.state : SUCCESS }
# Delete the index
- do:
indices.delete:
index: docs
# Restore the second snapshot
- do:
snapshot.restore:
repository: repository
snapshot: snapshot-two
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 7}
# Delete the index again
- do:
indices.delete:
index: docs
# Restore the first snapshot
- do:
snapshot.restore:
repository: repository
snapshot: snapshot-one
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 3}
# Remove the snapshots
- do:
snapshot.delete:
repository: repository
snapshot: snapshot-two
master_timeout: 5m
- do:
snapshot.delete:
repository: repository
snapshot: snapshot-one
master_timeout: 5m
@ -11,177 +11,3 @@
nodes.info: {}
- match: { nodes.$master.plugins.0.name: repository-azure }
---
"Snapshot/Restore with repository-azure":
# Register repository
- do:
snapshot.create_repository:
repository: repository
body:
type: azure
settings:
container: "container_test"
client: "integration_test"
- match: { acknowledged: true }
# Get repository
- do:
snapshot.get_repository:
repository: repository
- match: {repository.settings.container : "container_test"}
- match: {repository.settings.client : "integration_test"}
# Index documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 1
- snapshot: one
- index:
_index: docs
_type: doc
_id: 2
- snapshot: one
- index:
_index: docs
_type: doc
_id: 3
- snapshot: one
- do:
count:
index: docs
- match: {count: 3}
# Create a first snapshot
- do:
snapshot.create:
repository: repository
snapshot: snapshot-one
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-one }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.include_global_state: true }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.status:
repository: repository
snapshot: snapshot-one
- is_true: snapshots
- match: { snapshots.0.snapshot: snapshot-one }
- match: { snapshots.0.state : SUCCESS }
# Index more documents
- do:
bulk:
refresh: true
body:
- index:
_index: docs
_type: doc
_id: 4
- snapshot: two
- index:
_index: docs
_type: doc
_id: 5
- snapshot: two
- index:
_index: docs
_type: doc
_id: 6
- snapshot: two
- index:
_index: docs
_type: doc
_id: 7
- snapshot: two
- do:
count:
index: docs
- match: {count: 7}
# Create a second snapshot
- do:
snapshot.create:
repository: repository
snapshot: snapshot-two
wait_for_completion: true
- match: { snapshot.snapshot: snapshot-two }
- match: { snapshot.state : SUCCESS }
- match: { snapshot.shards.failed : 0 }
- do:
snapshot.get:
repository: repository
snapshot: snapshot-one,snapshot-two
- is_true: snapshots
- match: { snapshots.0.state : SUCCESS }
- match: { snapshots.1.state : SUCCESS }
# Delete the index
- do:
indices.delete:
index: docs
# Restore the second snapshot
- do:
snapshot.restore:
repository: repository
snapshot: snapshot-two
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 7}
# Delete the index again
- do:
indices.delete:
index: docs
# Restore the first snapshot
- do:
snapshot.restore:
repository: repository
snapshot: snapshot-one
wait_for_completion: true
- do:
count:
index: docs
- match: {count: 3}
# Remove the snapshots
- do:
snapshot.delete:
repository: repository
snapshot: snapshot-two
- do:
snapshot.delete:
repository: repository
snapshot: snapshot-one
# Remove our repository
- do:
snapshot.delete_repository:
repository: repository
@ -1,82 +0,0 @@
"Deprecated Repository can be registered":
- skip:
features: warnings
- do:
warnings:
- "[account] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."
snapshot.create_repository:
repository: test_repo_azure
verify: false
body:
type: azure
settings:
account : "my_test_account"
container : "backup-container"
base_path : "backups"
chunk_size: "32m"
compress : true
- is_true: acknowledged
- do:
snapshot.get_repository:
repository: test_repo_azure
- is_true : test_repo_azure
- match : { test_repo_azure.settings.account : "my_test_account" }
- match : { test_repo_azure.settings.container : "backup-container" }
- match : { test_repo_azure.settings.base_path : "backups" }
- match : { test_repo_azure.settings.chunk_size: "32m" }
- match : { test_repo_azure.settings.compress : "true" }
---
"Default repository can be registered":
- do:
snapshot.create_repository:
repository: test_repo_azure
verify: false
body:
type: azure
settings:
container : "backup-container"
base_path : "backups"
chunk_size: "32m"
compress : true
- is_true: acknowledged
- do:
snapshot.get_repository:
repository: test_repo_azure
- is_true : test_repo_azure
- match : { test_repo_azure.settings.container : "backup-container" }
- match : { test_repo_azure.settings.base_path : "backups" }
- match : { test_repo_azure.settings.chunk_size: "32m" }
- match : { test_repo_azure.settings.compress : "true" }
---
"Named client repository can be registered":
- do:
snapshot.create_repository:
repository: test_repo_azure
verify: false
body:
type: azure
settings:
client : "secondary"
container : "backup-container"
base_path : "backups"
chunk_size: "32m"
compress : true
- is_true: acknowledged
- do:
snapshot.get_repository:
repository: test_repo_azure
- is_true : test_repo_azure
- match : { test_repo_azure.settings.client : "secondary" }
- match : { test_repo_azure.settings.container : "backup-container" }
- match : { test_repo_azure.settings.base_path : "backups" }
- match : { test_repo_azure.settings.chunk_size: "32m" }
- match : { test_repo_azure.settings.compress : "true" }
@ -18,6 +18,10 @@
}
},
"params": {
+  "copy_settings": {
+    "type" : "boolean",
+    "description" : "whether or not to copy settings from the source index (defaults to false)"
+  },
  "timeout": {
    "type" : "time",
    "description" : "Explicit operation timeout"
@ -18,6 +18,10 @@
}
},
"params": {
+  "copy_settings": {
+    "type" : "boolean",
+    "description" : "whether or not to copy settings from the source index (defaults to false)"
+  },
  "timeout": {
    "type" : "time",
    "description" : "Explicit operation timeout"
@ -0,0 +1,94 @@
---
"Copy settings during shrink index":
- skip:
version: " - 6.3.99"
reason: copy_settings did not exist prior to 6.4.0
features: "warnings"
- do:
cluster.state: {}
# get master node id
- set: { master_node: master }
- do:
indices.create:
index: source
wait_for_active_shards: 1
body:
settings:
# ensure everything is allocated on the master node
index.routing.allocation.include._id: $master
index.number_of_replicas: 0
index.merge.scheduler.max_merge_count: 4
# make it read-only
- do:
indices.put_settings:
index: source
body:
index.blocks.write: true
index.number_of_replicas: 0
- do:
cluster.health:
wait_for_status: green
index: source
  # now we do an actual shrink and copy settings
- do:
indices.shrink:
index: "source"
target: "copy-settings-target"
wait_for_active_shards: 1
master_timeout: 10s
copy_settings: true
body:
settings:
index.number_of_replicas: 0
index.merge.scheduler.max_thread_count: 2
warnings:
- "parameter [copy_settings] is deprecated but was [true]"
- do:
cluster.health:
wait_for_status: green
- do:
indices.get_settings:
index: "copy-settings-target"
# settings should be copied
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- match: { copy-settings-target.settings.index.blocks.write: "true" }
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
  # now we do an actual shrink and do not copy settings
- do:
indices.shrink:
index: "source"
target: "no-copy-settings-target"
wait_for_active_shards: 1
master_timeout: 10s
copy_settings: false
body:
settings:
index.number_of_replicas: 0
index.merge.scheduler.max_thread_count: 2
warnings:
- "parameter [copy_settings] is deprecated but was [false]"
- do:
cluster.health:
wait_for_status: green
- do:
indices.get_settings:
index: "no-copy-settings-target"
# only the request setting should be copied
- is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count
- match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- is_false: no-copy-settings-target.settings.index.blocks.write
- is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
@ -0,0 +1,98 @@
---
"Copy settings during split index":
- skip:
version: " - 6.3.99"
reason: copy_settings did not exist prior to 6.4.0
features: "warnings"
- do:
cluster.state: {}
# get master node id
- set: { master_node: master }
- do:
indices.create:
index: source
wait_for_active_shards: 1
body:
settings:
# ensure everything is allocated on the master node
index.routing.allocation.include._id: $master
index.number_of_replicas: 0
index.number_of_shards: 1
index.number_of_routing_shards: 4
index.merge.scheduler.max_merge_count: 4
# make it read-only
- do:
indices.put_settings:
index: source
body:
index.blocks.write: true
index.number_of_replicas: 0
- do:
cluster.health:
wait_for_status: green
index: source
  # now we do an actual split and copy settings
- do:
indices.split:
index: "source"
target: "copy-settings-target"
wait_for_active_shards: 1
master_timeout: 10s
copy_settings: true
body:
settings:
index.number_of_replicas: 0
index.number_of_shards: 2
index.merge.scheduler.max_thread_count: 2
warnings:
- "parameter [copy_settings] is deprecated but was [true]"
- do:
cluster.health:
wait_for_status: green
- do:
indices.get_settings:
index: "copy-settings-target"
# settings should be copied
- match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" }
- match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- match: { copy-settings-target.settings.index.blocks.write: "true" }
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
  # now we do an actual split and do not copy settings
- do:
indices.split:
index: "source"
target: "no-copy-settings-target"
wait_for_active_shards: 1
master_timeout: 10s
copy_settings: false
body:
settings:
index.number_of_replicas: 0
index.number_of_shards: 2
index.merge.scheduler.max_thread_count: 2
warnings:
- "parameter [copy_settings] is deprecated but was [false]"
- do:
cluster.health:
wait_for_status: green
- do:
indices.get_settings:
index: "no-copy-settings-target"
# only the request setting should be copied
- is_false: no-copy-settings-target.settings.index.merge.scheduler.max_merge_count
- match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
- is_false: no-copy-settings-target.settings.index.blocks.write
- is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
@ -635,8 +636,25 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
public static ElasticsearchException[] guessRootCauses(Throwable t) {
    Throwable ex = ExceptionsHelper.unwrapCause(t);
    if (ex instanceof ElasticsearchException) {
+       // ElasticsearchException knows how to guess its own root cause
        return ((ElasticsearchException) ex).guessRootCauses();
    }
+   if (ex instanceof XContentParseException) {
+       /*
+        * We'd like to unwrap parsing exceptions to the inner-most
+        * parsing exception because that is generally the most interesting
+        * exception to return to the user. If that exception is caused by
+        * an ElasticsearchException we'd like to keep unwrapping because
+        * ElasticsearchExceptions tend to contain useful information for
+        * the user.
+        */
+       Throwable cause = ex.getCause();
+       if (cause != null) {
+           if (cause instanceof XContentParseException || cause instanceof ElasticsearchException) {
+               return guessRootCauses(ex.getCause());
+           }
+       }
+   }
    return new ElasticsearchException[]{new ElasticsearchException(t.getMessage(), t) {
        @Override
        protected String getExceptionName() {
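A sketch of the unwrapping this enables (the exception nesting and messages
below are contrived for illustration, and assume the `XContentParseException`
constructors taking a location, a message, and an optional cause):

// Hypothetical nesting: an outer parse failure wrapping a more specific inner one.
XContentParseException inner = new XContentParseException(null, "[1:8] unknown field [foo]");
XContentParseException outer = new XContentParseException(null, "[1:4] [bool] failed to parse field [filter]", inner);
// guessRootCauses now surfaces the inner-most parsing exception rather than the outer wrapper.
ElasticsearchException[] roots = ElasticsearchException.guessRootCauses(outer);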
@ -19,7 +19,11 @@
package org.elasticsearch;

+/**
+ * An exception that is meant to be "unwrapped" when sent back to the user
+ * as an error, because its {@link #getCause() cause}, if non-null, is
+ * <strong>always</strong> more useful to the user than the exception itself.
+ */
public interface ElasticsearchWrapperException {
    Throwable getCause();
}
@ -45,6 +45,7 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final String providedName;
private Index recoverFrom;
private ResizeType resizeType;
+private boolean copySettings;
private IndexMetaData.State state = IndexMetaData.State.OPEN;
@ -112,6 +113,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
    return this;
}

+public CreateIndexClusterStateUpdateRequest copySettings(final boolean copySettings) {
+    this.copySettings = copySettings;
+    return this;
+}
+
public TransportMessage originalMessage() {
    return originalMessage;
}
@ -170,4 +176,9 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
public ResizeType resizeType() {
    return resizeType;
}

+public boolean copySettings() {
+    return copySettings;
+}
}
@ -18,6 +18,7 @@
*/
package org.elasticsearch.action.admin.indices.shrink;

+import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
@ -55,6 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
private CreateIndexRequest targetIndexRequest;
private String sourceIndex;
private ResizeType type = ResizeType.SHRINK;
+private boolean copySettings = false;

ResizeRequest() {}
@ -96,6 +98,11 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
} else {
    type = ResizeType.SHRINK; // BWC this used to be shrink only
}
+if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+    copySettings = in.readBoolean();
+} else {
+    copySettings = false;
+}
}

@Override
@ -106,6 +113,9 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
    out.writeEnum(type);
}
+if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
+    out.writeBoolean(copySettings);
+}
}

@Override
@ -177,6 +187,14 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
    return type;
}

+public void setCopySettings(final boolean copySettings) {
+    this.copySettings = copySettings;
+}
+
+public boolean getCopySettings() {
+    return copySettings;
+}
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
    builder.startObject();
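For reference, a sketch of setting the flag from the Java API (the
execute-style dispatch below is one of several equivalent ways to send the
request, and `client` is assumed to be an available `Client` instance):

// A sketch: shrink my_source_index into my_target_index, copying its settings.
ResizeRequest request = new ResizeRequest("my_target_index", "my_source_index");
request.setCopySettings(true);
client.admin().indices().execute(ResizeAction.INSTANCE, request).actionGet();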
@ -178,19 +178,19 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
settingsBuilder.put("index.number_of_shards", numShards);
targetIndex.settings(settingsBuilder);

-return new CreateIndexClusterStateUpdateRequest(targetIndex,
-        cause, targetIndex.index(), targetIndexName)
+return new CreateIndexClusterStateUpdateRequest(targetIndex, cause, targetIndex.index(), targetIndexName)
        // mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be
-       // applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we miss
-       // the mappings for everything is corrupted and hard to debug
+       // applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we
+       // miss the mappings for everything is corrupted and hard to debug
        .ackTimeout(targetIndex.timeout())
        .masterNodeTimeout(targetIndex.masterNodeTimeout())
        .settings(targetIndex.settings())
        .aliases(targetIndex.aliases())
        .customs(targetIndex.customs())
        .waitForActiveShards(targetIndex.waitForActiveShards())
        .recoverFrom(metaData.getIndex())
-       .resizeType(resizeRequest.getResizeType());
+       .resizeType(resizeRequest.getResizeType())
+       .copySettings(resizeRequest.getCopySettings());
}

@Override
@ -75,7 +75,11 @@ public class QueryExplanation implements Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
-   index = in.readString();
+   if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+       index = in.readOptionalString();
+   } else {
+       index = in.readString();
+   }
    if (in.getVersion().onOrAfter(Version.V_5_4_0)) {
        shard = in.readInt();
    } else {
@ -88,7 +92,11 @@ public class QueryExplanation implements Streamable {
@Override
public void writeTo(StreamOutput out) throws IOException {
-   out.writeString(index);
+   if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+       out.writeOptionalString(index);
+   } else {
+       out.writeString(index);
+   }
    if (out.getVersion().onOrAfter(Version.V_5_4_0)) {
        out.writeInt(shard);
    }
@ -38,8 +38,11 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.index.query.Rewriteable;
+import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.search.internal.SearchContext;
@ -54,6 +57,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;
+import java.util.function.LongSupplier;

public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {
@ -71,7 +75,39 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
@Override
protected void doExecute(Task task, ValidateQueryRequest request, ActionListener<ValidateQueryResponse> listener) {
    request.nowInMillis = System.currentTimeMillis();
-   super.doExecute(task, request, listener);
+   LongSupplier timeProvider = () -> request.nowInMillis;
+   ActionListener<org.elasticsearch.index.query.QueryBuilder> rewriteListener = ActionListener.wrap(rewrittenQuery -> {
+       request.query(rewrittenQuery);
+       super.doExecute(task, request, listener);
+   },
+   ex -> {
+       if (ex instanceof IndexNotFoundException ||
+           ex instanceof IndexClosedException) {
+           listener.onFailure(ex);
+           return; // do not also deliver a response below
+       }
+       List<QueryExplanation> explanations = new ArrayList<>();
+       explanations.add(new QueryExplanation(null,
+           QueryExplanation.RANDOM_SHARD,
+           false,
+           null,
+           ex.getMessage()));
+       listener.onResponse(
+           new ValidateQueryResponse(
+               false,
+               explanations,
+               // totalShards is documented as "the total shards this request ran against",
+               // which is 0 since the failure is happening on the coordinating node.
+               0,
+               0,
+               0,
+               null));
+   });
+   if (request.query() == null) {
+       rewriteListener.onResponse(request.query());
+   } else {
+       Rewriteable.rewriteAndFetch(request.query(), searchService.getRewriteContext(timeProvider),
+           rewriteListener);
+   }
}

@Override
@ -219,9 +219,19 @@ public class MetaDataCreateIndexService extends AbstractComponent {
Settings build = updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
indexScopedSettings.validate(build, true); // we do validate here - index setting must be consistent
request.settings(build);
-clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]",
-    new IndexCreationTask(logger, allocationService, request, listener, indicesService, aliasValidator, xContentRegistry, settings,
-        this::validate));
+clusterService.submitStateUpdateTask(
+    "create-index [" + request.index() + "], cause [" + request.cause() + "]",
+    new IndexCreationTask(
+        logger,
+        allocationService,
+        request,
+        listener,
+        indicesService,
+        aliasValidator,
+        xContentRegistry,
+        settings,
+        this::validate,
+        indexScopedSettings));
}

interface IndexValidator {
@ -238,11 +248,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
private final AllocationService allocationService;
private final Settings settings;
private final IndexValidator validator;
+private final IndexScopedSettings indexScopedSettings;

IndexCreationTask(Logger logger, AllocationService allocationService, CreateIndexClusterStateUpdateRequest request,
                  ActionListener<ClusterStateUpdateResponse> listener, IndicesService indicesService,
                  AliasValidator aliasValidator, NamedXContentRegistry xContentRegistry,
-                 Settings settings, IndexValidator validator) {
+                 Settings settings, IndexValidator validator, IndexScopedSettings indexScopedSettings) {
    super(Priority.URGENT, request, listener);
    this.request = request;
    this.logger = logger;
@ -252,6 +263,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
this.xContentRegistry = xContentRegistry;
this.settings = settings;
this.validator = validator;
+this.indexScopedSettings = indexScopedSettings;
}

@Override
@ -273,7 +285,8 @@ public class MetaDataCreateIndexService extends AbstractComponent {
// we only find a template when its an API call (a new index)
// find templates, highest order are better matching
-List<IndexTemplateMetaData> templates = MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index());
+List<IndexTemplateMetaData> templates =
+        MetaDataIndexTemplateService.findTemplates(currentState.metaData(), request.index());

Map<String, Custom> customs = new HashMap<>();
@ -402,7 +415,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (recoverFromIndex != null) {
    assert request.resizeType() != null;
    prepareResizeIndexSettings(
-       currentState, mappings.keySet(), indexSettingsBuilder, recoverFromIndex, request.index(), request.resizeType());
+       currentState,
+       mappings.keySet(),
+       indexSettingsBuilder,
+       recoverFromIndex,
+       request.index(),
+       request.resizeType(),
+       request.copySettings(),
+       indexScopedSettings);
}
final Settings actualIndexSettings = indexSettingsBuilder.build();
tmpImdBuilder.settings(actualIndexSettings);
@ -673,8 +693,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
    return sourceMetaData;
}

-static void prepareResizeIndexSettings(ClusterState currentState, Set<String> mappingKeys, Settings.Builder indexSettingsBuilder,
-                                       Index resizeSourceIndex, String resizeIntoName, ResizeType type) {
+static void prepareResizeIndexSettings(
+        final ClusterState currentState,
+        final Set<String> mappingKeys,
+        final Settings.Builder indexSettingsBuilder,
+        final Index resizeSourceIndex,
+        final String resizeIntoName,
+        final ResizeType type,
+        final boolean copySettings,
+        final IndexScopedSettings indexScopedSettings) {
    final IndexMetaData sourceMetaData = currentState.metaData().index(resizeSourceIndex.getName());
    if (type == ResizeType.SHRINK) {
        final List<String> nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(),
@ -695,15 +722,33 @@ public class MetaDataCreateIndexService extends AbstractComponent {
    throw new IllegalStateException("unknown resize type is " + type);
}

-final Predicate<String> sourceSettingsPredicate =
-        (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort."))
-                && indexSettingsBuilder.keys().contains(s) == false;
+final Settings.Builder builder = Settings.builder();
+if (copySettings) {
+    // copy all settings except non-copyable settings and settings that have already been set (e.g., from the request)
+    for (final String key : sourceMetaData.getSettings().keySet()) {
+        final Setting<?> setting = indexScopedSettings.get(key);
+        if (setting == null) {
+            assert indexScopedSettings.isPrivateSetting(key) : key;
+        } else if (setting.getProperties().contains(Setting.Property.NotCopyableOnResize)) {
+            continue;
+        }
+        // do not override settings that have already been set (for example, from the request)
+        if (indexSettingsBuilder.keys().contains(key)) {
+            continue;
+        }
+        builder.copy(key, sourceMetaData.getSettings());
+    }
+} else {
+    final Predicate<String> sourceSettingsPredicate =
+            (s) -> (s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort."))
+                    && indexSettingsBuilder.keys().contains(s) == false;
+    builder.put(sourceMetaData.getSettings().filter(sourceSettingsPredicate));
+}
indexSettingsBuilder
-       // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they
-       // wanna add extra settings
        .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion())
        .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion())
-       .put(sourceMetaData.getSettings().filter(sourceSettingsPredicate))
+       .put(builder.build())
        .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize())
        .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName())
        .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID());

* *
 *
 * @param repositories list of repositories
 */
-public RepositoriesMetaData(RepositoryMetaData... repositories) {
-    this.repositories = Arrays.asList(repositories);
+public RepositoriesMetaData(List<RepositoryMetaData> repositories) {
+    this.repositories = repositories;
}

/**
throw new ElasticsearchParseException("failed to parse repositories"); throw new ElasticsearchParseException("failed to parse repositories");
} }
} }
return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); return new RepositoriesMetaData(repository);
} }
/** /**
@ -43,6 +43,7 @@ import org.elasticsearch.indices.IndicesRequestCache;
import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

View File

@ -114,7 +114,13 @@ public class Setting<T> implements ToXContentObject {
/** /**
* Index scope * Index scope
*/ */
IndexScope IndexScope,
/**
* Mark this setting as not copyable during an index resize (shrink or split). This property can only be applied to settings that
* also have {@link Property#IndexScope}.
*/
NotCopyableOnResize
} }
private final Key key; private final Key key;
@ -142,10 +148,15 @@ public class Setting<T> implements ToXContentObject {
if (properties.length == 0) { if (properties.length == 0) {
this.properties = EMPTY_PROPERTIES; this.properties = EMPTY_PROPERTIES;
} else { } else {
this.properties = EnumSet.copyOf(Arrays.asList(properties)); final EnumSet<Property> propertiesAsSet = EnumSet.copyOf(Arrays.asList(properties));
if (isDynamic() && isFinal()) { if (propertiesAsSet.contains(Property.Dynamic) && propertiesAsSet.contains(Property.Final)) {
throw new IllegalArgumentException("final setting [" + key + "] cannot be dynamic"); throw new IllegalArgumentException("final setting [" + key + "] cannot be dynamic");
} }
if (propertiesAsSet.contains(Property.NotCopyableOnResize) && propertiesAsSet.contains(Property.IndexScope) == false) {
throw new IllegalArgumentException(
"non-index-scoped setting [" + key + "] can not have property [" + Property.NotCopyableOnResize + "]");
}
this.properties = propertiesAsSet;
} }
} }
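The constructor now validates property combinations eagerly against the collected set: a `Final` setting still cannot be `Dynamic`, and the new `NotCopyableOnResize` property is rejected unless the setting is also index-scoped. A hedged sketch of declaring a resize-excluded setting; the key `index.example.not_copyable` is made up for illustration:

[source,java]
--------------------------------------------------
// Valid: index-scoped and excluded from shrink/split settings copying.
Setting<String> notCopied = Setting.simpleString(
        "index.example.not_copyable",
        Setting.Property.IndexScope,
        Setting.Property.NotCopyableOnResize);

// Invalid: the added check throws
//   IllegalArgumentException: non-index-scoped setting [foo.bar] can not have property [NotCopyableOnResize]
// Setting.simpleString("foo.bar", Setting.Property.NotCopyableOnResize);
--------------------------------------------------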

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.query; package org.elasticsearch.index.query;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.ParsingException;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -111,7 +112,7 @@ public interface Rewriteable<T> {
} }
} }
rewriteResponse.onResponse(builder); rewriteResponse.onResponse(builder);
} catch (IOException ex) { } catch (IOException|IllegalArgumentException|ParsingException ex) {
rewriteResponse.onFailure(ex); rewriteResponse.onFailure(ex);
} }
} }

View File

@ -114,7 +114,8 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
if (repositories == null) { if (repositories == null) {
logger.info("put repository [{}]", request.name); logger.info("put repository [{}]", request.name);
repositories = new RepositoriesMetaData(new RepositoryMetaData(request.name, request.type, request.settings)); repositories = new RepositoriesMetaData(
Collections.singletonList(new RepositoryMetaData(request.name, request.type, request.settings)));
} else { } else {
boolean found = false; boolean found = false;
List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1); List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size() + 1);
@ -133,7 +134,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
} else { } else {
logger.info("update repository [{}]", request.name); logger.info("update repository [{}]", request.name);
} }
repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()])); repositories = new RepositoriesMetaData(repositoriesMetaData);
} }
mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
return ClusterState.builder(currentState).metaData(mdBuilder).build(); return ClusterState.builder(currentState).metaData(mdBuilder).build();
@ -185,7 +186,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta
} }
} }
if (changed) { if (changed) {
repositories = new RepositoriesMetaData(repositoriesMetaData.toArray(new RepositoryMetaData[repositoriesMetaData.size()])); repositories = new RepositoriesMetaData(repositoriesMetaData);
mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories);
return ClusterState.builder(currentState).metaData(mdBuilder).build(); return ClusterState.builder(currentState).metaData(mdBuilder).build();
} }

View File

@ -23,6 +23,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestController;
@ -46,6 +47,19 @@ public abstract class RestResizeHandler extends BaseRestHandler {
public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
resizeRequest.setResizeType(getResizeType()); resizeRequest.setResizeType(getResizeType());
final String rawCopySettings = request.param("copy_settings");
final boolean copySettings;
if (rawCopySettings == null) {
copySettings = resizeRequest.getCopySettings();
} else {
deprecationLogger.deprecated("parameter [copy_settings] is deprecated but was [" + rawCopySettings + "]");
if (rawCopySettings.length() == 0) {
copySettings = true;
} else {
copySettings = Booleans.parseBoolean(rawCopySettings);
}
}
resizeRequest.setCopySettings(copySettings);
request.applyContentParser(resizeRequest::fromXContent); request.applyContentParser(resizeRequest::fromXContent);
resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout()));
resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout()));
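The deprecated `copy_settings` query parameter is still honored while it warns: an absent parameter keeps the flag already on the request, a bare `?copy_settings` (empty value) counts as `true`, and any other value goes through strict boolean parsing. A standalone sketch of that tri-state mapping, with plain `Boolean.parseBoolean` standing in for the stricter `Booleans.parseBoolean` (which rejects anything other than `true`/`false`):

[source,java]
--------------------------------------------------
// Sketch only: in the handler, any non-null value also logs a deprecation warning.
static boolean effectiveCopySettings(final String rawCopySettings, final boolean requestDefault) {
    if (rawCopySettings == null) {
        return requestDefault;  // parameter absent: keep the flag already on the request
    }
    if (rawCopySettings.isEmpty()) {
        return true;            // bare `?copy_settings` counts as true
    }
    return Boolean.parseBoolean(rawCopySettings); // stand-in for Booleans.parseBoolean
}
--------------------------------------------------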

View File

@ -412,7 +412,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp
// Make sure that the number of shards is the same. That's the only thing that we cannot change // Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) { if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
"] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards"); "] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
} }
} }

View File

@ -41,6 +41,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParseException;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoverySettings;
@ -78,6 +79,7 @@ import static org.hamcrest.CoreMatchers.hasItem;
import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.startsWith; import static org.hamcrest.Matchers.startsWith;
public class ElasticsearchExceptionTests extends ESTestCase { public class ElasticsearchExceptionTests extends ESTestCase {
@ -124,13 +126,13 @@ public class ElasticsearchExceptionTests extends ESTestCase {
} else { } else {
rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex); rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex);
} }
assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception"); assertEquals("parsing_exception", ElasticsearchException.getExceptionName(rootCauses[0]));
assertEquals(rootCauses[0].getMessage(), "foobar"); assertEquals("foobar", rootCauses[0].getMessage());
ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar")); ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar"));
rootCauses = oneLevel.guessRootCauses(); rootCauses = oneLevel.guessRootCauses();
assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "exception"); assertEquals("exception", ElasticsearchException.getExceptionName(rootCauses[0]));
assertEquals(rootCauses[0].getMessage(), "foo"); assertEquals("foo", rootCauses[0].getMessage());
} }
{ {
ShardSearchFailure failure = new ShardSearchFailure( ShardSearchFailure failure = new ShardSearchFailure(
@ -146,20 +148,40 @@ public class ElasticsearchExceptionTests extends ESTestCase {
assertEquals(rootCauses.length, 2); assertEquals(rootCauses.length, 2);
assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception"); assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "parsing_exception");
assertEquals(rootCauses[0].getMessage(), "foobar"); assertEquals(rootCauses[0].getMessage(), "foobar");
assertEquals(((ParsingException) rootCauses[0]).getLineNumber(), 1); assertEquals(1, ((ParsingException) rootCauses[0]).getLineNumber());
assertEquals(((ParsingException) rootCauses[0]).getColumnNumber(), 2); assertEquals(2, ((ParsingException) rootCauses[0]).getColumnNumber());
assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "query_shard_exception"); assertEquals("query_shard_exception", ElasticsearchException.getExceptionName(rootCauses[1]));
assertEquals((rootCauses[1]).getIndex().getName(), "foo1"); assertEquals("foo1", rootCauses[1].getIndex().getName());
assertEquals(rootCauses[1].getMessage(), "foobar"); assertEquals("foobar", rootCauses[1].getMessage());
} }
{ {
final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar")); final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar"));
assertEquals(foobars.length, 1); assertEquals(foobars.length, 1);
assertTrue(foobars[0] instanceof ElasticsearchException); assertThat(foobars[0], instanceOf(ElasticsearchException.class));
assertEquals(foobars[0].getMessage(), "foobar"); assertEquals("foobar", foobars[0].getMessage());
assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class); assertEquals(IllegalArgumentException.class, foobars[0].getCause().getClass());
assertEquals(foobars[0].getExceptionName(), "illegal_argument_exception"); assertEquals("illegal_argument_exception", foobars[0].getExceptionName());
}
{
XContentParseException inner = new XContentParseException(null, "inner");
XContentParseException outer = new XContentParseException(null, "outer", inner);
final ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(outer);
assertEquals(causes.length, 1);
assertThat(causes[0], instanceOf(ElasticsearchException.class));
assertEquals("inner", causes[0].getMessage());
assertEquals("x_content_parse_exception", causes[0].getExceptionName());
}
{
ElasticsearchException inner = new ElasticsearchException("inner");
XContentParseException outer = new XContentParseException(null, "outer", inner);
final ElasticsearchException[] causes = ElasticsearchException.guessRootCauses(outer);
assertEquals(causes.length, 1);
assertThat(causes[0], instanceOf(ElasticsearchException.class));
assertEquals("inner", causes[0].getMessage());
assertEquals("exception", causes[0].getExceptionName());
} }
} }

View File

@ -640,7 +640,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
@Override @Override
public MetaData.Custom randomCreate(String name) { public MetaData.Custom randomCreate(String name) {
if (randomBoolean()) { if (randomBoolean()) {
return new RepositoriesMetaData(); return new RepositoriesMetaData(Collections.emptyList());
} else { } else {
return IndexGraveyardTests.createRandom(); return IndexGraveyardTests.createRandom();
} }

View File

@ -39,6 +39,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@ -388,8 +389,7 @@ public class IndexCreationTaskTests extends ESTestCase {
setupRequest(); setupRequest();
final MetaDataCreateIndexService.IndexCreationTask task = new MetaDataCreateIndexService.IndexCreationTask( final MetaDataCreateIndexService.IndexCreationTask task = new MetaDataCreateIndexService.IndexCreationTask(
logger, allocationService, request, listener, indicesService, aliasValidator, xContentRegistry, clusterStateSettings.build(), logger, allocationService, request, listener, indicesService, aliasValidator, xContentRegistry, clusterStateSettings.build(),
validator validator, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
);
return task.execute(state); return task.execute(state);
} }

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.metadata; package org.elasticsearch.cluster.metadata;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterName;
@ -34,22 +35,27 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Set;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static java.util.Collections.min;
import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
@ -228,90 +234,146 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
Settings.builder().put("index.number_of_shards", targetShards).build()); Settings.builder().put("index.number_of_shards", targetShards).build());
} }
public void testResizeIndexSettings() { public void testPrepareResizeIndexSettings() {
String indexName = randomAlphaOfLength(10); final List<Version> versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()));
List<Version> versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()),
VersionUtils.randomVersion(random()));
versions.sort(Comparator.comparingLong(l -> l.id)); versions.sort(Comparator.comparingLong(l -> l.id));
Version version = versions.get(0); final Version version = versions.get(0);
Version minCompat = versions.get(1); final Version upgraded = versions.get(1);
Version upgraded = versions.get(2); final Settings indexSettings =
// create one that won't fail Settings.builder()
ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0, .put("index.version.created", version)
Settings.builder() .put("index.version.upgraded", upgraded)
.put("index.blocks.write", true) .put("index.similarity.default.type", "BM25")
.put("index.similarity.default.type", "BM25") .put("index.analysis.analyzer.default.tokenizer", "keyword")
.put("index.version.created", version) .build();
.put("index.version.upgraded", upgraded) runPrepareResizeIndexSettingsTest(
.put("index.version.minimum_compatible", minCompat.luceneVersion.toString()) indexSettings,
.put("index.analysis.analyzer.default.tokenizer", "keyword") Settings.EMPTY,
.build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) Collections.emptyList(),
.build(); randomBoolean(),
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, settings -> {
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), assertThat("similarity settings must be copied", settings.get("index.similarity.default.type"), equalTo("BM25"));
new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); assertThat(
"analysis settings must be copied",
settings.get("index.analysis.analyzer.default.tokenizer"),
equalTo("keyword"));
assertThat(settings.get("index.routing.allocation.initial_recovery._id"), equalTo("node1"));
assertThat(settings.get("index.allocation.max_retries"), equalTo("1"));
assertThat(settings.getAsVersion("index.version.created", null), equalTo(version));
assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded));
});
}
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); public void testPrepareResizeIndexSettingsCopySettings() {
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); final int maxMergeCount = randomIntBetween(1, 16);
// now we start the shard final int maxThreadCount = randomIntBetween(1, 16);
routingTable = service.applyStartedShards(clusterState, final Setting<String> nonCopyableExistingIndexSetting =
routingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); Setting.simpleString("index.non_copyable.existing", Setting.Property.IndexScope, Setting.Property.NotCopyableOnResize);
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); final Setting<String> nonCopyableRequestIndexSetting =
Setting.simpleString("index.non_copyable.request", Setting.Property.IndexScope, Setting.Property.NotCopyableOnResize);
{ runPrepareResizeIndexSettingsTest(
final Settings.Builder builder = Settings.builder(); Settings.builder()
builder.put("index.number_of_shards", 1); .put("index.merge.scheduler.max_merge_count", maxMergeCount)
MetaDataCreateIndexService.prepareResizeIndexSettings( .put("index.non_copyable.existing", "existing")
clusterState, .build(),
Collections.emptySet(), Settings.builder()
builder, .put("index.blocks.write", (String) null)
clusterState.metaData().index(indexName).getIndex(), .put("index.merge.scheduler.max_thread_count", maxThreadCount)
"target", .put("index.non_copyable.request", "request")
ResizeType.SHRINK); .build(),
final Settings settings = builder.build(); Arrays.asList(nonCopyableExistingIndexSetting, nonCopyableRequestIndexSetting),
assertThat("similarity settings must be copied", settings.get("index.similarity.default.type"), equalTo("BM25")); true,
assertThat( settings -> {
"analysis settings must be copied", settings.get("index.analysis.analyzer.default.tokenizer"), equalTo("keyword")); assertNull(settings.getAsBoolean("index.blocks.write", null));
assertThat(settings.get("index.routing.allocation.initial_recovery._id"), equalTo("node1")); assertThat(settings.get("index.routing.allocation.require._name"), equalTo("node1"));
assertThat(settings.get("index.allocation.max_retries"), equalTo("1")); assertThat(settings.getAsInt("index.merge.scheduler.max_merge_count", null), equalTo(maxMergeCount));
assertThat(settings.getAsVersion("index.version.created", null), equalTo(version)); assertThat(settings.getAsInt("index.merge.scheduler.max_thread_count", null), equalTo(maxThreadCount));
assertThat(settings.getAsVersion("index.version.upgraded", null), equalTo(upgraded)); assertNull(settings.get("index.non_copyable.existing"));
} assertThat(settings.get("index.non_copyable.request"), equalTo("request"));
});
}
public void testPrepareResizeIndexSettingsAnalysisSettings() {
// analysis settings from the request are not overwritten // analysis settings from the request are not overwritten
{ runPrepareResizeIndexSettingsTest(
final Settings.Builder builder = Settings.builder(); Settings.EMPTY,
builder.put("index.number_of_shards", 1); Settings.builder().put("index.analysis.analyzer.default.tokenizer", "whitespace").build(),
builder.put("index.analysis.analyzer.default.tokenizer", "whitespace"); Collections.emptyList(),
MetaDataCreateIndexService.prepareResizeIndexSettings( randomBoolean(),
clusterState, settings ->
Collections.emptySet(), assertThat(
builder, "analysis settings are not overwritten",
clusterState.metaData().index(indexName).getIndex(), settings.get("index.analysis.analyzer.default.tokenizer"),
"target", equalTo("whitespace"))
ResizeType.SHRINK); );
final Settings settings = builder.build();
assertThat(
"analysis settings are not overwritten",
settings.get("index.analysis.analyzer.default.tokenizer"),
equalTo("whitespace"));
}
}
public void testPrepareResizeIndexSettingsSimilaritySettings() {
// similarity settings from the request are not overwritten // similarity settings from the request are not overwritten
{ runPrepareResizeIndexSettingsTest(
final Settings.Builder builder = Settings.builder(); Settings.EMPTY,
builder.put("index.number_of_shards", 1); Settings.builder().put("index.similarity.sim.type", "DFR").build(),
builder.put("index.similarity.default.type", "DFR"); Collections.emptyList(),
MetaDataCreateIndexService.prepareResizeIndexSettings( randomBoolean(),
clusterState, settings ->
Collections.emptySet(), assertThat("similarity settings are not overwritten", settings.get("index.similarity.sim.type"), equalTo("DFR")));
builder,
clusterState.metaData().index(indexName).getIndex(), }
"target",
ResizeType.SHRINK); private void runPrepareResizeIndexSettingsTest(
final Settings settings = builder.build(); final Settings sourceSettings,
assertThat("similarity settings are not overwritten", settings.get("index.similarity.default.type"), equalTo("DFR")); final Settings requestSettings,
} final Collection<Setting<?>> additionalIndexScopedSettings,
final boolean copySettings,
final Consumer<Settings> consumer) {
final String indexName = randomAlphaOfLength(10);
final Settings indexSettings = Settings.builder()
.put("index.blocks.write", true)
.put("index.routing.allocation.require._name", "node1")
.put(sourceSettings)
.build();
final ClusterState initialClusterState =
ClusterState
.builder(createClusterState(indexName, randomIntBetween(2, 10), 0, indexSettings))
.nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
final AllocationService service = new AllocationService(
Settings.builder().build(),
new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
new TestGatewayAllocator(),
new BalancedShardsAllocator(Settings.EMPTY),
EmptyClusterInfoService.INSTANCE);
final RoutingTable initialRoutingTable = service.reroute(initialClusterState, "reroute").routingTable();
final ClusterState routingTableClusterState = ClusterState.builder(initialClusterState).routingTable(initialRoutingTable).build();
// now we start the shard
final RoutingTable routingTable = service.applyStartedShards(
routingTableClusterState,
initialRoutingTable.index(indexName).shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
final ClusterState clusterState = ClusterState.builder(routingTableClusterState).routingTable(routingTable).build();
final Settings.Builder indexSettingsBuilder = Settings.builder().put("index.number_of_shards", 1).put(requestSettings);
final Set<Setting<?>> settingsSet =
Stream.concat(
IndexScopedSettings.BUILT_IN_INDEX_SETTINGS.stream(),
additionalIndexScopedSettings.stream())
.collect(Collectors.toSet());
MetaDataCreateIndexService.prepareResizeIndexSettings(
clusterState,
Collections.emptySet(),
indexSettingsBuilder,
clusterState.metaData().index(indexName).getIndex(),
"target",
ResizeType.SHRINK,
copySettings,
new IndexScopedSettings(Settings.EMPTY, settingsSet));
consumer.accept(indexSettingsBuilder.build());
} }
private DiscoveryNode newNode(String nodeId) { private DiscoveryNode newNode(String nodeId) {

View File

@ -722,12 +722,19 @@ public class SettingTests extends ESTestCase {
assertThat(ex.getMessage(), containsString("properties cannot be null for setting")); assertThat(ex.getMessage(), containsString("properties cannot be null for setting"));
} }
public void testRejectConflictProperties() { public void testRejectConflictingDynamicAndFinalProperties() {
IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, IllegalArgumentException ex = expectThrows(IllegalArgumentException.class,
() -> Setting.simpleString("foo.bar", Property.Final, Property.Dynamic)); () -> Setting.simpleString("foo.bar", Property.Final, Property.Dynamic));
assertThat(ex.getMessage(), containsString("final setting [foo.bar] cannot be dynamic")); assertThat(ex.getMessage(), containsString("final setting [foo.bar] cannot be dynamic"));
} }
public void testRejectNonIndexScopedNotCopyableOnResizeSetting() {
final IllegalArgumentException e = expectThrows(
IllegalArgumentException.class,
() -> Setting.simpleString("foo.bar", Property.NotCopyableOnResize));
assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [NotCopyableOnResize]")));
}
public void testTimeValue() { public void testTimeValue() {
final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test"); final TimeValue random = TimeValue.parseTimeValue(randomTimeValue(), "test");

View File

@ -51,6 +51,7 @@ import org.elasticsearch.test.junit.annotations.TestLogging;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.Locale;
import java.util.Map; import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
@ -117,7 +118,7 @@ public class FlushIT extends ESIntegTestCase {
ShardsSyncedFlushResult result; ShardsSyncedFlushResult result;
if (randomBoolean()) { if (randomBoolean()) {
logger.info("--> sync flushing shard 0"); logger.info("--> sync flushing shard 0");
result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId(index, 0)); result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0));
} else { } else {
logger.info("--> sync flushing index [test]"); logger.info("--> sync flushing index [test]");
SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get();
@ -246,11 +247,14 @@ public class FlushIT extends ESIntegTestCase {
} }
private String syncedFlushDescription(ShardsSyncedFlushResult result) { private String syncedFlushDescription(ShardsSyncedFlushResult result) {
return result.shardResponses().entrySet().stream() String detail = result.shardResponses().entrySet().stream()
.map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]")
.collect(Collectors.joining(",")); .collect(Collectors.joining(","));
return String.format(Locale.ROOT, "Total shards: [%d], failed: [%s], reason: [%s], detail: [%s]",
result.totalShards(), result.failed(), result.failureReason(), detail);
} }
@TestLogging("_root:DEBUG")
public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
final int numberOfReplicas = internalCluster().numDataNodes() - 1; final int numberOfReplicas = internalCluster().numDataNodes() - 1;
@ -275,7 +279,7 @@ public class FlushIT extends ESIntegTestCase {
for (int i = 0; i < extraDocs; i++) { for (int i = 0; i < extraDocs; i++) {
indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i);
} }
final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); logger.info("Partial seal: {}", syncedFlushDescription(partialResult));
assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1));
assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas));
@ -287,7 +291,7 @@ public class FlushIT extends ESIntegTestCase {
indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i); indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i);
} }
} }
final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1));
assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
} }
@ -308,11 +312,11 @@ public class FlushIT extends ESIntegTestCase {
for (int i = 0; i < numDocs; i++) { for (int i = 0; i < numDocs; i++) {
index("test", "doc", Integer.toString(i)); index("test", "doc", Integer.toString(i));
} }
final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
logger.info("First seal: {}", syncedFlushDescription(firstSeal)); logger.info("First seal: {}", syncedFlushDescription(firstSeal));
assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1));
// Do not renew synced-flush // Do not renew synced-flush
final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); logger.info("Second seal: {}", syncedFlushDescription(secondSeal));
assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1));
assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId()));
@ -321,7 +325,7 @@ public class FlushIT extends ESIntegTestCase {
for (int i = 0; i < moreDocs; i++) { for (int i = 0; i < moreDocs; i++) {
index("test", "doc", Integer.toString(i)); index("test", "doc", Integer.toString(i));
} }
final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); logger.info("Third seal: {}", syncedFlushDescription(thirdSeal));
assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1));
assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId())));
@ -337,7 +341,7 @@ public class FlushIT extends ESIntegTestCase {
shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true)); shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true));
assertThat(shard.commitStats().syncId(), nullValue()); assertThat(shard.commitStats().syncId(), nullValue());
} }
final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); logger.info("Forth seal: {}", syncedFlushDescription(forthSeal));
assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1));
assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId())));

View File

@ -18,11 +18,11 @@
*/ */
package org.elasticsearch.indices.flush; package org.elasticsearch.indices.flush;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.InternalTestCluster;
@ -40,8 +40,10 @@ public class SyncedFlushUtil {
/** /**
* Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)}
*/ */
public static ShardsSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, ShardId shardId) { public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) {
SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); SyncedFlushService service = cluster.getInstance(SyncedFlushService.class);
logger.debug("Issue synced-flush on node [{}], shard [{}], cluster state [{}]",
service.nodeName(), shardId, cluster.clusterService(service.nodeName()).state());
LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>(); LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>();
service.attemptSyncedFlush(shardId, listener); service.attemptSyncedFlush(shardId, listener);
try { try {

View File

@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import java.io.IOException;
import java.util.Collections;
import static org.mockito.Mockito.mock;
public class RestResizeHandlerTests extends ESTestCase {
public void testShrinkCopySettingsDeprecated() throws IOException {
final RestResizeHandler.RestShrinkIndexAction handler =
new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class));
final String copySettings = randomFrom("true", "false");
final FakeRestRequest request =
new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
.withParams(Collections.singletonMap("copy_settings", copySettings))
.withPath("source/_shrink/target")
.build();
handler.prepareRequest(request, mock(NodeClient.class));
assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
}
public void testSplitCopySettingsDeprecated() throws IOException {
final RestResizeHandler.RestSplitIndexAction handler =
new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class));
final String copySettings = randomFrom("true", "false");
final FakeRestRequest request =
new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
.withParams(Collections.singletonMap("copy_settings", copySettings))
.withPath("source/_split/target")
.build();
handler.prepareRequest(request, mock(NodeClient.class));
assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
}

View File


@ -45,7 +45,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
entries.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); entries.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()));
} }
entries.sort(Comparator.comparing(RepositoryMetaData::name)); entries.sort(Comparator.comparing(RepositoryMetaData::name));
return new RepositoriesMetaData(entries.toArray(new RepositoryMetaData[entries.size()])); return new RepositoriesMetaData(entries);
} }
@Override @Override
@ -62,7 +62,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
} else { } else {
entries.remove(randomIntBetween(0, entries.size() - 1)); entries.remove(randomIntBetween(0, entries.size() - 1));
} }
return new RepositoriesMetaData(entries.toArray(new RepositoryMetaData[entries.size()])); return new RepositoriesMetaData(entries);
} }
public Settings randomSettings() { public Settings randomSettings() {
@ -94,7 +94,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
repos.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings())); repos.add(new RepositoryMetaData(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings()));
} }
} }
return new RepositoriesMetaData(repos.toArray(new RepositoryMetaData[repos.size()])); return new RepositoriesMetaData(repos);
} }
@Override @Override
@ -114,7 +114,7 @@ public class RepositoriesMetaDataSerializationTests extends AbstractDiffableSeri
assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken());
List<RepositoryMetaData> repos = repositoriesMetaData.repositories(); List<RepositoryMetaData> repos = repositoriesMetaData.repositories();
repos.sort(Comparator.comparing(RepositoryMetaData::name)); repos.sort(Comparator.comparing(RepositoryMetaData::name));
return new RepositoriesMetaData(repos.toArray(new RepositoryMetaData[repos.size()])); return new RepositoriesMetaData(repos);
} }
} }

View File

@ -29,6 +29,8 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.TermsQueryBuilder;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.ESIntegTestCase.Scope;
@ -330,4 +332,21 @@ public class SimpleValidateQueryIT extends ESIntegTestCase {
assertThat(response.isValid(), equalTo(true)); assertThat(response.isValid(), equalTo(true));
} }
} }
public void testExplainTermsQueryWithLookup() throws Exception {
client().admin().indices().prepareCreate("twitter")
.addMapping("_doc", "user", "type=integer", "followers", "type=integer")
.setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get();
client().prepareIndex("twitter", "_doc", "1")
.setSource("followers", new int[] {1, 2, 3}).get();
refresh();
TermsQueryBuilder termsLookupQuery = QueryBuilders.termsLookupQuery("user", new TermsLookup("twitter", "_doc", "1", "followers"));
ValidateQueryResponse response = client().admin().indices().prepareValidateQuery("twitter")
.setTypes("_doc")
.setQuery(termsLookupQuery)
.setExplain(true)
.execute().actionGet();
assertThat(response.isValid(), is(true));
}
} }

View File

@ -31,8 +31,5 @@ include::commands/index.asciidoc[]
:edit_url: :edit_url:
include::{es-repo-dir}/index-shared4.asciidoc[] include::{es-repo-dir}/index-shared4.asciidoc[]
:edit_url!:
include::release-notes/xpack-xes.asciidoc[]
:edit_url: :edit_url:
include::{es-repo-dir}/index-shared5.asciidoc[] include::{es-repo-dir}/index-shared5.asciidoc[]

View File

@ -1,26 +0,0 @@
[[xes-7.0.0-alpha1]]
== {es} {xpack} 7.0.0-alpha1 Release Notes
[float]
[[xes-breaking-7.0.0-alpha1]]
=== Breaking Changes
Machine Learning::
* The `max_running_jobs` node property is removed in this release. Use the
`xpack.ml.max_open_jobs` setting instead. For more information, see <<ml-settings>>.
Monitoring::
* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1`
to disable monitoring data collection. Use `xpack.monitoring.collection.enabled`
and set it to `false` (its default), which was added in 6.3.0.
Security::
* The fields returned as part of the mappings section by the get index, get
mappings, get field mappings, and field capabilities APIs are now only the
ones that the user is authorized to access when field level security is enabled.
See also:
* <<release-notes-7.0.0-alpha1,{es} 7.0.0-alpha1 Release Notes>>
* {kibana-ref}/xkb-7.0.0-alpha1.html[{kib} {xpack} 7.0.0-alpha1 Release Notes]
* {logstash-ref}/xls-7.0.0-alpha1.html[Logstash {xpack} 7.0.0-alpha1 Release Notes]

View File

@ -12,8 +12,8 @@ your application from one version of {xpack} to another.
See also: See also:
* <<breaking-changes,{es} Breaking Changes>> * <<breaking-changes,{es} Breaking Changes>>
* {kibana-ref}/breaking-changes-xkb.html[{kib} {xpack} Breaking Changes] * {kibana-ref}/breaking-changes.html[{kib} Breaking Changes]
* {logstash-ref}/breaking-changes-xls.html[Logstash {xpack} Breaking Changes] * {logstash-ref}/breaking-changes.html[Logstash Breaking Changes]
-- --

View File

@ -1,20 +0,0 @@
[role="xpack"]
[[release-notes-xes]]
= {xpack} Release Notes
[partintro]
--
This section summarizes the changes in each release for all of the {xpack}
components in {es}.
* <<xes-7.0.0-alpha1>>
See also:
* <<es-release-notes,{es} Release Notes>>
* {kibana-ref}/release-notes-xkb.html[{kib} {xpack} Release Notes]
* {logstash-ref}/release-notes-xls.html[Logstash {xpack} Release Notes]
--
include::7.0.0-alpha1.asciidoc[]

View File

@ -6,14 +6,7 @@ users. To integrate with Active Directory, you configure an `active_directory`
realm and map Active Directory users and groups to {security} roles in the realm and map Active Directory users and groups to {security} roles in the
<<mapping-roles, role mapping file>>. <<mapping-roles, role mapping file>>.
To protect passwords, communications between Elasticsearch and the Active Directory See {ref}/configuring-ad-realm.html[Configuring an Active Directory Realm].
server should be encrypted using SSL/TLS. Clients and nodes that connect via
SSL/TLS to the Active Directory server need to have the Active Directory server's
certificate or the server's root CA certificate installed in their keystore or
truststore. For more information about installing certificates, see
<<active-directory-ssl>>.
==== Configuring an Active Directory Realm
{security} uses LDAP to communicate with Active Directory, so `active_directory` {security} uses LDAP to communicate with Active Directory, so `active_directory`
realms are similar to <<ldap-realm, `ldap` realms>>. Like LDAP directories, realms are similar to <<ldap-realm, `ldap` realms>>. Like LDAP directories,
@ -39,134 +32,8 @@ Active Directory. Once the user has been found, the Active Directory realm then
retrieves the user's group memberships from the `tokenGroups` attribute on the retrieves the user's group memberships from the `tokenGroups` attribute on the
user's entry in Active Directory. user's entry in Active Directory.
To configure an `active_directory` realm:
. Add a realm configuration of type `active_directory` to `elasticsearch.yml`
under the `xpack.security.authc.realms` namespace. At a minimum, you must set the realm
`type` to `active_directory` and specify the Active Directory `domain_name`. To
use SSL/TLS for secured communication with the Active Directory server, you must
also set the `url` attribute and specify the `ldaps` protocol and secure port
number. If you are configuring multiple realms, you should also explicitly set
the `order` attribute to control the order in which the realms are consulted
during authentication. See <<ad-settings, Active Directory Realm Settings>>
for all of the options you can set for an `active_directory` realm.
+
NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS.
If DNS is not being provided by a Windows DNS server, add a mapping for
the domain in the local `/etc/hosts` file.
+
For example, the following realm configuration configures {security} to connect
to `ldaps://example.com:636` to authenticate users through Active Directory.
+
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0 <1>
domain_name: ad.example.com
url: ldaps://ad.example.com:636 <2>
------------------------------------------------------------
<1> The realm order controls the order in which the configured realms are checked
when authenticating a user.
<2> If you don't specify the URL, it defaults to `ldap://<domain_name>:389`.
+
IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
realms you specify are used for authentication. If you also want to use the
`native` or `file` realms, you must include them in the realm chain.
. Restart Elasticsearch.
===== Configuring a Bind User
By default, all of the LDAP operations are run by the user that {security} is
authenticating. In some cases, regular users may not be able to access all of the
necessary items within Active Directory and a _bind user_ is needed. A bind user
can be configured and will be used to perform all operations other than the LDAP
bind request, which is required to authenticate the credentials provided by the user.
The use of a bind user enables the <<run-as-privilege,run as feature>> to be
used with the Active Directory realm and allows a set of pooled connections to
Active Directory to be maintained. These pooled connections reduce the number of
resources that must be created and destroyed with every user authentication.
The following example shows the configuration of a bind user through the use of the
`bind_dn` and `secure_bind_password` settings.
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0
domain_name: ad.example.com
url: ldaps://ad.example.com:636
bind_dn: es_svc_user@ad.example.com <1>
------------------------------------------------------------
<1> This is the user that all Active Directory search requests are executed as.
Without a bind user configured, all requests run as the user that is authenticating
with Elasticsearch.
The password for the `bind_dn` user should be configured by adding the appropriate
`secure_bind_password` setting to the {es} keystore.
For example, the following command adds the password for the example realm above:
[source, shell]
------------------------------------------------------------
bin/elasticsearch-keystore add xpack.security.authc.realms.active_directory.secure_bind_password
------------------------------------------------------------
When a bind user is configured, connection pooling is enabled by default.
Connection pooling can be disabled using the `user_search.pool.enabled` setting.
===== Multiple Domain Support
When authenticating users across multiple domains in a forest, there are a few minor
differences in the configuration and the way that users will authenticate. The `domain_name`
setting should be set to the forest root domain name. The `url` setting also needs to
be set as you will need to authenticate against the Global Catalog, which uses a different
port and may not be running on every Domain Controller.
For example, the following realm configuration configures {security} to connect to specific
Domain Controllers on the Global Catalog port with the domain name set to the forest root.
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0
domain_name: example.com <1>
url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2>
load_balance:
type: "round_robin" <3>
------------------------------------------------------------
<1> The `domain_name` is set to the name of the root domain in the forest.
<2> The `url` value used in this example has URLs for two different Domain Controllers,
which are also Global Catalog servers. Port 3268 is the default port for unencrypted
communication with the Global Catalog; port 3269 is the default port for SSL connections.
The servers that are being connected to can be in any domain of the forest as long as
they are also Global Catalog servers.
<3> A load balancing setting is provided to indicate the desired behavior when choosing
the server to connect to.
In this configuration, users will need to use either their full User Principal
Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of
the username with `@<DOMAIN_NAME>` such as `johndoe@ad.example.com`. The Down-Level
Logon Name is the NetBIOS domain name, followed by a `\` and the username, such as
`AD\johndoe`. Use of Down-Level Logon Name requires a connection to the regular LDAP
ports (389 or 636) in order to query the configuration container to retrieve the
domain name from the NetBIOS name.
[[ad-load-balancing]] [[ad-load-balancing]]
===== Load Balancing and Failover ==== Load Balancing and Failover
The `load_balance.type` setting can be used at the realm level to configure how The `load_balance.type` setting can be used at the realm level to configure how
{security} should interact with multiple Active Directory servers. Two modes of {security} should interact with multiple Active Directory servers. Two modes of
operation are supported: failover and load balancing. operation are supported: failover and load balancing.
@ -174,93 +41,20 @@ operation are supported: failover and load balancing.
See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings]. See {ref}/security-settings.html#load-balancing[Load Balancing and Failover Settings].
[[ad-settings]] [[ad-settings]]
===== Active Directory Realm Settings ==== Active Directory Realm Settings
See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings]. See {ref}/security-settings.html#ref-ad-settings[Active Directory Realm Settings].
[[mapping-roles-ad]] [[mapping-roles-ad]]
==== Mapping Active Directory Users and Groups to Roles ==== Mapping Active Directory Users and Groups to Roles
An integral part of a realm authentication process is to resolve the roles See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm].
associated with the authenticated user. Roles define the privileges a user has
in the cluster.
Since with the `active_directory` realm the users are managed externally in the
Active Directory server, the expectation is that their roles are managed there
as well. In fact, Active Directory supports the notion of groups, which often
represent user roles for different systems in the organization.
The `active_directory` realm enables you to map Active Directory users to roles
via their Active Directory groups, or other metadata. This role mapping can be
configured via the {ref}/security-api-role-mapping.html[role-mapping API], or by using
a file stored on each node. When a user authenticates against an Active
Directory realm, the privileges for that user are the union of all privileges
defined by the roles to which the user is mapped.
Within a mapping definition, you specify groups using their distinguished
names. For example, the following mapping configuration maps the Active
Directory `admins` group to both the `monitoring` and `user` roles, maps the
`users` group to the `user` role and maps the `John Doe` user to the `user`
role.
Configured via the role-mapping API:
[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/admins
{
"roles" : [ "monitoring" , "user" ],
"rules" : { "field" : {
"groups" : "cn=admins,dc=example,dc=com" <1>
} },
"enabled": true
}
--------------------------------------------------
// CONSOLE
<1> The Active Directory distinguished name (DN) of the `admins` group.
[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/basic_users
{
"roles" : [ "user" ],
"rules" : { "any": [
{ "field" : {
"groups" : "cn=users,dc=example,dc=com" <1>
} },
{ "field" : {
"dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2>
} }
] },
"enabled": true
}
--------------------------------------------------
// CONSOLE
<1> The Active Directory distinguished name (DN) of the `users` group.
<2> The Active Directory distinguished name (DN) of the user `John Doe`.
Or, alternatively, configured via the role-mapping file:
[source, yaml]
------------------------------------------------------------
monitoring: <1>
- "cn=admins,dc=example,dc=com" <2>
user:
- "cn=users,dc=example,dc=com" <3>
- "cn=admins,dc=example,dc=com"
- "cn=John Doe,cn=contractors,dc=example,dc=com" <4>
------------------------------------------------------------
<1> The name of the role.
<2> The Active Directory distinguished name (DN) of the `admins` group.
<3> The Active Directory distinguished name (DN) of the `users` group.
<4> The Active Directory distinguished name (DN) of the user `John Doe`.
For more information, see <<mapping-roles, Mapping Users and Groups to Roles>>.
See {ref}/configuring-ad-realm.html[Configuring an Active Directory realm].
[[ad-user-metadata]]
==== User Metadata in Active Directory Realms
When a user is authenticated via an Active Directory realm, the following
properties are populated in the user's _metadata_:
|=======================
| Field               | Description
@ -270,51 +64,15 @@ properties are populated in the user's _metadata_. This metadata is returned in
groups were mapped to a role).
|=======================
This metadata is returned in the
{ref}/security-api-authenticate.html[authenticate API] and can be used with
<<templating-role-query, templated queries>> in roles.
Additional metadata can be extracted from the Active Directory server by configuring
the `metadata` setting on the Active Directory realm.
[[active-directory-ssl]]
==== Setting up SSL Between Elasticsearch and Active Directory
To protect the user credentials that are sent for authentication, it's highly
recommended to encrypt communications between Elasticsearch and your Active
Directory server. Connecting via SSL/TLS ensures that the identity of the Active
Directory server is authenticated before {security} transmits the user
credentials, and the usernames and passwords are encrypted in transit.
To encrypt communications between Elasticsearch and Active Directory:
. Configure each node to trust certificates signed by the CA that signed your
Active Directory server certificates. The following example demonstrates how to trust a CA certificate,
`cacert.pem`, located within the {xpack} configuration directory:
+
[source,shell]
--------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0
domain_name: ad.example.com
url: ldaps://ad.example.com:636
ssl:
certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ]
--------------------------------------------------
+
The CA cert must be a PEM encoded certificate.
. Set the `url` attribute in the realm configuration to specify the LDAPS protocol
and the secure port number. For example, `url: ldaps://ad.example.com:636`.
. Restart Elasticsearch.
NOTE: By default, when you configure {security} to connect to Active Directory
using SSL/TLS, {security} attempts to verify the hostname or IP address
specified with the `url` attribute in the realm configuration with the
values in the certificate. If the values in the certificate and realm
configuration do not match, {security} does not allow a connection to the
Active Directory server. This is done to protect against man-in-the-middle
attacks. If necessary, you can disable this behavior by setting the
{ref}/security-settings.html#ssl-tls-settings[`ssl.verification_mode`] property to `certificate`.
See
{ref}/configuring-tls.html#tls-active-directory[Encrypting communications between {es} and Active Directory].
View File
@ -0,0 +1,248 @@
[role="xpack"]
[[configuring-ad-realm]]
=== Configuring an Active Directory realm
You can configure {security} to communicate with Active Directory to authenticate
users. To integrate with Active Directory, you configure an `active_directory`
realm and map Active Directory users and groups to {security} roles in the role
mapping file.
For more information about Active Directory realms, see
{xpack-ref}/active-directory-realm.html[Active Directory User Authentication].
. Add a realm configuration of type `active_directory` to `elasticsearch.yml`
under the `xpack.security.authc.realms` namespace. At a minimum, you must set
the realm `type` to `active_directory` and specify the Active Directory
`domain_name`. If you are configuring multiple realms, you should also
explicitly set the `order` attribute to control the order in which the realms
are consulted during authentication.
+
--
See <<ref-ad-settings>> for all of the options you can set for an
`active_directory` realm.
NOTE: Binding to Active Directory fails if the domain name is not mapped in DNS.
If DNS is not being provided by a Windows DNS server, add a mapping for
the domain in the local `/etc/hosts` file.
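If you need such a mapping, a minimal sketch of an `/etc/hosts` entry might look
like the following (the address is an illustrative assumption):
[source,shell]
--------------------------------------------------
# Hypothetical entry mapping the AD domain to a reachable Domain Controller
10.0.0.10   ad.example.com
--------------------------------------------------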
For example, the following realm configuration configures {security} to connect
to `ldaps://ad.example.com:636` to authenticate users through Active Directory:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0 <1>
domain_name: ad.example.com
url: ldaps://ad.example.com:636 <2>
------------------------------------------------------------
<1> The realm order controls the order in which the configured realms are checked
when authenticating a user.
<2> If you don't specify the URL, it defaults to `ldap://<domain_name>:389`.
IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
realms you specify are used for authentication. If you also want to use the
`native` or `file` realms, you must include them in the realm chain.
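As an illustrative sketch (the realm names and order values are assumptions), a
chain that keeps the `file` and `native` realms available alongside Active
Directory might look like this:
[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        file1:
          type: file
          order: 0
        native1:
          type: native
          order: 1
        active_directory:
          type: active_directory
          order: 2
          domain_name: ad.example.com
------------------------------------------------------------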
--
. If you are authenticating users across multiple domains in a forest, extra
steps are required. There are a few minor differences in the configuration and
the way that users authenticate.
+
--
Set the `domain_name` setting to the forest root domain name.
You must also set the `url` setting, since you must authenticate against the
Global Catalog, which uses a different port and might not be running on every
Domain Controller.
For example, the following realm configuration configures {security} to connect
to specific Domain Controllers on the Global Catalog port with the domain name
set to the forest root:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0
domain_name: example.com <1>
url: ldaps://dc1.ad.example.com:3269, ldaps://dc2.ad.example.com:3269 <2>
load_balance:
type: "round_robin" <3>
------------------------------------------------------------
<1> The `domain_name` is set to the name of the root domain in the forest.
<2> The `url` value used in this example has URLs for two different Domain Controllers,
which are also Global Catalog servers. Port 3268 is the default port for unencrypted
communication with the Global Catalog; port 3269 is the default port for SSL connections.
The servers that are being connected to can be in any domain of the forest as long as
they are also Global Catalog servers.
<3> A load balancing setting is provided to indicate the desired behavior when choosing
the server to connect to.
In this configuration, users will need to use either their full User Principal
Name (UPN) or their Down-Level Logon Name. A UPN is typically a concatenation of
the username with `@<DOMAIN_NAME>` such as `johndoe@ad.example.com`. The Down-Level
Logon Name is the NetBIOS domain name, followed by a `\` and the username, such as
`AD\johndoe`. Use of Down-Level Logon Name requires a connection to the regular LDAP
ports (389 or 636) in order to query the configuration container to retrieve the
domain name from the NetBIOS name.
--
. (Optional) Configure how {security} should interact with multiple Active
Directory servers.
+
--
The `load_balance.type` setting can be used at the realm level. Two modes of
operation are supported: failover and load balancing. See <<ref-ad-settings>>.
--
. (Optional) To protect passwords,
<<tls-active-directory,encrypt communications between {es} and the Active Directory server>>.
. Restart {es}.
. Configure a bind user.
+
--
The Active Directory realm authenticates users using an LDAP bind request. By
default, all of the LDAP operations are run by the user that {security} is
authenticating. In some cases, regular users may not be able to access all of the
necessary items within Active Directory and a _bind user_ is needed. A bind user
can be configured and is used to perform all operations other than the LDAP bind
request, which is required to authenticate the credentials provided by the user.
The use of a bind user enables the
{xpack-ref}/run-as-privilege.html[run as feature] to be used with the Active
Directory realm and the ability to maintain a set of pooled connections to
Active Directory. These pooled connections reduce the number of resources that
must be created and destroyed with every user authentication.
The following example shows the configuration of a bind user through the use of
the `bind_dn` and `secure_bind_password` settings:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0
domain_name: ad.example.com
url: ldaps://ad.example.com:636
bind_dn: es_svc_user@ad.example.com <1>
------------------------------------------------------------
<1> This is the user that all Active Directory search requests are executed as.
Without a bind user configured, all requests run as the user that is authenticating
with {es}.
The password for the `bind_dn` user should be configured by adding the
appropriate `secure_bind_password` setting to the {es} keystore. For example,
the following command adds the password for the example realm above:
[source, shell]
------------------------------------------------------------
bin/elasticsearch-keystore add \
xpack.security.authc.realms.active_directory.secure_bind_password
------------------------------------------------------------
When a bind user is configured, connection pooling is enabled by default.
Connection pooling can be disabled using the `user_search.pool.enabled` setting.
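For example, the following sketch extends the realm above to turn connection
pooling off; the other settings are repeated from the earlier example:
[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        active_directory:
          type: active_directory
          order: 0
          domain_name: ad.example.com
          url: ldaps://ad.example.com:636
          bind_dn: es_svc_user@ad.example.com
          user_search:
            pool:
              enabled: false
------------------------------------------------------------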
--
. Map Active Directory users and groups to roles.
+
--
An integral part of a realm authentication process is to resolve the roles
associated with the authenticated user. Roles define the privileges a user has
in the cluster.
Since with the `active_directory` realm the users are managed externally in the
Active Directory server, the expectation is that their roles are managed there
as well. In fact, Active Directory supports the notion of groups, which often
represent user roles for different systems in the organization.
The `active_directory` realm enables you to map Active Directory users to roles
via their Active Directory groups or other metadata. This role mapping can be
configured via the <<security-api-role-mapping,role-mapping API>> or by using
a file stored on each node. When a user authenticates against an Active
Directory realm, the privileges for that user are the union of all privileges
defined by the roles to which the user is mapped.
Within a mapping definition, you specify groups using their distinguished
names. For example, the following mapping configuration maps the Active
Directory `admins` group to both the `monitoring` and `user` roles, maps the
`users` group to the `user` role and maps the `John Doe` user to the `user`
role.
Configured via the role-mapping API:
[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/admins
{
"roles" : [ "monitoring" , "user" ],
"rules" : { "field" : {
"groups" : "cn=admins,dc=example,dc=com" <1>
} },
"enabled": true
}
--------------------------------------------------
// CONSOLE
<1> The Active Directory distinguished name (DN) of the `admins` group.
[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/basic_users
{
"roles" : [ "user" ],
"rules" : { "any": [
{ "field" : {
"groups" : "cn=users,dc=example,dc=com" <1>
} },
{ "field" : {
"dn" : "cn=John Doe,cn=contractors,dc=example,dc=com" <2>
} }
] },
"enabled": true
}
--------------------------------------------------
// CONSOLE
<1> The Active Directory distinguished name (DN) of the `users` group.
<2> The Active Directory distinguished name (DN) of the user `John Doe`.
Or, alternatively, configured via the role-mapping file:
[source, yaml]
------------------------------------------------------------
monitoring: <1>
- "cn=admins,dc=example,dc=com" <2>
user:
- "cn=users,dc=example,dc=com" <3>
- "cn=admins,dc=example,dc=com"
- "cn=John Doe,cn=contractors,dc=example,dc=com" <4>
------------------------------------------------------------
<1> The name of the role.
<2> The Active Directory distinguished name (DN) of the `admins` group.
<3> The Active Directory distinguished name (DN) of the `users` group.
<4> The Active Directory distinguished name (DN) of the user `John Doe`.
For more information, see
{xpack-ref}/mapping-roles.html[Mapping users and groups to roles].
--
. (Optional) Configure the `metadata` setting in the Active Directory realm to
include extra properties in the user's metadata.
+
--
By default, `ldap_dn` and `ldap_groups` are populated in the user's metadata.
For more information, see
{xpack-ref}/active-directory-realm.html#ad-user-metadata[User Metadata in Active Directory Realms].
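For example, the following sketch asks the realm to copy a hypothetical
`department` attribute from Active Directory into the user's metadata:
[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        active_directory:
          type: active_directory
          order: 0
          domain_name: ad.example.com
          metadata: ["department"]
------------------------------------------------------------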
--
View File
@ -0,0 +1,106 @@
[role="xpack"]
[[configuring-file-realm]]
=== Configuring a file realm
You can manage and authenticate users with the built-in `file` internal realm.
All the data about the users for the `file` realm is stored in two files on each
node in the cluster: `users` and `users_roles`. Both files are located in
`CONFIG_DIR/` and are read on startup.
[IMPORTANT]
==============================
The `users` and `users_roles` files are managed locally by the node and are
**not** managed globally by the cluster. This means that with a typical
multi-node cluster, the exact same changes need to be applied on each and every
node in the cluster.
A safer approach would be to apply the change on one of the nodes and have the
files distributed or copied to all other nodes in the cluster (either manually
or using a configuration management system such as Puppet or Chef).
==============================
The `file` realm is added to the realm chain by default. You don't need to
explicitly configure a `file` realm.
For more information about file realms, see
{xpack-ref}/file-realm.html[File-based user authentication].
. (Optional) Add a realm configuration of type `file` to `elasticsearch.yml`
under the `xpack.security.authc.realms` namespace. At a minimum, you must set
the realm `type` to `file`. If you are configuring multiple realms, you should
also explicitly set the `order` attribute.
+
--
//See <<ref-users-settings>> for all of the options you can set for a `file` realm.
For example, the following snippet shows a `file` realm configuration that sets
the `order` to zero so the realm is checked first:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
file1:
type: file
order: 0
------------------------------------------------------------
--
. Restart {es}.
. Add user information to the `CONFIG_DIR/users` file on each node in the
cluster.
+
--
The `users` file stores all the users and their passwords. Each line in the file
represents a single user entry consisting of the username and **hashed** password.
[source,bash]
----------------------------------------------------------------------
rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W
alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS
jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni
----------------------------------------------------------------------
{security} uses `bcrypt` to hash the user passwords.
While it is possible to modify these files directly using any standard text
editor, we strongly recommend using the <<users-command>> tool to apply the
required changes.
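For example, the following command adds a user and hashes the supplied password
for you (the username, password, and role are illustrative):
[source,shell]
----------------------------------------------------------------------
bin/elasticsearch-users useradd jacknich -p theshining -r user
----------------------------------------------------------------------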
IMPORTANT: As the administrator of the cluster, it is your responsibility to
ensure the same users are defined on every node in the cluster.
{security} does not deliver any mechanism to guarantee this.
--
. Add role information to the `CONFIG_DIR/users_roles` file on each node
in the cluster.
+
--
The `users_roles` file stores the roles associated with the users. For example:
[source,shell]
--------------------------------------------------
admin:rdeniro
power_user:alpacino,jacknich
user:jacknich
--------------------------------------------------
Each row maps a role to a comma-separated list of all the users that are
associated with that role.
You can use the <<users-command>> tool to update this file. You must ensure that
the same changes are made on every node in the cluster.
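For example, the following sketch grants an additional role to an existing user
(the username and role are illustrative):
[source,shell]
--------------------------------------------------
bin/elasticsearch-users roles jacknich -a power_user
--------------------------------------------------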
--
. (Optional) Change how often the `users` and `users_roles` files are checked.
+
--
By default, {security} checks these files for changes every 5 seconds. You can
change this default behavior by changing the `resource.reload.interval.high`
setting in the `elasticsearch.yml` file (as this is a common setting in {es},
changing its value may affect other schedules in the system).
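For example, this sketch of an `elasticsearch.yml` entry relaxes the check to an
assumed interval of 30 seconds:
[source, yaml]
------------------------------------------------------------
resource.reload.interval.high: 30s
------------------------------------------------------------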
--
View File
@ -0,0 +1,176 @@
[role="xpack"]
[[configuring-pki-realm]]
=== Configuring a PKI realm
You can configure {security} to use Public Key Infrastructure (PKI) certificates
to authenticate users in {es}. This requires clients to present X.509
certificates.
NOTE: You cannot use PKI certificates to authenticate users in {kib}.
To use PKI in {es}, you configure a PKI realm, enable client authentication on
the desired network layers (transport or http), and map the Distinguished Names
(DNs) from the user certificates to {security} roles in the role mapping file.
You can also use a combination of PKI and username/password authentication. For
example, you can enable SSL/TLS on the transport layer and define a PKI realm to
require transport clients to authenticate with X.509 certificates, while still
authenticating HTTP traffic using username and password credentials. You can
also set `xpack.security.transport.ssl.client_authentication` to `optional` to
allow clients without certificates to authenticate with other credentials.
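For example, a one-line sketch of such an `elasticsearch.yml` entry might be:
[source, yaml]
------------------------------------------------------------
xpack.security.transport.ssl.client_authentication: optional
------------------------------------------------------------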
IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI.
For more information, see {xpack-ref}/pki-realm.html[PKI User Authentication].
. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
`xpack.security.authc.realms` namespace. At a minimum, you must set the realm
`type` to `pki`. If you are configuring multiple realms, you should also
explicitly set the `order` attribute. See <<ref-pki-settings>> for all of the
options you can set for a `pki` realm.
+
--
For example, the following snippet shows the most basic `pki` realm configuration:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
pki1:
type: pki
------------------------------------------------------------
With this configuration, any certificate trusted by the SSL/TLS layer is accepted
for authentication. The username is the common name (CN) extracted from the DN
of the certificate.
IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
realms you specify are used for authentication. If you also want to use the
`native` or `file` realms, you must include them in the realm chain.
If you want to use something other than the CN of the DN as the username, you
can specify a regex to extract the desired username. For example, the regex in
the following configuration extracts the email address from the DN:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
pki1:
type: pki
username_pattern: "EMAILADDRESS=(.*?)(?:,|$)"
------------------------------------------------------------
--
. Restart {es}.
. <<configuring-tls,Enable SSL/TLS>>.
. Enable client authentication on the desired network layers (transport or http).
+
--
//TBD: This step might need to be split into a separate topic with additional details
//about setting up client authentication.
The PKI realm relies on the TLS settings of the node's network interface. The
realm can be configured to be more restrictive than the underlying network
connection - that is, it is possible to configure the node such that some
connections are accepted by the network interface but then fail to be
authenticated by the PKI realm. However, the reverse is not possible. The PKI
realm cannot authenticate a connection that has been refused by the network
interface.
In particular this means:
* The transport or http interface must request client certificates by setting
`client_authentication` to `optional` or `required`.
* The interface must _trust_ the certificate that is presented by the client
by configuring either the `truststore` or `certificate_authorities` paths,
or by setting `verification_mode` to `none`. See
<<ssl-tls-settings,`xpack.ssl.verification_mode`>> for an explanation of this
setting.
* The _protocols_ supported by the interface must be compatible with those
used by the client.
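As an illustration of the first point, the following sketch (using the flat key
style, which is equivalent to the nested YAML used elsewhere in this document)
asks the HTTP interface to request, but not require, client certificates:
[source, yaml]
------------------------------------------------------------
xpack.security.http.ssl.enabled: true
xpack.security.http.ssl.client_authentication: optional
------------------------------------------------------------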
The relevant network interface (transport or http) must be configured to trust
any certificate that is to be used within the PKI realm. However, it is possible to
configure the PKI realm to trust only a _subset_ of the certificates accepted
by the network interface. This is useful when the SSL/TLS layer trusts clients
with certificates that are signed by a different CA than the one that signs your
users' certificates.
To configure the PKI realm with its own truststore, specify the `truststore.path`
option. For example:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
pki1:
type: pki
truststore:
path: "/path/to/pki_truststore.jks"
password: "x-pack-test-password"
------------------------------------------------------------
The `certificate_authorities` option can be used as an alternative to the
`truststore.path` setting.
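A sketch of the PEM-based alternative, with an assumed path, might look like:
[source, yaml]
------------------------------------------------------------
xpack:
  security:
    authc:
      realms:
        pki1:
          type: pki
          certificate_authorities: ["/path/to/users_ca.pem"]
------------------------------------------------------------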
--
. Map roles for PKI users.
+
--
You map roles for PKI users through the
<<security-api-role-mapping,role-mapping API>> or by using a file stored on
each node. When a user authenticates against a PKI realm, the privileges for
that user are the union of all privileges defined by the roles to which the
user is mapped.
You identify a user by the distinguished name in their certificate.
For example, the following mapping configuration maps `John Doe` to the
`user` role:
Using the role-mapping API:
[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/users
{
"roles" : [ "user" ],
"rules" : { "field" : {
"dn" : "cn=John Doe,ou=example,o=com" <1>
} },
"enabled": true
}
--------------------------------------------------
// CONSOLE
<1> The distinguished name (DN) of a PKI user.
Or, alternatively, configured in a role-mapping file:
[source, yaml]
------------------------------------------------------------
user: <1>
- "cn=John Doe,ou=example,o=com" <2>
------------------------------------------------------------
<1> The name of a role.
<2> The distinguished name (DN) of a PKI user.
The distinguished name for a PKI user follows X.500 naming conventions, which
place the most specific fields (like `cn` or `uid`) at the beginning of the
name, and the most general fields (like `o` or `dc`) at the end of the name.
Some tools, such as _openssl_, may print out the subject name in a different
format.
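For example, this command prints the subject of a certificate; depending on the
_openssl_ version, the field order in its output may be the reverse of the X.500
order shown above (the file name is illustrative):
[source,shell]
--------------------------------------------------
openssl x509 -in john_doe.crt -noout -subject
--------------------------------------------------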
One way that you can determine the correct DN for a certificate is to use the
<<security-api-authenticate,authenticate API>> (use the relevant PKI
certificate as the means of authentication) and inspect the metadata field in
the result. The user's distinguished name will be populated under the `pki_dn`
key. You can also use the authenticate API to validate your role mapping.
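For example, a sketch of the call (sent while authenticating with the relevant
PKI certificate rather than a password) is:
[source,js]
--------------------------------------------------
GET /_xpack/security/_authenticate
--------------------------------------------------
// CONSOLE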
For more information, see
{xpack-ref}/mapping-roles.html[Mapping Users and Groups to Roles].
--
View File
@ -1,8 +1,8 @@
[[file-realm]]
=== File-based User Authentication
You can manage and authenticate users with the built-in `file` realm.
With the `file` realm, users are defined in local files on each node in the cluster.
IMPORTANT: As the administrator of the cluster, it is your responsibility to
ensure the same users are defined on every node in the cluster.
@ -20,127 +20,7 @@ realms you specify are used for authentication. To use the
To define users, {security} provides the {ref}/users-command.html[users]
command-line tool. This tool enables you to add and remove users, assign user
roles, and manage user passwords.
==== Configuring a File Realm
The `file` realm is added to the realm chain by default. You don't need to
explicitly configure a `file` realm to manage users with the `users` tool.
Like other realms, you can configure options for a `file` realm in the
`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
To configure a `file` realm:
. Add a realm configuration of type `file` to `elasticsearch.yml` under the
`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
`file`. If you are configuring multiple realms, you should also explicitly set
the `order` attribute. See <<file-realm-settings>> for all of the options you can set
for a `file` realm.
+
For example, the following snippet shows a `file` realm configuration that sets
the `order` to zero so the realm is checked first:
+
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
file1:
type: file
order: 0
------------------------------------------------------------
. Restart Elasticsearch.
[[file-realm-settings]]
===== File Realm Settings
[cols="4,^3,10"]
|=======================
| Setting | Required | Description
| `type` | yes | Indicates the realm type. Must be set to `file`.
| `order` | no | Indicates the priority of this realm within the
realm chain. Realms with a lower order are
consulted first. Although not required, we
recommend explicitly setting this value when you
configure multiple realms. Defaults to
`Integer.MAX_VALUE`.
| `enabled` | no | Indicates whether this realm is enabled or
disabled. Enables you to disable a realm without
removing its configuration. Defaults to `true`.
| `cache.ttl` | no | Specifies the time-to-live for cached user entries.
A user's credentials are cached for this period of
time. Specify the time period using the standard
Elasticsearch {ref}/common-options.html#time-units[time units].
Defaults to `20m`.
| `cache.max_users` | no | Specifies the maximum number of user entries that
can be stored in the cache at one time. Defaults
to 100,000.
| `cache.hash_algo` | no | Specifies the hashing algorithm that is used for
the cached user credentials. See <<cache-hash-algo,
Cache hash algorithms>> for the possible values.
(Expert Setting).
|=======================
==== A Look Under the Hood
All the data about the users for the `file` realm is stored in two files, `users`
and `users_roles`. Both files are located in `CONFIG_DIR/x-pack/` and are read
on startup.
By default, {security} checks these files for changes every 5 seconds. You can
change this default behavior by changing the `resource.reload.interval.high` setting in
the `elasticsearch.yml` file (as this is a common setting in Elasticsearch,
changing its value may affect other schedules in the system).
[IMPORTANT]
==============================
These files are managed locally by the node and are **not** managed
globally by the cluster. This means that with a typical multi-node cluster,
the exact same changes need to be applied on each and every node in the
cluster.
A safer approach would be to apply the change on one of the nodes and have the
`users` and `users_roles` files distributed/copied to all other nodes in the
cluster (either manually or using a configuration management system such as
Puppet or Chef).
==============================
While it is possible to modify these files directly using any standard text
editor, we strongly recommend using the {ref}/users-command.html[`bin/elasticsearch-users`]
command-line tool to apply the required changes.
[float]
[[users-file]]
===== The `users` File
The `users` file stores all the users and their passwords. Each line in the
`users` file represents a single user entry consisting of the username and
**hashed** password.
[source,bash]
----------------------------------------------------------------------
rdeniro:$2a$10$BBJ/ILiyJ1eBTYoRKxkqbuDEdYECplvxnqQ47uiowE7yGqvCEgj9W
alpacino:$2a$10$cNwHnElYiMYZ/T3K4PvzGeJ1KbpXZp2PfoQD.gfaVdImnHOwIuBKS
jacknich:$2a$10$GYUNWyABV/Ols/.bcwxuBuuaQzV6WIauW6RdboojxcixBq3LtI3ni
----------------------------------------------------------------------
NOTE: {security} uses `bcrypt` to hash the user passwords.
[float]
[[users_defining-roles]]
==== The `users_roles` File
The `users_roles` file stores the roles associated with the users, as in the
following example:
[source,shell]
--------------------------------------------------
admin:rdeniro
power_user:alpacino,jacknich
user:jacknich
--------------------------------------------------
Each row maps a role to a comma-separated list of all the users that are
associated with that role.
For more information, see
{ref}/configuring-file-realm.html[Configuring a file realm].
View File
@ -1,5 +1,5 @@
[[pki-realm]]
=== PKI user authentication
You can configure {security} to use Public Key Infrastructure (PKI) certificates
to authenticate users in {es}. This requires clients to present X.509
@ -12,171 +12,9 @@ the desired network layers (transport or http), and map the Distinguished Names
(DNs) from the user certificates to {security} roles in the
<<mapping-roles, role mapping file>>.
You can also use a combination of PKI and username/password authentication. For
example, you can enable SSL/TLS on the transport layer and define a PKI realm to
require transport clients to authenticate with X.509 certificates, while still
authenticating HTTP traffic using username and password credentials. You can also set
`xpack.security.transport.ssl.client_authentication` to `optional` to allow clients without
certificates to authenticate with other credentials.
IMPORTANT: You must enable SSL/TLS and enable client authentication to use PKI.
For more information, see <<ssl-tls, Setting Up SSL/TLS on a Cluster>>.
==== PKI Realm Configuration
Like other realms, you configure options for a `pki` realm under the
`xpack.security.authc.realms` namespace in `elasticsearch.yml`.
To configure a `pki` realm:
. Add a realm configuration of type `pki` to `elasticsearch.yml` under the
`xpack.security.authc.realms` namespace. At a minimum, you must set the realm `type` to
`pki`. If you are configuring multiple realms, you should also explicitly set
the `order` attribute. See <<pki-settings>> for all of the options you can set
for a `pki` realm.
+
For example, the following snippet shows the most basic `pki` realm configuration:
+
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
pki1:
type: pki
------------------------------------------------------------
+
With this configuration, any certificate trusted by the SSL/TLS layer is accepted
for authentication. The username is the common name (CN) extracted from the DN
of the certificate.
+
IMPORTANT: When you configure realms in `elasticsearch.yml`, only the
realms you specify are used for authentication. If you also want to use the
`native` or `file` realms, you must include them in the realm chain.
+
If you want to use something other than the CN of the DN as the username, you
can specify a regex to extract the desired username. For example, the regex in
the following configuration extracts the email address from the DN:
+
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
pki1:
type: pki
username_pattern: "EMAILADDRESS=(.*?)(?:,|$)"
------------------------------------------------------------
+
. Restart Elasticsearch.
[[pki-ssl-config]]
==== PKI and SSL Settings
The PKI realm relies on the SSL settings of the node's network interface
(transport or http). The realm can be configured to be more restrictive than
the underlying network connection - that is, it is possible to configure the
node such that some connections are accepted by the network interface but then
fail to be authenticated by the PKI realm. However, the reverse is not possible
- the PKI realm cannot authenticate a connection that has been refused by the
network interface.
In particular this means:
* The transport or http interface must request client certificates by setting
`client_authentication` to `optional` or `required`.
* The interface must _trust_ the certificate that is presented by the client
by configuring either the `truststore` or `certificate_authorities` paths,
or by setting `verification_mode` to `none`.
+
See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
for an explanation of this setting.
* The _protocols_ supported by the interface must be compatible with those
used by the client.
The relevant network interface (transport or http) must be configured to trust
any certificate that is to be used within the PKI realm. However, it is possible to
configure the PKI realm to trust only a _subset_ of the certificates accepted
by the network interface.
This is useful when the SSL/TLS layer trusts clients with certificates that are
signed by a different CA than the one that signs your users' certificates.
To configure the PKI realm with its own truststore, specify the
`truststore.path` option as below:
[source, yaml]
------------------------------------------------------------
xpack:
security:
authc:
realms:
pki1:
type: pki
truststore:
path: "/path/to/pki_truststore.jks"
password: "x-pack-test-password"
------------------------------------------------------------
The `certificate_authorities` option may be used as an alternative to the
`truststore.path` setting.
See {ref}/configuring-pki-realm.html[Configuring a PKI realm].
[[pki-settings]]
==== PKI Realm Settings
See {ref}/security-settings.html#ref-pki-settings[PKI realm settings].
[[assigning-roles-pki]]
==== Mapping Roles for PKI Users
You map roles for PKI users through the
{ref}/security-api-role-mapping.html[role-mapping API], or by using a file stored on
each node. When a user authenticates against a PKI realm, the privileges for
that user are the union of all privileges defined by the roles to which the
user is mapped.
You identify a user by the distinguished name in their certificate.
For example, the following mapping configuration maps `John Doe` to the
`user` role:
Using the role-mapping API:
[source,js]
--------------------------------------------------
PUT _xpack/security/role_mapping/users
{
"roles" : [ "user" ],
"rules" : { "field" : {
"dn" : "cn=John Doe,ou=example,o=com" <1>
} },
"enabled": true
}
--------------------------------------------------
// CONSOLE
<1> The distinguished name (DN) of a PKI user.
Or, alternatively, configured in a role-mapping file:
[source, yaml]
------------------------------------------------------------
user: <1>
- "cn=John Doe,ou=example,o=com" <2>
------------------------------------------------------------
<1> The name of a role.
<2> The distinguished name (DN) of a PKI user.
The distinguished name for a PKI user follows X.500 naming conventions, which
place the most specific fields (like `cn` or `uid`) at the beginning of the
name, and the most general fields (like `o` or `dc`) at the end of the name.
Some tools, such as _openssl_, may print out the subject name in a different
format.
One way that you can determine the correct DN for a certificate is to use the
{ref}/security-api-authenticate.html[authenticate API] (use the relevant PKI
certificate as the means of authentication) and inspect the metadata field in
the result. The user's distinguished name will be populated under the `pki_dn`
key. You can also use the authenticate API to validate your role mapping.
For more information, see <<mapping-roles, Mapping Users and Groups to Roles>>.
View File
@ -25,238 +25,19 @@ for SAML realms.
[[saml-settings]]
==== SAML Realm Settings
[cols="4,^3,10"]
|=======================
| Setting | Required | Description
| `type` | yes | Indicates the realm type. Must be set to `saml`.
| `order` | no | Indicates the priority of this realm within the realm chain.
Realms with a lower order are consulted first. Although not
required, we recommend explicitly setting this value when
you configure multiple realms. Defaults to `Integer.MAX_VALUE`.
| `enabled` | no | Indicates whether this realm is enabled or disabled. Enables
you to disable a realm without removing its configuration.
Defaults to `true`.
| `idp.entity_id` | yes | The Entity ID of the SAML Identity Provider. An Entity ID is
a URI with a maximum length of 1024 characters. It can be a
URL (`https://idp.example.com/`) or a URN (`urn:example.com:idp`)
and can be found in the configuration or the SAML metadata
of the Identity Provider.
| `idp.metadata.path` | yes | The path (_recommended_) or URL to a SAML 2.0 metadata file
describing the capabilities and configuration of the Identity
Provider.
If a path is provided, then it is resolved relative to the
{es} config directory.
If a URL is provided, then it must be either a `file` URL or
a `https` URL.
{security} will automatically poll this metadata resource and
will reload the IdP configuration when changes are detected.
File based resources are polled at a frequency determined by
the global {es} `resource.reload.interval.high` setting, which
defaults to 5 seconds.
HTTPS resources are polled at a frequency determined by
the realm's `idp.metadata.http.refresh` setting.
| `idp.metadata.http.refresh` | no | Controls the frequency with which `https` metadata is checked
for changes. Defaults to 1 hour.
| `idp.use_single_logout` | no | Indicates whether to utilise the Identity Provider's Single
Logout service (if one exists in the IdP metadata file).
Defaults to `true`.
| `sp.entity_id` | yes | The Entity ID to use for this SAML Service Provider.
This should be entered as a URI. We recommend that you use the
base URL of your {kib} instance,
e.g. `https://kibana.example.com/`
| `sp.acs` | yes | The URL of the Assertion Consumer Service within {kib}.
Typically this will be the "api/security/v1/saml" endpoint of
your {kib} server,
e.g. `https://kibana.example.com/api/security/v1/saml`
| `sp.logout` | no | The URL of the Single Logout service within {kib}.
Typically this will be the "logout" endpoint of
your {kib} server,
e.g. `https://kibana.example.com/logout`
| `attributes.principal` | yes | The Name of the SAML attribute that should be used as the
{security} user's principal (username)
| `attributes.groups` | no | The Name of the SAML attribute that should be used to populate
{security} user's groups
| `attributes.name` | no | The Name of the SAML attribute that should be used to populate
{security} user's full name
| `attributes.mail` | no | The Name of the SAML attribute that should be used to populate
{security} user's email address
| `attributes.dn` | no | The Name of the SAML attribute that should be used to populate
{security} user's X.500 _Distinguished Name_
| `attribute_patterns.principal` | no | A java regular expression that is matched against the SAML attribute
specified by `attributes.principal` before it is applied to the user's
_principal_ property.
The attribute value must match the pattern, and the value of the
first _capturing group_ is used as the principal.
e.g. `^([^@]+)@example\\.com$` matches email addresses from the
"example.com" domain and uses the local-part as the principal.
| `attribute_patterns.groups` | no | As per `attribute_patterns.principal`, but for the _group_ property.
| `attribute_patterns.name` | no | As per `attribute_patterns.principal`, but for the _name_ property.
| `attribute_patterns.mail` | no | As per `attribute_patterns.principal`, but for the _mail_ property.
| `attribute_patterns.dn` | no | As per `attribute_patterns.principal`, but for the _dn_ property.
| `nameid_format` | no | The NameID format that should be requested when asking the IdP
to authenticate the current user.
Defaults to requesting _transient_ names
(`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`)
| `nameid.allow_create` | no | The value of the `AllowCreate` attribute of the `NameIdPolicy`
element in an authentication request.
Defaults to `false`
| `nameid.sp_qualifier` | no | The value of the `SPNameQualifier` attribute of the `NameIdPolicy`
element in an authentication request.
The default is to not include the `SPNameQualifier` attribute.
| `force_authn` | no | Whether to set the `ForceAuthn` attribute when requesting that the
IdP authenticate the current user. If this is set to `true`, the
IdP will be required to freshly establish the user's identity,
irrespective of any existing sessions they may have.
Defaults to `false`.
| `populate_user_metadata` | no | Whether to populate the {es} user's metadata with the values that
are provided by the SAML attributes. Defaults to `true`.
| `allowed_clock_skew` | no | The maximum amount of skew that can be tolerated between the
IdP's clock and the {es} node's clock. Defaults to 3 minutes.
|=======================
See {ref}/security-settings.html#ref-saml-settings[SAML Realm Settings].
===== SAML Realm Signing Settings
If a signing key is configured (i.e. one of `signing.key` or `signing.keystore.path` has been set), then
{security} will sign outgoing SAML messages. Signing can be configured using the following settings.
|=======================
| Setting | Required | Description
| `signing.saml_messages` | no | A list of SAML message types that should be signed, or `*` to
sign all messages. Each element in the list should be the
local name of a SAML XML Element. Supported element types are
`AuthnRequest`, `LogoutRequest` and `LogoutResponse`.
Defaults to `*`.
| `signing.key` | no | Specifies the path to the PEM encoded private key to use for
SAML message signing.
`signing.key` and `signing.keystore.path` may not be used at
the same time.
| `signing.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure])
Specifies the passphrase to decrypt the PEM encoded private key if
it is encrypted.
| `signing.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate
chain) that corresponds to the `signing.key`. This certificate
must also be included in the Service Provider metadata, or
manually configured within the IdP to allow for signature
validation.
May only be used if `signing.key` is set.
| `signing.keystore.path` | no | The path to the keystore that contains a private key and
certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file.
`signing.key` and `signing.keystore.path` may not be used at the
same time.
| `signing.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12".
Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
"pkcs12", otherwise uses "jks"
| `signing.keystore.alias` | no | Specifies the alias of the key within the keystore that should be
used for SAML message signing. Must be specified if the keystore
contains more than one private key.
| `signing.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore.
| `signing.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure])
The password for the key in the keystore.
Defaults to the keystore password.
|=======================
See {ref}/security-settings.html#ref-saml-signing-settings[SAML Realm Signing Settings].
===== SAML Realm Encryption Settings
If an encryption key is configured (i.e. one of `encryption.key` or
`encryption.keystore.path` has been set), then {security} will publish
an encryption certificate when generating metadata, and will attempt to
decrypt incoming SAML content.
Encryption can be configured using the following settings.
|=======================
| Setting | Required | Description
| `encryption.key` | no | Specifies the path to the PEM encoded private key to use for
SAML message decryption.
`encryption.key` and `encryption.keystore.path` may not be used at
the same time.
| `encryption.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure])
Specifies the passphrase to decrypt the PEM encoded private key if
it is encrypted.
| `encryption.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate
chain) that is associated with the `encryption.key`. This
certificate must also be included in the Service Provider metadata,
or manually configured within the IdP to enable message encryption.
May only be used if `encryption.key` is set.
| `encryption.keystore.path` | no | The path to the keystore that contains a private key and
certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file.
`encryption.key` and `encryption.keystore.path` may not be used at
the same time.
| `encryption.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12".
Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
"pkcs12", otherwise uses "jks"
| `encryption.keystore.alias` | no | Specifies the alias of the key within the keystore that should be
used for SAML message decryption. If not specified, all compatible
key pairs from the keystore will be considered as candidate keys
for decryption.
| `encryption.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore.
| `encryption.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure])
The password for the key in the keystore. Only a single password is
supported. If you are using multiple decryption keys, then they
cannot have individual passwords.
|=======================
See {ref}/security-settings.html#ref-saml-encryption-settings[SAML Realm Encryption Settings].
===== SAML Realm SSL Settings
If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path` is a URL using the `https` protocol),
then the following settings may be used to configure SSL. If these are not specified, then the {xpack}
{ref}/security-settings.html#ssl-tls-settings[default SSL settings] are used.
These settings are not used for any purpose other than loading metadata over https.
|=======================
| Setting | Required | Description
| `ssl.key` | no | Specifies the path to the PEM encoded private key to use for http
client authentication.
`ssl.key` and `ssl.keystore.path` may not be used at the same time.
| `ssl.key_passphrase` | no | Specifies the passphrase to decrypt the PEM encoded private key if
it is encrypted. May not be used with `ssl.secure_key_passphrase`
| `ssl.secure_key_passphrase` | no | ({ref}/secure-settings.html[Secure])
Specifies the passphrase to decrypt the PEM encoded private key if
it is encrypted. May not be used with `ssl.key_passphrase`
| `ssl.certificate` | no | Specifies the path to the PEM encoded certificate (or certificate
chain) that goes with the key. May only be used if `ssl.key` is set.
| `ssl.certificate_authorities` | no | Specifies the paths to the PEM encoded certificate authority
certificates that should be trusted.
`ssl.certificate_authorities` and `ssl.truststore.path` may not be
used at the same time.
| `ssl.keystore.path` | no | The path to the keystore that contains a private key and
certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file.
`ssl.key` and `ssl.keystore.path` may not be used at the same time.
| `ssl.keystore.type` | no | The type of the keystore. Must be one of "jks" or "PKCS12".
Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
"pkcs12", otherwise uses "jks"
| `ssl.keystore.password` | no | The password to the keystore.
May not be used with `ssl.keystore.secure_password`.
| `ssl.keystore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the keystore.
May not be used with `ssl.keystore.password`.
| `ssl.keystore.key_password` | no | The password for the key in the keystore.
Defaults to the keystore password.
May not be used with `ssl.keystore.secure_key_password`.
| `ssl.keystore.secure_key_password` | no | ({ref}/secure-settings.html[Secure])
The password for the key in the keystore.
Defaults to the keystore password.
May not be used with `ssl.keystore.key_password`.
| `ssl.truststore.path` | no | The path to the keystore that contains the certificates to trust.
Must be either a Java Keystore (jks) or a PKCS#12 file.
`ssl.certificate_authorities` and `ssl.truststore.path` may not be
used at the same time.
| `ssl.truststore.type` | no | The type of the truststore. Must be one of "jks" or "PKCS12".
Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or
"pkcs12", otherwise uses "jks"
| `ssl.truststore.password` | no | The password to the truststore.
May not be used with `ssl.truststore.secure_password`.
| `ssl.truststore.secure_password` | no | ({ref}/secure-settings.html[Secure]) The password to the truststore.
May not be used with `ssl.truststore.password`.
| `ssl.verification_mode` | no | One of `full` (verify the hostname and the certificate path),
`certificate` (verify the certificate path, but not the hostname)
or `none` (perform no verification). Defaults to `full`.
+
See {ref}/security-settings.html#ssl-tls-settings[`xpack.ssl.verification_mode`]
for a more detailed explanation of these values.
| `ssl.supported_protocols` | no | Specifies the supported protocols for TLS/SSL.
| `ssl.cipher_suites` | no | Specifies the cipher suites that should be supported.
|=======================
See {ref}/security-settings.html#ref-saml-ssl-settings[SAML Realm SSL Settings].
View File
@ -1,6 +1,6 @@
[role="xpack"] [role="xpack"]
[[configuring-security]] [[configuring-security]]
== Configuring Security in {es} == Configuring security in {es}
++++ ++++
<titleabbrev>Configuring Security</titleabbrev> <titleabbrev>Configuring Security</titleabbrev>
++++ ++++
@ -70,6 +70,11 @@ user API.
--
. Choose which types of realms you want to use to authenticate users.
** <<configuring-ad-realm,Configure an Active Directory realm>>.
** <<configuring-file-realm,Configure a file realm>>.
** <<configuring-pki-realm,Configure a PKI realm>>.
. Set up roles and users to control access to {es}.
For example, to grant _John Doe_ full access to all indices that match
the pattern `events*` and enable him to create visualizations and dashboards
@ -128,5 +133,8 @@ include::securing-communications/securing-elasticsearch.asciidoc[]
include::securing-communications/configuring-tls-docker.asciidoc[]
include::securing-communications/enabling-cipher-suites.asciidoc[]
include::securing-communications/separating-node-client-traffic.asciidoc[]
include::authentication/configuring-active-directory-realm.asciidoc[]
include::authentication/configuring-file-realm.asciidoc[]
include::authentication/configuring-pki-realm.asciidoc[]
include::{xes-repo-dir}/settings/security-settings.asciidoc[]
include::{xes-repo-dir}/settings/audit-settings.asciidoc[]
View File
@ -20,9 +20,13 @@ information, see <<security-settings>>.
.. Required: <<tls-transport,Enable TLS on the transport layer>>.
.. Recommended: <<tls-http,Enable TLS on the HTTP layer>>.
. If you are using Active Directory user authentication,
<<tls-active-directory,encrypt communications between {es} and your Active Directory server>>.
For more information about encrypting communications across the Elastic Stack,
see {xpack-ref}/encrypting-communications.html[Encrypting Communications].
include::node-certificates.asciidoc[]
include::tls-transport.asciidoc[]
include::tls-http.asciidoc[]
include::tls-ad.asciidoc[]
View File
@ -0,0 +1,57 @@
[role="xpack"]
[[tls-active-directory]]
==== Encrypting communications between {es} and Active Directory
To protect the user credentials that are sent for authentication, it's highly
recommended to encrypt communications between {es} and your Active Directory
server. Connecting via SSL/TLS ensures that the identity of the Active Directory
server is authenticated before {security} transmits the user credentials and that
the usernames and passwords are encrypted in transit.
Clients and nodes that connect via SSL/TLS to the Active Directory server need
to have the Active Directory server's certificate or the server's root CA
certificate installed in their keystore or truststore.
. Create the realm configuration for the `xpack.security.authc.realms` namespace
in the `elasticsearch.yml` file. See <<configuring-ad-realm>>.
. Set the `url` attribute in the realm configuration to specify the LDAPS protocol
and the secure port number. For example, `url: ldaps://ad.example.com:636`.
. Configure each node to trust certificates signed by the certificate authority
(CA) that signed your Active Directory server certificates.
+
--
The following example demonstrates how to trust a CA certificate (`cacert.pem`),
which is located within the configuration directory:
[source,shell]
--------------------------------------------------
xpack:
security:
authc:
realms:
active_directory:
type: active_directory
order: 0
domain_name: ad.example.com
url: ldaps://ad.example.com:636
ssl:
certificate_authorities: [ "CONFIG_DIR/cacert.pem" ]
--------------------------------------------------
The CA certificate must be PEM encoded.
For more information about these settings, see <<ref-ad-settings>>.
--
. Restart {es}.
NOTE: By default, when you configure {security} to connect to Active Directory
using SSL/TLS, {security} attempts to verify the hostname or IP address
specified with the `url` attribute in the realm configuration with the
values in the certificate. If the values in the certificate and realm
configuration do not match, {security} does not allow a connection to the
Active Directory server. This is done to protect against man-in-the-middle
attacks. If necessary, you can disable this behavior by setting the
`ssl.verification_mode` property to `certificate`.
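For example, relaxing hostname verification for the realm shown above is a
one-line change (a sketch; the realm name `active_directory` follows the
earlier example):

[source,shell]
--------------------------------------------------
xpack.security.authc.realms.active_directory.ssl.verification_mode: certificate
--------------------------------------------------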
View File
@ -64,15 +64,15 @@ the users. Any unknown roles are marked with `*`.
-- --
[source, shell] [source, shell]
------------------------------------------ ------------------------------------------
bin/xpack/users list bin/elasticsearch-users list
rdeniro : admin rdeniro : admin
alpacino : power_user alpacino : power_user
jacknich : monitoring,unknown_role* <1> jacknich : monitoring,unknown_role* <1>
------------------------------------------ ------------------------------------------
<1> `unknown_role` was not found in `roles.yml` <1> `unknown_role` was not found in `roles.yml`
For more information about this command, see For more information about this command, see the
{ref}/users-command.html[Users Command]. {ref}/users-command.html[`elasticsearch-users` command].
-- --
. If you are authenticating to LDAP, a number of configuration options can cause . If you are authenticating to LDAP, a number of configuration options can cause
View File
@ -168,6 +168,10 @@ in-memory cached user credentials. For possible values, see
[float] [float]
===== File realm settings ===== File realm settings
The `type` setting must be set to `file`. In addition to the
<<ref-realm-settings,settings that are valid for all realms>>, you can specify
the following settings:
`cache.ttl`:: `cache.ttl`::
The time-to-live for cached user entries. A user and a hash of its credentials The time-to-live for cached user entries. A user and a hash of its credentials
are cached for this configured period of time. Defaults to `20m`. Specify values are cached for this configured period of time. Defaults to `20m`. Specify values
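For example, a minimal file realm sketch in `elasticsearch.yml` (the realm
name `file1` is illustrative):

[source,shell]
--------------------------------------------------
xpack:
  security:
    authc:
      realms:
        file1:
          type: file
          order: 0
          cache.ttl: 20m
--------------------------------------------------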
@ -685,6 +689,10 @@ LDAP operation (such as `search`). Defaults to `true`.
[float] [float]
===== PKI realm settings ===== PKI realm settings
The `type` setting must be set to `pki`. In addition to the
<<ref-realm-settings,settings that are valid for all realms>>, you can specify
the following settings:
`username_pattern`:: `username_pattern`::
The regular expression pattern used to extract the username from the The regular expression pattern used to extract the username from the
certificate DN. The first match group is used as the username. certificate DN. The first match group is used as the username.
@ -693,9 +701,7 @@ Defaults to `CN=(.*?)(?:,\|$)`.
`certificate_authorities`:: `certificate_authorities`::
List of paths to the PEM certificate files that should be used to authenticate a List of paths to the PEM certificate files that should be used to authenticate a
user's certificate as trusted. Defaults to the trusted certificates configured user's certificate as trusted. Defaults to the trusted certificates configured
for SSL. See the {xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] for SSL. This setting cannot be used with `truststore.path`.
section of the PKI realm documentation for more information.
This setting cannot be used with `truststore.path`.
`truststore.algorithm`:: `truststore.algorithm`::
Algorithm for the truststore. Defaults to `SunX509`. Algorithm for the truststore. Defaults to `SunX509`.
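For example, a minimal PKI realm sketch (the realm name `pki1` and the CA file
name are illustrative; remember that `certificate_authorities` and
`truststore.path` are mutually exclusive):

[source,shell]
--------------------------------------------------
xpack:
  security:
    authc:
      realms:
        pki1:
          type: pki
          order: 1
          certificate_authorities: [ "CONFIG_DIR/ca.pem" ]
--------------------------------------------------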
@ -708,10 +714,7 @@ The password for the truststore.
`truststore.path`:: `truststore.path`::
The path of a truststore to use. Defaults to the trusted certificates configured The path of a truststore to use. Defaults to the trusted certificates configured
for SSL. See the for SSL. This setting cannot be used with `certificate_authorities`.
{xpack-ref}/pki-realm.html#pki-ssl-config[SSL settings] section of the PKI realm
documentation for more information. This setting cannot be used with
`certificate_authorities`.
`files.role_mapping`:: `files.role_mapping`::
Specifies the {xpack-ref}/security-files.html[location] of the Specifies the {xpack-ref}/security-files.html[location] of the
@ -731,8 +734,16 @@ Defaults to `100000`.
[[ref-saml-settings]] [[ref-saml-settings]]
[float] [float]
===== SAML realm settings ===== SAML realm settings
The `type` setting must be set to `saml`. In addition to the
<<ref-realm-settings,settings that are valid for all realms>>, you can specify
the following settings:
`idp.entity_id`:: `idp.entity_id`::
The Entity ID of the SAML Identity Provider The Entity ID of the SAML Identity Provider. An Entity ID is a URI with a
maximum length of 1024 characters. It can be a URL (`https://idp.example.com/`) or
a URN (`urn:example.com:idp`) and can be found in the configuration or the SAML
metadata of the Identity Provider.
`idp.metadata.path`:: `idp.metadata.path`::
The path _(recommended)_ or URL to a SAML 2.0 metadata file describing the The path _(recommended)_ or URL to a SAML 2.0 metadata file describing the
@ -740,7 +751,7 @@ capabilities and configuration of the Identity Provider.
If a path is provided, then it is resolved relative to the {es} config If a path is provided, then it is resolved relative to the {es} config
directory. directory.
If a URL is provided, then it must be either a `file` URL or a `https` URL. If a URL is provided, then it must be either a `file` URL or a `https` URL.
{security} will automatically poll this metadata resource and will reload {security} automatically polls this metadata resource and reloads
the IdP configuration when changes are detected. the IdP configuration when changes are detected.
File based resources are polled at a frequency determined by the global {es} File based resources are polled at a frequency determined by the global {es}
`resource.reload.interval.high` setting, which defaults to 5 seconds. `resource.reload.interval.high` setting, which defaults to 5 seconds.
@ -757,39 +768,47 @@ Indicates whether to utilise the Identity Provider's Single Logout service
Defaults to `true`. Defaults to `true`.
`sp.entity_id`:: `sp.entity_id`::
The Entity ID to use for this SAML Service Provider, entered as a URI. The Entity ID to use for this SAML Service Provider. This should be entered as a
URI. We recommend that you use the base URL of your Kibana instance. For example,
`https://kibana.example.com/`.
`sp.acs`:: `sp.acs`::
The URL of the Assertion Consumer Service within {kib}. The URL of the Assertion Consumer Service within {kib}. Typically this is the
"api/security/v1/saml" endpoint of your Kibana server. For example,
`https://kibana.example.com/api/security/v1/saml`.
`sp.logout`:: `sp.logout`::
The URL of the Single Logout service within {kib}. The URL of the Single Logout service within {kib}. Typically this is the
"logout" endpoint of your Kibana server. For example,
`https://kibana.example.com/logout`.
`attributes.principal`:: `attributes.principal`::
The Name of the SAML attribute that should be used as the {security} user's The Name of the SAML attribute that should be used as the {security} user's
principal (username) principal (username).
`attributes.groups`:: `attributes.groups`::
The Name of the SAML attribute that should be used to populate {security} The Name of the SAML attribute that should be used to populate {security}
user's groups user's groups.
`attributes.name`:: `attributes.name`::
The Name of the SAML attribute that should be used to populate {security} The Name of the SAML attribute that should be used to populate {security}
user's full name user's full name.
`attributes.mail`:: `attributes.mail`::
The Name of the SAML attribute that should be used to populate {security} The Name of the SAML attribute that should be used to populate {security}
user's email address user's email address.
`attributes.dn`:: `attributes.dn`::
The Name of the SAML attribute that should be used to populate {security} The Name of the SAML attribute that should be used to populate {security}
user's X.500 _Distinguished Name_ user's X.500 _Distinguished Name_.
`attribute_patterns.principal`:: `attribute_patterns.principal`::
A java regular expression that is matched against the SAML attribute specified A Java regular expression that is matched against the SAML attribute specified
by `attributes.pattern` before it is applied to the user's _principal_ property. by `attributes.pattern` before it is applied to the user's _principal_ property.
The attribute value must match the pattern, and the value of the first The attribute value must match the pattern and the value of the first
_capturing group_ is used as the principal. _capturing group_ is used as the principal. For example, `^([^@]+)@example\\.com$`
matches email addresses from the "example.com" domain and uses the local-part as
the principal.
`attribute_patterns.groups`:: `attribute_patterns.groups`::
As per `attribute_patterns.principal`, but for the _group_ property. As per `attribute_patterns.principal`, but for the _group_ property.
@ -805,26 +824,41 @@ As per `attribute_patterns.principal`, but for the _dn_ property.
`nameid_format`:: `nameid_format`::
The NameID format that should be requested when asking the IdP to authenticate The NameID format that should be requested when asking the IdP to authenticate
the current user. the current user. Defaults to requesting _transient_ names
Defaults to `urn:oasis:names:tc:SAML:2.0:nameid-format:transient` (`urn:oasis:names:tc:SAML:2.0:nameid-format:transient`).
`nameid.allow_create`:: The value of the `AllowCreate` attribute of the
`NameIdPolicy` element in an authentication request. Defaults to `false`.
`nameid.sp_qualifier`:: The value of the `SPNameQualifier` attribute of the
`NameIdPolicy` element in an authentication request. The default is to not
include the `SPNameQualifier` attribute.
`force_authn`:: `force_authn`::
Whether to set the `ForceAuthn` attribute when requesting that the IdP Specifies whether to set the `ForceAuthn` attribute when requesting that the IdP
authenticate the current user. authenticate the current user. If set to `true`, the IdP is required to verify
the user's identity, irrespective of any existing sessions they might have.
Defaults to `false`. Defaults to `false`.
`populate_user_metadata`:: `populate_user_metadata`::
Whether to populate the {es} user's metadata with the values that are provided Specifies whether to populate the {es} user's metadata with the values that are
by the SAML attributes. provided by the SAML attributes. Defaults to `true`.
Defaults to `true`.
`allowed_clock_skew`:: `allowed_clock_skew`::
The maximum amount of skew that can be tolerated between the IdP's clock and the The maximum amount of skew that can be tolerated between the IdP's clock and the
{es} node's clock. {es} node's clock.
Defaults to `3m` (3 minutes). Defaults to `3m` (3 minutes).
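Putting the required pieces together, a minimal SAML realm sketch might look
like this (the realm name `saml1`, the metadata path, and the attribute name
are illustrative; the URLs follow the `sp.*` examples above):

[source,shell]
--------------------------------------------------
xpack:
  security:
    authc:
      realms:
        saml1:
          type: saml
          order: 2
          idp.metadata.path: saml/idp-metadata.xml
          idp.entity_id: "urn:example.com:idp"
          sp.entity_id: "https://kibana.example.com/"
          sp.acs: "https://kibana.example.com/api/security/v1/saml"
          sp.logout: "https://kibana.example.com/logout"
          attributes.principal: "urn:oid:0.9.2342.19200300.100.1.1"
--------------------------------------------------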
[float]
[[ref-saml-signing-settings]]
===== SAML realm signing settings
If a signing key is configured (that is, either `signing.key` or
`signing.keystore.path` is set), then {security} signs outgoing SAML messages.
Signing can be configured using the following settings:
`signing.saml_messages`:: `signing.saml_messages`::
A list of SAML message types that should be signed, or `*` to sign all messages. A list of SAML message types that should be signed or `*` to sign all messages.
Each element in the list should be the local name of a SAML XML Element. Each element in the list should be the local name of a SAML XML Element.
Supported element types are `AuthnRequest`, `LogoutRequest` and `LogoutResponse`. Supported element types are `AuthnRequest`, `LogoutRequest` and `LogoutResponse`.
Only valid if `signing.key` or `signing.keystore.path` is also specified. Only valid if `signing.key` or `signing.keystore.path` is also specified.
@ -832,152 +866,177 @@ Defaults to `*`.
`signing.key`:: `signing.key`::
Specifies the path to the PEM encoded private key to use for SAML message signing. Specifies the path to the PEM encoded private key to use for SAML message signing.
`signing.key` and `signing.keystore.path` may not be used at the same time. `signing.key` and `signing.keystore.path` cannot be used at the same time.
`signing.secure_key_passphrase` (<<secure-settings,Secure>>):: `signing.secure_key_passphrase` (<<secure-settings,Secure>>)::
Specifies the passphrase to decrypt the PEM encoded private key (`signing.key`) Specifies the passphrase to decrypt the PEM encoded private key (`signing.key`)
if it is encrypted. if it is encrypted.
`signing.certificate`:: `signing.certificate`::
Specifies the path to the PEM encoded certificate that corresponds to the Specifies the path to the PEM encoded certificate (or certificate chain) that
`signing.key`. May only be used if `signing.key` is set. corresponds to the `signing.key`. This certificate must also be included in the
Service Provider metadata or manually configured within the IdP to allow for
signature validation. This setting can only be used if `signing.key` is set.
`signing.keystore.path`:: `signing.keystore.path`::
The path to the keystore that contains a private key and certificate. The path to the keystore that contains a private key and certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file. Must be either a Java Keystore (jks) or a PKCS#12 file.
`signing.key` and `signing.keystore.path` may not be used at the same time. `signing.key` and `signing.keystore.path` cannot be used at the same time.
`signing.keystore.type`:: `signing.keystore.type`::
The type of the keystore (`signing.keystore.path`). The type of the keystore in `signing.keystore.path`.
Must be one of "jks" or "PKCS12". Defaults to "PKCS12" if the keystore path Must be either `jks` or `PKCS12`. If the keystore path ends in ".p12", ".pfx",
ends in ".p12", ".pfx" or "pkcs12", otherwise uses "jks". or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`.
`signing.keystore.alias`:: `signing.keystore.alias`::
Specifies the alias of the key within the keystore that should be Specifies the alias of the key within the keystore that should be
used for SAML message signing. Must be specified if the keystore used for SAML message signing. If the keystore contains more than one private
contains more than one private key. key, this setting must be specified.
`signing.keystore.secure_password` (<<secure-settings,Secure>>):: `signing.keystore.secure_password` (<<secure-settings,Secure>>)::
The password to the keystore (`signing.keystore.path`). The password to the keystore in `signing.keystore.path`.
`signing.keystore.secure_key_password` (<<secure-settings,Secure>>):: `signing.keystore.secure_key_password` (<<secure-settings,Secure>>)::
The password for the key in the keystore (`signing.keystore.path`). The password for the key in the keystore (`signing.keystore.path`).
Defaults to the keystore password. Defaults to the keystore password.
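For example, a sketch of signing configuration within the realm settings shown
above (the keystore path and alias are illustrative):

[source,shell]
--------------------------------------------------
signing.keystore.path: saml/saml-signing.p12
signing.keystore.alias: kibana-signer
signing.saml_messages: [ "AuthnRequest" ]
--------------------------------------------------

The corresponding `signing.keystore.secure_password` is a secure setting, so
it goes into the {es} keystore rather than into `elasticsearch.yml`.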
[float]
[[ref-saml-encryption-settings]]
===== SAML realm encryption settings
If an encryption key is configured (that is, either `encryption.key` or
`encryption.keystore.path` is set), then {security} publishes an encryption
certificate when generating metadata and attempts to decrypt incoming SAML
content. Encryption can be configured using the following settings:
`encryption.key`:: `encryption.key`::
Specifies the path to the PEM encoded private key to use for SAML message Specifies the path to the PEM encoded private key to use for SAML message
decryption. decryption.
`encryption.key` and `encryption.keystore.path` may not be used at the same time. `encryption.key` and `encryption.keystore.path` cannot be used at the same time.
`encryption.secure_key_passphrase` (<<secure-settings,Secure>>):: `encryption.secure_key_passphrase` (<<secure-settings,Secure>>)::
Specifies the passphrase to decrypt the PEM encoded private key Specifies the passphrase to decrypt the PEM encoded private key
(`encryption.key`) if it is encrypted. (`encryption.key`) if it is encrypted.
`encryption.certificate`:: `encryption.certificate`::
Specifies the path to the PEM encoded certificate chain that is associated with Specifies the path to the PEM encoded certificate (or certificate chain) that is
the `encryption.key`. May only be used if `encryption.key` is set. associated with the `encryption.key`. This certificate must also be included in
the Service Provider metadata or manually configured within the IdP to enable
message encryption. This setting can be used only if `encryption.key` is set.
`encryption.keystore.path`:: `encryption.keystore.path`::
The path to the keystore that contains a private key and certificate. The path to the keystore that contains a private key and certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file. Must be either a Java Keystore (jks) or a PKCS#12 file.
`encryption.key` and `encryption.keystore.path` may not be used at the same time. `encryption.key` and `encryption.keystore.path` cannot be used at the same time.
`encryption.keystore.type`:: `encryption.keystore.type`::
The type of the keystore (`encryption.keystore.path`). The type of the keystore (`encryption.keystore.path`).
Must be one of "jks" or "PKCS12". Defaults to "PKCS12" if the keystore path Must be either `jks` or `PKCS12`. If the keystore path ends in ".p12", ".pfx",
ends in ".p12", ".pfx" or "pkcs12", otherwise uses "jks". or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`.
`encryption.keystore.alias`:: `encryption.keystore.alias`::
Specifies the alias of the key within the keystore (`encryption.keystore.path`) Specifies the alias of the key within the keystore (`encryption.keystore.path`)
that should be used for SAML message decryption. If not specified, all compatible that should be used for SAML message decryption. If not specified, all compatible
key pairs from the keystore will be considered as candidate keys for decryption. key pairs from the keystore are considered as candidate keys for decryption.
`encryption.keystore.secure_password` (<<secure-settings,Secure>>):: `encryption.keystore.secure_password` (<<secure-settings,Secure>>)::
The password to the keystore (`encryption.keystore.path`). The password to the keystore (`encryption.keystore.path`).
`encryption.keystore.secure_key_password` (<<secure-settings,Secure>>):: `encryption.keystore.secure_key_password` (<<secure-settings,Secure>>)::
The password for the key in the keystore (`encryption.keystore.path`). Only a The password for the key in the keystore (`encryption.keystore.path`). Only a
single password is supported. If you are using multiple decryption keys, then single password is supported. If you are using multiple decryption keys,
they cannot have individual passwords. they cannot have individual passwords.
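Analogously, a sketch of decryption configuration using a PEM key pair (the
file names are illustrative; `encryption.key` and `encryption.keystore.path`
are mutually exclusive):

[source,shell]
--------------------------------------------------
encryption.key: saml/saml-decryption-key.pem
encryption.certificate: saml/saml-decryption-cert.pem
--------------------------------------------------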
[float]
[[ref-saml-ssl-settings]]
===== SAML realm SSL settings
If you are loading the IdP metadata over SSL/TLS (that is, `idp.metadata.path`
is a URL using the `https` protocol), the following settings can be used to
configure SSL. If these are not specified, then the
<<ssl-tls-settings,default SSL settings>> are used.
NOTE: These settings are not used for any purpose other than loading metadata
over https.
`ssl.key`:: `ssl.key`::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the path to the PEM encoded private key to use for http client
path to the PEM encoded private key to use for http client authentication (if authentication (if required). `ssl.key` and `ssl.keystore.path` cannot be used
required). `ssl.key` and `ssl.keystore.path` may not be used at the same time. at the same time.
`ssl.key_passphrase`:: `ssl.key_passphrase`::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the
passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
encrypted. May not be used with `ssl.secure_key_passphrase` encrypted. Cannot be used with `ssl.secure_key_passphrase`.
`ssl.secure_key_passphrase` (<<secure-settings,Secure>>):: `ssl.secure_key_passphrase` (<<secure-settings,Secure>>)::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the
passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is passphrase to decrypt the PEM encoded private key (`ssl.key`) if it is
encrypted. May not be used with `ssl.key_passphrase` encrypted. Cannot be used with `ssl.key_passphrase`.
`ssl.certificate`:: `ssl.certificate`::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the
path to the PEM encoded certificate (or certificate chain) that is associated path to the PEM encoded certificate (or certificate chain) that is associated
with the key (`ssl.key`). May only be used if `ssl.key` is set. with the key (`ssl.key`). This setting can be used only if `ssl.key` is set.
`ssl.certificate_authorities`:: `ssl.certificate_authorities`::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the
paths to the PEM encoded certificate authority certificates that should be paths to the PEM encoded certificate authority certificates that should be
trusted. `ssl.certificate_authorities` and `ssl.truststore.path` may not be trusted. `ssl.certificate_authorities` and `ssl.truststore.path` cannot be
used at the same time. used at the same time.
`ssl.keystore.path`:: `ssl.keystore.path`::
If retrieving IDP metadata via https (see `idp.metadata.path`), the path to Specifies the path to
the keystore that contains a private key and certificate. the keystore that contains a private key and certificate.
Must be either a Java Keystore (jks) or a PKCS#12 file. Must be either a Java Keystore (jks) or a PKCS#12 file.
`ssl.key` and `ssl.keystore.path` may not be used at the same time. `ssl.key` and `ssl.keystore.path` cannot be used at the same time.
`ssl.keystore.type`:: `ssl.keystore.type`::
The type of the keystore (`ssl.keystore.path`). Must be one of "jks" or "PKCS12". The type of the keystore (`ssl.keystore.path`). Must be either `jks` or `PKCS12`.
Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or "pkcs12", If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults
otherwise uses "jks" to `PKCS12`. Otherwise, it defaults to `jks`.
`ssl.keystore.password`:: `ssl.keystore.password`::
The password to the keystore (`ssl.keystore.path`). The password to the keystore (`ssl.keystore.path`). This setting cannot be used
May not be used with `ssl.keystore.secure_password`. with `ssl.keystore.secure_password`.
`ssl.keystore.secure_password` (<<secure-settings,Secure>>):: `ssl.keystore.secure_password` (<<secure-settings,Secure>>)::
The password to the keystore (`ssl.keystore.path`). The password to the keystore (`ssl.keystore.path`).
May not be used with `ssl.keystore.password`. This setting cannot be used with `ssl.keystore.password`.
`ssl.keystore.key_password`:: `ssl.keystore.key_password`::
The password for the key in the keystore (`ssl.keystore.path`). The password for the key in the keystore (`ssl.keystore.path`).
Defaults to the keystore password. Defaults to the keystore password. This setting cannot be used with
May not be used with `ssl.keystore.secure_key_password`. `ssl.keystore.secure_key_password`.
`ssl.keystore.secure_key_password` (<<secure-settings,Secure>>):: `ssl.keystore.secure_key_password` (<<secure-settings,Secure>>)::
The password for the key in the keystore (`ssl.keystore.path`). The password for the key in the keystore (`ssl.keystore.path`).
Defaults to the keystore password. Defaults to the keystore password. This setting cannot be used with
May not be used with `ssl.keystore.key_password`. `ssl.keystore.key_password`.
`ssl.truststore.path`:: `ssl.truststore.path`::
If retrieving IDP metadata via https (see `idp.metadata.path`), the path to the The path to the
keystore that contains the certificates to trust. keystore that contains the certificates to trust.
Must be either a Java Keystore (jks) or a PKCS#12 file. Must be either a Java Keystore (jks) or a PKCS#12 file.
`ssl.certificate_authorities` and `ssl.truststore.path` may not be used at the `ssl.certificate_authorities` and `ssl.truststore.path` cannot be used at the
same time. same time.
`ssl.truststore.type`:: `ssl.truststore.type`::
The type of the truststore (`ssl.truststore.path`). Must be one of "jks" or "PKCS12". The type of the truststore (`ssl.truststore.path`). Must be either `jks` or
Defaults to "PKCS12" if the keystore path ends in ".p12", ".pfx" or "pkcs12", `PKCS12`. If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting
otherwise uses "jks" defaults to `PKCS12`. Otherwise, it defaults to `jks`.
`ssl.truststore.password`:: `ssl.truststore.password`::
The password to the truststore (`ssl.truststore.path`). The password to the truststore (`ssl.truststore.path`). This setting cannot be
May not be used with `ssl.truststore.secure_password`. used with `ssl.truststore.secure_password`.
`ssl.truststore.secure_password` (<<secure-settings,Secure>>):: `ssl.truststore.secure_password` (<<secure-settings,Secure>>)::
The password to the truststore (`ssl.truststore.path`). The password to the truststore (`ssl.truststore.path`). This setting cannot be
May not be used with `ssl.truststore.password`. used with `ssl.truststore.password`.
`ssl.verification_mode`:: `ssl.verification_mode`::
If retrieving IDP metadata via https (see `idp.metadata.path`), one of `full` One of `full`
(verify the hostname and the certicate path), `certificate` (verify the (verify the hostname and the certificate path), `certificate` (verify the
certificate path, but not the hostname) or `none` (perform no verification). certificate path, but not the hostname) or `none` (perform no verification).
Defaults to `full`. Defaults to `full`.
+ +
@ -985,11 +1044,10 @@ See <<ssl-tls-settings,`xpack.ssl.verification_mode`>> for a more detailed
explanation of these values. explanation of these values.
`ssl.supported_protocols`:: `ssl.supported_protocols`::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the supported protocols for TLS/SSL.
supported protocols for TLS/SSL.
`ssl.cipher_suites`:: `ssl.cipher_suites`::
If retrieving IDP metadata via https (see `idp.metadata.path`), specifies the Specifies the
cipher suites that should be supported. cipher suites that should be supported.
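For example, a sketch that loads the IdP metadata over `https` and pins the CA
that signed the metadata server's certificate (the URL and file name are
illustrative):

[source,shell]
--------------------------------------------------
idp.metadata.path: https://idp.example.com/saml/metadata
ssl.certificate_authorities: [ "saml/metadata-ca.pem" ]
ssl.verification_mode: full
--------------------------------------------------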
[float] [float]
View File
@ -6,7 +6,6 @@
package org.elasticsearch.xpack.core.scheduler; package org.elasticsearch.xpack.core.scheduler;
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.joda.time.DateTimeZone; import org.joda.time.DateTimeZone;
@ -29,8 +28,7 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArg
/** /**
* * THIS CLASS IS A FORK OF
* THIS CLASS IS A COPY OF
* <a href="https://fisheye.terracotta.org/browse/Quartz/trunk/quartz-core/src/main/java/org/quartz/CronExpression.java?r=2426"> * <a href="https://fisheye.terracotta.org/browse/Quartz/trunk/quartz-core/src/main/java/org/quartz/CronExpression.java?r=2426">
* {@code CronExpression}</a> * {@code CronExpression}</a>
* FROM THE <a href="http://quartz-scheduler.org/">QUARTZ</a> PROJECT * FROM THE <a href="http://quartz-scheduler.org/">QUARTZ</a> PROJECT
@ -44,63 +42,63 @@ import static org.elasticsearch.xpack.core.watcher.support.Exceptions.illegalArg
* Cron expressions are comprised of 6 required fields and one optional field * Cron expressions are comprised of 6 required fields and one optional field
* separated by white space. The fields respectively are described as follows: * separated by white space. The fields respectively are described as follows:
* *
* <table cellspacing="8"> * <table style="border-collapse: separate; border-spacing: 8px;">
* <caption>Fields in cron expressions</caption> * <caption>Fields in cron expressions</caption>
* <tr> * <tr>
* <th align="left">Field Name</th> * <th>Field Name</th>
* <th align="left">&nbsp;</th> * <th>&nbsp;</th>
* <th align="left">Allowed Values</th> * <th>Allowed Values</th>
* <th align="left">&nbsp;</th> * <th>&nbsp;</th>
* <th align="left">Allowed Special Characters</th> * <th>Allowed Special Characters</th>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Seconds</code></td> * <td><code>Seconds</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>0-59</code></td> * <td><code>0-59</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * /</code></td> * <td><code>, - * /</code></td>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Minutes</code></td> * <td><code>Minutes</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>0-59</code></td> * <td><code>0-59</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * /</code></td> * <td><code>, - * /</code></td>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Hours</code></td> * <td><code>Hours</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>0-23</code></td> * <td><code>0-23</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * /</code></td> * <td><code>, - * /</code></td>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Day-of-month</code></td> * <td><code>Day-of-month</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>1-31</code></td> * <td><code>1-31</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * ? / L W</code></td> * <td><code>, - * ? / L W</code></td>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Month</code></td> * <td><code>Month</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>0-11 or JAN-DEC</code></td> * <td><code>0-11 or JAN-DEC</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * /</code></td> * <td><code>, - * /</code></td>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Day-of-Week</code></td> * <td><code>Day-of-Week</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>1-7 or SUN-SAT</code></td> * <td><code>1-7 or SUN-SAT</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * ? / L #</code></td> * <td><code>, - * ? / L #</code></td>
* </tr> * </tr>
* <tr> * <tr>
* <td align="left"><code>Year (Optional)</code></td> * <td><code>Year (Optional)</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>empty, 1970-2199</code></td> * <td><code>empty, 1970-2199</code></td>
* <td align="left">&nbsp;</td> * <td>&nbsp;</td>
* <td align="left"><code>, - * /</code></td> * <td><code>, - * /</code></td>
* </tr> * </tr>
* </table> * </table>
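* <p>
* For example (an illustrative note, using the standard Quartz semantics that
* this fork retains): the expression <code>"0 0/5 * * * ?"</code> fires every
* five minutes, and <code>"0 0 12 ? * MON-FRI"</code> fires at noon on weekdays.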
* <P> * <P>
View File
@ -64,7 +64,7 @@ public class LicensesMetaDataSerializationTests extends ESTestCase {
License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2)); License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2));
LicensesMetaData licensesMetaData = new LicensesMetaData(license, Version.CURRENT); LicensesMetaData licensesMetaData = new LicensesMetaData(license, Version.CURRENT);
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY);
RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData); RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(Collections.singletonList(repositoryMetaData));
final MetaData.Builder metaDataBuilder = MetaData.builder(); final MetaData.Builder metaDataBuilder = MetaData.builder();
if (randomBoolean()) { // random order of insertion if (randomBoolean()) { // random order of insertion
metaDataBuilder.putCustom(licensesMetaData.getWriteableName(), licensesMetaData); metaDataBuilder.putCustom(licensesMetaData.getWriteableName(), licensesMetaData);
View File
@ -12,8 +12,9 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister;
import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts;
import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister;
import org.elasticsearch.xpack.ml.job.process.diagnostics.DataStreamDiagnostics;
import java.util.Date; import java.util.Date;
import java.util.Locale; import java.util.Locale;
View File
@ -1,223 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.process;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.Counter;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import java.util.Date;
import java.util.SortedMap;
import java.util.TreeMap;
public class DataStreamDiagnostics {
/**
* Minimum window to take into consideration for bucket count histogram.
*/
private static final int MIN_BUCKET_WINDOW = 10;
/**
* Threshold to report potential sparsity problems.
*
* Sparsity score is calculated: log(average) - log(current)
*
* If score is above the threshold, bucket is reported as sparse bucket.
*/
private static final int DATA_SPARSITY_THRESHOLD = 2;
private static final long MS_IN_SECOND = 1000;
private static final Logger LOGGER = Loggers.getLogger(DataStreamDiagnostics.class);
/**
* Container for the histogram
*
* Note: Using a sorted map in order to iterate in order when consuming the
* data. The counter is lazily initialized and potentially missing in case
* of empty buckets.
*
* The container gets pruned along the data streaming based on the bucket
* window, so it should not contain more than max(MIN_BUCKET_WINDOW,
* 'buckets_required_by_latency') + 1 items at any time.
*
* Sparsity can only be calculated after the window has been filled. Currently
* this window is lost if a job gets closed and re-opened. We might fix this
* in future.
*/
private final SortedMap<Long, Counter> movingBucketHistogram = new TreeMap<>();
private final long bucketSpan;
private final long latency;
private long movingBucketCount = 0;
private long latestReportedBucket = -1;
private long bucketCount = 0;
private long emptyBucketCount = 0;
private long latestEmptyBucketTime = -1;
private long sparseBucketCount = 0;
private long latestSparseBucketTime = -1;
public DataStreamDiagnostics(Job job) {
bucketSpan = job.getAnalysisConfig().getBucketSpan().seconds();
latency = job.getAnalysisConfig().getLatency() == null ? 0 : job.getAnalysisConfig().getLatency().seconds();
}
/**
* Check record
*
* @param recordTimestampInMs
* The record timestamp in milliseconds since epoch
*/
public void checkRecord(long recordTimestampInMs) {
checkBucketing(recordTimestampInMs);
}
/**
* Flush all counters, should be called at the end of the data stream
*/
public void flush() {
// flush all we know
if (movingBucketHistogram.isEmpty() == false) {
flush(movingBucketHistogram.lastKey());
}
}
/**
* Check bucketing of record. Report empty and sparse buckets.
*
* @param recordTimestampInMs
* The record timestamp in milliseconds since epoch
*/
private void checkBucketing(long recordTimestampInMs) {
long bucket = (recordTimestampInMs / MS_IN_SECOND) / bucketSpan;
long bucketHistogramStartBucket = ((recordTimestampInMs / MS_IN_SECOND) - latency) / bucketSpan;
bucketHistogramStartBucket = Math.min(bucket - MIN_BUCKET_WINDOW, bucketHistogramStartBucket);
movingBucketHistogram.computeIfAbsent(bucket, l -> Counter.newCounter()).addAndGet(1);
++movingBucketCount;
// find the very first bucket
if (latestReportedBucket == -1) {
latestReportedBucket = bucket - 1;
}
// flush all bucket out of the window
flush(bucketHistogramStartBucket);
}
/**
* Flush Bucket reporting till the given bucket.
*
* @param bucketNumber
* The number of the last bucket that can be flushed.
*/
private void flush(long bucketNumber) {
// check for a longer period of empty buckets
long emptyBuckets = movingBucketHistogram.firstKey() - latestReportedBucket - 1;
if (emptyBuckets > 0) {
bucketCount += emptyBuckets;
emptyBucketCount += emptyBuckets;
latestEmptyBucketTime = (movingBucketHistogram.firstKey() - 1) * bucketSpan * MS_IN_SECOND;
latestReportedBucket = movingBucketHistogram.firstKey() - 1;
}
// calculate the average number of data points in a bucket based on the
// current history
double averageBucketSize = (float) movingBucketCount / movingBucketHistogram.size();
// prune all buckets that can be flushed
long lastBucketSparsityCheck = Math.min(bucketNumber, movingBucketHistogram.lastKey());
for (long pruneBucket = movingBucketHistogram.firstKey(); pruneBucket < lastBucketSparsityCheck; ++pruneBucket) {
Counter bucketSizeHolder = movingBucketHistogram.remove(pruneBucket);
long bucketSize = bucketSizeHolder != null ? bucketSizeHolder.get() : 0L;
LOGGER.debug("Checking bucket {} compare sizes, this bucket: {} average: {}", pruneBucket, bucketSize, averageBucketSize);
++bucketCount;
latestReportedBucket = pruneBucket;
// substract bucketSize from the counter
movingBucketCount -= bucketSize;
// check if bucket is empty
if (bucketSize == 0L) {
latestEmptyBucketTime = pruneBucket * bucketSpan * MS_IN_SECOND;
++emptyBucketCount;
// do not do sparse analysis on an empty bucket
continue;
}
// simplistic way to calculate data sparsity, just take the log and
// check the difference
double logAverageBucketSize = Math.log(averageBucketSize);
double logBucketSize = Math.log(bucketSize);
double sparsityScore = logAverageBucketSize - logBucketSize;
if (sparsityScore > DATA_SPARSITY_THRESHOLD) {
LOGGER.debug("Sparse bucket {}, this bucket: {} average: {}, sparsity score: {}", pruneBucket, bucketSize,
averageBucketSize, sparsityScore);
++sparseBucketCount;
latestSparseBucketTime = pruneBucket * bucketSpan * MS_IN_SECOND;
}
}
// prune the rest if necessary
for (long pruneBucket = lastBucketSparsityCheck; pruneBucket < bucketNumber; ++pruneBucket) {
Counter bucketSizeHolder = movingBucketHistogram.remove(pruneBucket);
long bucketSize = bucketSizeHolder != null ? bucketSizeHolder.get() : 0L;
bucketCount++;
latestReportedBucket = pruneBucket;
// substract bucketSize from the counter
movingBucketCount -= bucketSize;
// check if bucket is empty
if (bucketSize == 0L) {
latestEmptyBucketTime = pruneBucket * bucketSpan * MS_IN_SECOND;
++emptyBucketCount;
}
}
}
public long getBucketCount() {
return bucketCount;
}
public long getEmptyBucketCount() {
return emptyBucketCount;
}
public Date getLatestEmptyBucketTime() {
return latestEmptyBucketTime > 0 ? new Date(latestEmptyBucketTime) : null;
}
public long getSparseBucketCount() {
return sparseBucketCount;
}
public Date getLatestSparseBucketTime() {
return latestSparseBucketTime > 0 ? new Date(latestSparseBucketTime) : null;
}
/**
* Resets counts,
*
* Note: This does not reset the inner state for e.g. sparse bucket
* detection.
*
*/
public void resetCounts() {
bucketCount = 0;
emptyBucketCount = 0;
sparseBucketCount = 0;
}
}
View File
@ -0,0 +1,132 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.process.diagnostics;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.elasticsearch.xpack.core.ml.utils.Intervals;
/**
* A moving window of buckets that allows keeping
* track of some statistics like the bucket count,
* empty or sparse buckets, etc.
*
* The counts are stored in an array that functions as a
* circular buffer. When time is advanced, all buckets
* out of the window are flushed.
*/
class BucketDiagnostics {
private static final int MIN_BUCKETS = 10;
private final long bucketSpanMs;
private final long latencyMs;
private final int maxSize;
private final long[] buckets;
private long movingBucketCount = 0;
private long latestBucketStartMs = -1;
private int latestBucketIndex;
private long earliestBucketStartMs = -1;
private int earliestBucketIndex;
private long latestFlushedBucketStartMs = -1;
private final BucketFlushListener bucketFlushListener;
BucketDiagnostics(Job job, BucketFlushListener bucketFlushListener) {
bucketSpanMs = job.getAnalysisConfig().getBucketSpan().millis();
latencyMs = job.getAnalysisConfig().getLatency() == null ? 0 : job.getAnalysisConfig().getLatency().millis();
maxSize = Math.max((int) (Intervals.alignToCeil(latencyMs, bucketSpanMs) / bucketSpanMs), MIN_BUCKETS);
buckets = new long[maxSize];
this.bucketFlushListener = bucketFlushListener;
}
void addRecord(long recordTimestampMs) {
long bucketStartMs = Intervals.alignToFloor(recordTimestampMs, bucketSpanMs);
// Initialize earliest/latest times
if (latestBucketStartMs < 0) {
latestBucketStartMs = bucketStartMs;
earliestBucketStartMs = bucketStartMs;
}
advanceTime(bucketStartMs);
addToBucket(bucketStartMs);
}
private void advanceTime(long bucketStartMs) {
while (bucketStartMs > latestBucketStartMs) {
int flushBucketIndex = (latestBucketIndex + 1) % maxSize;
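// The ring buffer is full when the next slot still holds the earliest bucket; flush it before reuse.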
if (flushBucketIndex == earliestBucketIndex) {
flush(flushBucketIndex);
movingBucketCount -= buckets[flushBucketIndex];
earliestBucketStartMs += bucketSpanMs;
earliestBucketIndex = (earliestBucketIndex + 1) % maxSize;
}
buckets[flushBucketIndex] = 0L;
latestBucketStartMs += bucketSpanMs;
latestBucketIndex = flushBucketIndex;
}
}
private void addToBucket(long bucketStartMs) {
int offsetToLatest = (int) ((bucketStartMs - latestBucketStartMs) / bucketSpanMs);
int bucketIndex = (latestBucketIndex + offsetToLatest) % maxSize;
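// Out-of-order records yield a negative offset; wrap the index around the ring buffer.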
if (bucketIndex < 0) {
bucketIndex = maxSize + bucketIndex;
}
++buckets[bucketIndex];
++movingBucketCount;
if (bucketStartMs < earliestBucketStartMs) {
earliestBucketStartMs = bucketStartMs;
earliestBucketIndex = bucketIndex;
}
}
private void flush(int bucketIndex) {
long bucketStartMs = getTimestampMs(bucketIndex);
if (bucketStartMs > latestFlushedBucketStartMs) {
bucketFlushListener.onBucketFlush(bucketStartMs, buckets[bucketIndex]);
latestFlushedBucketStartMs = bucketStartMs;
}
}
private long getTimestampMs(int bucketIndex) {
int offsetToLatest = latestBucketIndex - bucketIndex;
if (offsetToLatest < 0) {
offsetToLatest = maxSize + offsetToLatest;
}
return latestBucketStartMs - offsetToLatest * bucketSpanMs;
}
void flush() {
if (latestBucketStartMs < 0) {
return;
}
int bucketIndex = earliestBucketIndex;
while (bucketIndex != latestBucketIndex) {
flush(bucketIndex);
bucketIndex = (bucketIndex + 1) % maxSize;
}
}
double averageBucketCount() {
return (double) movingBucketCount / size();
}
private int size() {
if (latestBucketStartMs < 0) {
return 0;
}
return (int) ((latestBucketStartMs - earliestBucketStartMs) / bucketSpanMs) + 1;
}
interface BucketFlushListener {
void onBucketFlush(long bucketStartMs, long bucketCounts);
}
}
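To illustrate how the flush listener is driven, a hypothetical usage sketch
(not part of this commit; it assumes a prebuilt `Job` named `job` with a
one-minute bucket span, no latency, and calling code in the same package):

final long[] emptyBuckets = {0};
BucketDiagnostics diagnostics = new BucketDiagnostics(job, (bucketStartMs, count) -> {
    if (count == 0) {
        emptyBuckets[0]++; // called once per flushed empty bucket
    }
});
diagnostics.addRecord(60_000L);  // lands in bucket [60s, 120s)
diagnostics.addRecord(180_000L); // lands in bucket [180s, 240s)
diagnostics.flush();             // flushes complete buckets before the latest one
// emptyBuckets[0] == 1, for the untouched bucket [120s, 180s)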
View File
@ -0,0 +1,113 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ml.job.process.diagnostics;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.xpack.core.ml.job.config.Job;
import java.util.Date;
public class DataStreamDiagnostics {
/**
* Threshold to report potential sparsity problems.
*
* Sparsity score is calculated: log(average) - log(current)
*
* If score is above the threshold, bucket is reported as sparse bucket.
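*
* For example, with an average of 100 records per bucket, a bucket holding
* only 10 records scores log(100) - log(10) = log(10), roughly 2.3 (natural
* logarithm), which is above the threshold of 2, so the bucket is reported
* as sparse.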
*/
private static final int DATA_SPARSITY_THRESHOLD = 2;
private static final Logger LOGGER = Loggers.getLogger(DataStreamDiagnostics.class);
private final BucketDiagnostics bucketDiagnostics;
private long bucketCount = 0;
private long emptyBucketCount = 0;
private long latestEmptyBucketTime = -1;
private long sparseBucketCount = 0;
private long latestSparseBucketTime = -1;
public DataStreamDiagnostics(Job job) {
bucketDiagnostics = new BucketDiagnostics(job, createBucketFlushListener());
}
private BucketDiagnostics.BucketFlushListener createBucketFlushListener() {
return (flushedBucketStartMs, flushedBucketCount) -> {
++bucketCount;
if (flushedBucketCount == 0) {
++emptyBucketCount;
latestEmptyBucketTime = flushedBucketStartMs;
} else {
// simplistic way to calculate data sparsity, just take the log and
// check the difference
double averageBucketSize = bucketDiagnostics.averageBucketCount();
double logAverageBucketSize = Math.log(averageBucketSize);
double logBucketSize = Math.log(flushedBucketCount);
double sparsityScore = logAverageBucketSize - logBucketSize;
if (sparsityScore > DATA_SPARSITY_THRESHOLD) {
LOGGER.debug("Sparse bucket {}, this bucket: {} average: {}, sparsity score: {}", flushedBucketStartMs,
flushedBucketCount, averageBucketSize, sparsityScore);
++sparseBucketCount;
latestSparseBucketTime = flushedBucketStartMs;
}
}
};
}
/**
* Check record
*
* @param recordTimestampInMs
* The record timestamp in milliseconds since epoch
*/
public void checkRecord(long recordTimestampInMs) {
bucketDiagnostics.addRecord(recordTimestampInMs);
}
/**
* Flush all counters; this should be called at the end of the data stream.
*/
public void flush() {
// flush all we know
bucketDiagnostics.flush();
}
public long getBucketCount() {
return bucketCount;
}
public long getEmptyBucketCount() {
return emptyBucketCount;
}
public Date getLatestEmptyBucketTime() {
return latestEmptyBucketTime > 0 ? new Date(latestEmptyBucketTime) : null;
}
public long getSparseBucketCount() {
return sparseBucketCount;
}
public Date getLatestSparseBucketTime() {
return latestSparseBucketTime > 0 ? new Date(latestSparseBucketTime) : null;
}
/**
* Resets the counts.
*
* Note: This does not reset the inner state used for, e.g., sparse bucket
* detection.
*
*/
public void resetCounts() {
bucketCount = 0;
emptyBucketCount = 0;
sparseBucketCount = 0;
}
}
View File
@ -3,7 +3,7 @@
* or more contributor license agreements. Licensed under the Elastic License; * or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License. * you may not use this file except in compliance with the Elastic License.
*/ */
package org.elasticsearch.xpack.ml.job.process; package org.elasticsearch.xpack.ml.job.process.diagnostics;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
@ -13,7 +13,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector;
import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.Job;
import org.junit.Before; import org.junit.Before;
import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Date; import java.util.Date;
@ -21,9 +20,9 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
private static final long BUCKET_SPAN = 60000; private static final long BUCKET_SPAN = 60000;
private Job job; private Job job;
@Before @Before
public void setUpMocks() throws IOException { public void setUpMocks() {
AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(Arrays.asList(new Detector.Builder("metric", "field").build())); AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(Arrays.asList(new Detector.Builder("metric", "field").build()));
acBuilder.setBucketSpan(TimeValue.timeValueMillis(BUCKET_SPAN)); acBuilder.setBucketSpan(TimeValue.timeValueMillis(BUCKET_SPAN));
acBuilder.setLatency(TimeValue.ZERO); acBuilder.setLatency(TimeValue.ZERO);
@ -32,7 +31,7 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
Job.Builder builder = new Job.Builder("job_id"); Job.Builder builder = new Job.Builder("job_id");
builder.setAnalysisConfig(acBuilder); builder.setAnalysisConfig(acBuilder);
builder.setDataDescription(new DataDescription.Builder()); builder.setDataDescription(new DataDescription.Builder());
job = builder.build(new Date()); job = createJob(TimeValue.timeValueMillis(BUCKET_SPAN), null);
} }
public void testIncompleteBuckets() { public void testIncompleteBuckets() {
@ -80,6 +79,7 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
assertEquals(null, d.getLatestSparseBucketTime()); assertEquals(null, d.getLatestSparseBucketTime());
assertEquals(new Date(BUCKET_SPAN * 2), d.getLatestEmptyBucketTime()); assertEquals(new Date(BUCKET_SPAN * 2), d.getLatestEmptyBucketTime());
} }
public void testSimple() { public void testSimple() {
DataStreamDiagnostics d = new DataStreamDiagnostics(job); DataStreamDiagnostics d = new DataStreamDiagnostics(job);
@ -102,6 +102,58 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
assertEquals(null, d.getLatestEmptyBucketTime()); assertEquals(null, d.getLatestEmptyBucketTime());
} }
public void testSimpleReverse() {
DataStreamDiagnostics d = new DataStreamDiagnostics(job);
d.checkRecord(610000);
d.checkRecord(550000);
d.checkRecord(490000);
d.checkRecord(430000);
d.checkRecord(370000);
d.checkRecord(310000);
d.checkRecord(250000);
d.checkRecord(190000);
d.checkRecord(130000);
d.checkRecord(70000);
d.flush();
assertEquals(9, d.getBucketCount());
assertEquals(0, d.getEmptyBucketCount());
assertEquals(0, d.getSparseBucketCount());
assertEquals(null, d.getLatestSparseBucketTime());
assertEquals(null, d.getLatestEmptyBucketTime());
}
public void testWithLatencyLessThanTenBuckets() {
job = createJob(TimeValue.timeValueMillis(BUCKET_SPAN), TimeValue.timeValueMillis(3 * BUCKET_SPAN));
DataStreamDiagnostics d = new DataStreamDiagnostics(job);
long timestamp = 70000;
while (timestamp < 70000 + 20 * BUCKET_SPAN) {
sendManyDataPoints(d, timestamp - BUCKET_SPAN, timestamp + timestamp, 100);
timestamp += BUCKET_SPAN;
}
assertEquals(10, d.getBucketCount());
d.flush();
assertEquals(19, d.getBucketCount());
}
public void testWithLatencyGreaterThanTenBuckets() {
job = createJob(TimeValue.timeValueMillis(BUCKET_SPAN), TimeValue.timeValueMillis(13 * BUCKET_SPAN + 10000));
DataStreamDiagnostics d = new DataStreamDiagnostics(job);
long timestamp = 70000;
while (timestamp < 70000 + 20 * BUCKET_SPAN) {
sendManyDataPoints(d, timestamp - BUCKET_SPAN, timestamp + timestamp, 100);
timestamp += BUCKET_SPAN;
}
assertEquals(6, d.getBucketCount());
d.flush();
assertEquals(19, d.getBucketCount());
}
public void testEmptyBuckets() { public void testEmptyBuckets() {
DataStreamDiagnostics d = new DataStreamDiagnostics(job); DataStreamDiagnostics d = new DataStreamDiagnostics(job);
@ -280,7 +332,7 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
/** /**
* Send signals, make a longer period of sparse signals, then go up again * Send signals, make a longer period of sparse signals, then go up again
* *
* The number of sparse buckets should not be too high, as this pattern could be normal. * The number of sparse buckets should not be too high, as this pattern could be normal.
*/ */
public void testSparseBucketsLongerPeriod() { public void testSparseBucketsLongerPeriod() {
@ -307,6 +359,20 @@ public class DataStreamDiagnosticsTests extends ESTestCase {
assertEquals(null, d.getLatestEmptyBucketTime()); assertEquals(null, d.getLatestEmptyBucketTime());
} }
private static Job createJob(TimeValue bucketSpan, TimeValue latency) {
AnalysisConfig.Builder acBuilder = new AnalysisConfig.Builder(Arrays.asList(new Detector.Builder("metric", "field").build()));
acBuilder.setBucketSpan(bucketSpan);
if (latency != null) {
acBuilder.setLatency(latency);
}
acBuilder.setDetectors(Arrays.asList(new Detector.Builder("metric", "field").build()));
Job.Builder builder = new Job.Builder("job_id");
builder.setAnalysisConfig(acBuilder);
builder.setDataDescription(new DataDescription.Builder());
return builder.build(new Date());
}
public void testFlushAfterZeroRecords() { public void testFlushAfterZeroRecords() {
DataStreamDiagnostics d = new DataStreamDiagnostics(job); DataStreamDiagnostics d = new DataStreamDiagnostics(job);
d.flush(); d.flush();
View File
@ -17,15 +17,14 @@ import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.xpack.core.XPackField;
import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.core.security.authc.support.Hasher; import org.elasticsearch.xpack.core.security.authc.support.Hasher;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.core.security.support.Validation; import org.elasticsearch.xpack.core.security.support.Validation;
import org.elasticsearch.xpack.core.security.support.Validation.Users; import org.elasticsearch.xpack.core.security.support.Validation.Users;
import org.elasticsearch.xpack.security.authc.file.FileUserPasswdStore; import org.elasticsearch.xpack.security.authc.file.FileUserPasswdStore;
import org.elasticsearch.xpack.security.authc.file.FileUserRolesStore; import org.elasticsearch.xpack.security.authc.file.FileUserRolesStore;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
import org.elasticsearch.xpack.security.support.FileAttributesChecker; import org.elasticsearch.xpack.security.support.FileAttributesChecker;
import java.nio.file.Files; import java.nio.file.Files;
@ -47,7 +46,7 @@ public class UsersTool extends LoggingAwareMultiCommand {
} }
UsersTool() { UsersTool() {
super("Manages elasticsearch native users"); super("Manages elasticsearch file users");
subcommands.put("useradd", newAddUserCommand()); subcommands.put("useradd", newAddUserCommand());
subcommands.put("userdel", newDeleteUserCommand()); subcommands.put("userdel", newDeleteUserCommand());
subcommands.put("passwd", newPasswordCommand()); subcommands.put("passwd", newPasswordCommand());
@ -82,7 +81,7 @@ public class UsersTool extends LoggingAwareMultiCommand {
private final OptionSpec<String> arguments; private final OptionSpec<String> arguments;
AddUserCommand() { AddUserCommand() {
super("Adds a native user"); super("Adds a file user");
this.passwordOption = parser.acceptsAll(Arrays.asList("p", "password"), this.passwordOption = parser.acceptsAll(Arrays.asList("p", "password"),
"The user password") "The user password")
@ -96,11 +95,8 @@ public class UsersTool extends LoggingAwareMultiCommand {
@Override @Override
protected void printAdditionalHelp(Terminal terminal) { protected void printAdditionalHelp(Terminal terminal) {
terminal.println("Adds a file based user to elasticsearch (via internal realm). The user will"); terminal.println("Adds a file based user to elasticsearch (via internal realm). The user will");
terminal.println("be added to the users file and its roles will be added to the"); terminal.println("be added to the \"users\" file and its roles will be added to the");
terminal.println("users_roles file. If non-default files are used (different file"); terminal.println("\"users_roles\" file in the elasticsearch config directory.");
terminal.println("locations are configured in elasticsearch.yml) the appropriate files");
terminal.println("will be resolved from the settings and the user and its roles will be");
terminal.println("added to them.");
terminal.println(""); terminal.println("");
} }
@@ -123,7 +119,7 @@ public class UsersTool extends LoggingAwareMultiCommand {
Map<String, char[]> users = FileUserPasswdStore.parseFile(passwordFile, null, env.settings()); Map<String, char[]> users = FileUserPasswdStore.parseFile(passwordFile, null, env.settings());
if (users == null) { if (users == null) {
throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); throw new UserException(ExitCodes.CONFIG, "Configuration file [" + passwordFile + "] is missing");
} }
if (users.containsKey(username)) { if (users.containsKey(username)) {
throw new UserException(ExitCodes.CODE_ERROR, "User [" + username + "] already exists"); throw new UserException(ExitCodes.CODE_ERROR, "User [" + username + "] already exists");
@@ -155,11 +151,8 @@ public class UsersTool extends LoggingAwareMultiCommand {
@Override @Override
protected void printAdditionalHelp(Terminal terminal) { protected void printAdditionalHelp(Terminal terminal) {
terminal.println("Removes an existing file based user from elasticsearch. The user will be"); terminal.println("Removes an existing file based user from elasticsearch. The user will be");
terminal.println("removed from the users file and its roles will be removed from the"); terminal.println("removed from the \"users\" file and its roles will be removed from the");
terminal.println("users_roles file. If non-default files are used (different file"); terminal.println("\"users_roles\" file in the elasticsearch config directory.");
terminal.println("locations are configured in elasticsearch.yml) the appropriate files");
terminal.println("will be resolved from the settings and the user and its roles will be");
terminal.println("removed from them.");
terminal.println(""); terminal.println("");
} }
@@ -173,7 +166,7 @@ public class UsersTool extends LoggingAwareMultiCommand {
Map<String, char[]> users = FileUserPasswdStore.parseFile(passwordFile, null, env.settings()); Map<String, char[]> users = FileUserPasswdStore.parseFile(passwordFile, null, env.settings());
if (users == null) { if (users == null) {
throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); throw new UserException(ExitCodes.CONFIG, "Configuration file [" + passwordFile + "] is missing");
} }
if (users.containsKey(username) == false) { if (users.containsKey(username) == false) {
throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist");
@@ -213,12 +206,10 @@ public class UsersTool extends LoggingAwareMultiCommand {
@Override @Override
protected void printAdditionalHelp(Terminal terminal) { protected void printAdditionalHelp(Terminal terminal) {
terminal.println("The passwd command changes passwords for files based users. The tool"); terminal.println("The passwd command changes passwords for file based users. The tool");
terminal.println("prompts twice for a replacement password. The second entry is compared"); terminal.println("prompts twice for a replacement password. The second entry is compared");
terminal.println("against the first and both are required to match in order for the"); terminal.println("against the first and both are required to match in order for the");
terminal.println("password to be changed. If non-default users file is used (a different"); terminal.println("password to be changed.");
terminal.println("file location is configured in elasticsearch.yml) the appropriate file");
terminal.println("will be resolved from the settings.");
terminal.println(""); terminal.println("");
} }
@@ -232,7 +223,7 @@ public class UsersTool extends LoggingAwareMultiCommand {
FileAttributesChecker attributesChecker = new FileAttributesChecker(file); FileAttributesChecker attributesChecker = new FileAttributesChecker(file);
Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(file, null, env.settings())); Map<String, char[]> users = new HashMap<>(FileUserPasswdStore.parseFile(file, null, env.settings()));
if (users == null) { if (users == null) {
throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); throw new UserException(ExitCodes.CONFIG, "Configuration file [" + file + "] is missing");
} }
if (users.containsKey(username) == false) { if (users.containsKey(username) == false) {
throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist");
@@ -345,19 +336,19 @@ public class UsersTool extends LoggingAwareMultiCommand {
Path userRolesFilePath = FileUserRolesStore.resolveFile(env); Path userRolesFilePath = FileUserRolesStore.resolveFile(env);
Map<String, String[]> userRoles = FileUserRolesStore.parseFile(userRolesFilePath, null); Map<String, String[]> userRoles = FileUserRolesStore.parseFile(userRolesFilePath, null);
if (userRoles == null) { if (userRoles == null) {
throw new UserException(ExitCodes.CONFIG, "Configuration file [users_roles] is missing"); throw new UserException(ExitCodes.CONFIG, "Configuration file [" + userRolesFilePath + "] is missing");
} }
Path userFilePath = FileUserPasswdStore.resolveFile(env); Path userFilePath = FileUserPasswdStore.resolveFile(env);
Map<String, char[]> users = FileUserPasswdStore.parseFile(userFilePath, null, env.settings()); Map<String, char[]> users = FileUserPasswdStore.parseFile(userFilePath, null, env.settings());
if (users == null) { if (users == null) {
throw new UserException(ExitCodes.CONFIG, "Configuration file [users] is missing"); throw new UserException(ExitCodes.CONFIG, "Configuration file [" + userFilePath + "] is missing");
} }
Path rolesFilePath = FileRolesStore.resolveFile(env); Path rolesFilePath = FileRolesStore.resolveFile(env);
Set<String> knownRoles = Sets.union(FileRolesStore.parseFileForRoleNames(rolesFilePath, null), ReservedRolesStore.names()); Set<String> knownRoles = Sets.union(FileRolesStore.parseFileForRoleNames(rolesFilePath, null), ReservedRolesStore.names());
if (knownRoles == null) { if (knownRoles == null) {
throw new UserException(ExitCodes.CONFIG, "Configuration file [roles.xml] is missing"); throw new UserException(ExitCodes.CONFIG, "Configuration file [" + rolesFilePath + "] is missing");
} }
if (username != null) { if (username != null) {
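
Each of these messages now interpolates the path returned by the corresponding resolveFile call, so the error stays accurate when non-default file locations are configured. A minimal sketch of the pattern, with parseUsersFile standing in for FileUserPasswdStore.parseFile:

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Collections;
    import java.util.Map;

    class MissingFileErrorSketch {
        // Report the resolved path, not a hard-coded name such as [users].
        static Map<String, char[]> loadUsers(Path passwordFile) {
            Map<String, char[]> users = parseUsersFile(passwordFile);
            if (users == null) {
                throw new IllegalStateException(
                        "Configuration file [" + passwordFile + "] is missing");
            }
            return users;
        }

        // Hypothetical parser: null signals a missing file, as in the code above.
        private static Map<String, char[]> parseUsersFile(Path file) {
            return Files.exists(file) ? Collections.emptyMap() : null;
        }
    }
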

View File

@@ -56,6 +56,11 @@ public class CliRepl {
multiLine.setLength(0); multiLine.setLength(0);
} }
// Skip empty commands
if (line.isEmpty()) {
continue;
}
// special case to handle exit // special case to handle exit
if (isExit(line)) { if (isExit(line)) {
cliTerminal.line().em("Bye!").ln(); cliTerminal.line().em("Bye!").ln();
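
The new guard means blank input never reaches the command handler or the server. A minimal REPL-loop sketch of the same idea (Scanner stands in for the CLI terminal; the multi-line buffering above is omitted):

    import java.util.Scanner;

    public class ReplSkipEmptySketch {
        public static void main(String[] args) {
            Scanner in = new Scanner(System.in);
            while (in.hasNextLine()) {
                String line = in.nextLine().trim();
                // Skip empty commands, including a bare terminator.
                if (line.isEmpty() || line.equals(";")) {
                    continue;
                }
                // Special case to handle exit, as above.
                if (line.equals("exit;")) {
                    System.out.println("Bye!");
                    break;
                }
                System.out.println("would send: " + line);
            }
        }
    }
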

View File

@@ -38,6 +38,28 @@ public class CliReplTests extends ESTestCase {
verifyNoMoreInteractions(mockCommand, mockSession); verifyNoMoreInteractions(mockCommand, mockSession);
} }
/**
* Test that empty commands are skipped. This includes commands that are
* just new lines.
*/
public void testEmptyNotSent() {
CliTerminal cliTerminal = new TestTerminal(
";",
"",
"",
";",
"exit;"
);
CliSession mockSession = mock(CliSession.class);
CliCommand mockCommand = mock(CliCommand.class);
CliRepl cli = new CliRepl(cliTerminal, mockSession, mockCommand);
cli.execute();
verify(mockCommand, times(1)).handle(cliTerminal, mockSession, "logo");
verifyNoMoreInteractions(mockSession, mockCommand);
}
public void testFatalCliExceptionHandling() throws Exception { public void testFatalCliExceptionHandling() throws Exception {
CliTerminal cliTerminal = new TestTerminal( CliTerminal cliTerminal = new TestTerminal(

View File

@@ -47,7 +47,7 @@ public class WatcherMetaDataSerializationTests extends ESTestCase {
boolean manuallyStopped = randomBoolean(); boolean manuallyStopped = randomBoolean();
WatcherMetaData watcherMetaData = new WatcherMetaData(manuallyStopped); WatcherMetaData watcherMetaData = new WatcherMetaData(manuallyStopped);
RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("repo", "fs", Settings.EMPTY);
RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(repositoryMetaData); RepositoriesMetaData repositoriesMetaData = new RepositoriesMetaData(Collections.singletonList(repositoryMetaData));
final MetaData.Builder metaDataBuilder = MetaData.builder(); final MetaData.Builder metaDataBuilder = MetaData.builder();
if (randomBoolean()) { // random order of insertion if (randomBoolean()) { // random order of insertion
metaDataBuilder.putCustom(watcherMetaData.getWriteableName(), watcherMetaData); metaDataBuilder.putCustom(watcherMetaData.getWriteableName(), watcherMetaData);
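
This call site adapts to a RepositoriesMetaData constructor that takes a List<RepositoryMetaData> rather than varargs, so the single entry is wrapped explicitly. A tiny illustration of that wrapping, with String standing in for RepositoryMetaData:

    import java.util.Collections;
    import java.util.List;

    public class SingletonListSketch {
        public static void main(String[] args) {
            // An immutable one-element list, suitable for a list-taking constructor.
            List<String> repositories = Collections.singletonList("repo");
            System.out.println(repositories);
        }
    }
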

View File

@@ -13,6 +13,7 @@ integTestCluster {
setting 'xpack.watcher.enabled', 'false' setting 'xpack.watcher.enabled', 'false'
setting 'xpack.monitoring.enabled', 'false' setting 'xpack.monitoring.enabled', 'false'
setting 'xpack.ml.enabled', 'false' setting 'xpack.ml.enabled', 'false'
setting 'logger.org.elasticsearch.xpack.security.authc', 'TRACE'
extraConfigFile 'roles.yml', 'roles.yml' extraConfigFile 'roles.yml', 'roles.yml'
setupCommand 'setup-test-user', 'bin/elasticsearch-users', 'useradd', 'test-user', '-p', 'x-pack-test-password', '-r', 'test' setupCommand 'setup-test-user', 'bin/elasticsearch-users', 'useradd', 'test-user', '-p', 'x-pack-test-password', '-r', 'test'
setupCommand 'setup-super-user', 'bin/elasticsearch-users', 'useradd', 'super-user', '-p', 'x-pack-super-password', '-r', 'superuser' setupCommand 'setup-super-user', 'bin/elasticsearch-users', 'useradd', 'super-user', '-p', 'x-pack-super-password', '-r', 'superuser'
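
The TRACE line above bakes the logger level into the test cluster's static configuration; on a live cluster the same logger can also be raised dynamically through the cluster settings API. A hedged sketch using the high-level REST client (assumes an already-constructed RestHighLevelClient; error handling elided):

    import java.io.IOException;

    import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
    import org.elasticsearch.client.RestHighLevelClient;
    import org.elasticsearch.common.settings.Settings;

    class TraceLoggingSketch {
        // Flip the security authc logger to TRACE as a transient cluster setting.
        static void enableAuthcTrace(RestHighLevelClient client) throws IOException {
            ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
            request.transientSettings(Settings.builder()
                    .put("logger.org.elasticsearch.xpack.security.authc", "TRACE")
                    .build());
            client.cluster().putSettings(request);
        }
    }
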

Some files were not shown because too many files have changed in this diff.