Merge branch 'master' into index-lifecycle
commit 976935967b
@@ -34,6 +34,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;

@@ -269,6 +270,28 @@ public final class IndicesClient {
                listener, emptySet(), headers);
    }

    /** Initiate a synced flush manually using the synced flush API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html">
     * Synced flush API on elastic.co</a>
     */
    public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Header... headers) throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced,
                SyncedFlushResponse::fromXContent, emptySet(), headers);
    }

    /**
     * Asynchronously initiate a synced flush manually using the synced flush API
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html">
     * Synced flush API on elastic.co</a>
     */
    public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, ActionListener<SyncedFlushResponse> listener, Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced,
                SyncedFlushResponse::fromXContent, listener, emptySet(), headers);
    }

    /**
     * Retrieve the settings of one or more indices
     * <p>
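
A minimal usage sketch of the two methods added above, assuming an already-constructed RestHighLevelClient named "client" and an existing index "index1" (both names are illustrative, not part of the commit):

    // Synchronous variant: blocks until the synced flush response has been parsed.
    SyncedFlushRequest request = new SyncedFlushRequest("index1");
    SyncedFlushResponse response = client.indices().flushSynced(request);

    // Asynchronous variant: the listener is notified once the call completes or fails.
    client.indices().flushSyncedAsync(request, new ActionListener<SyncedFlushResponse>() {
        @Override
        public void onResponse(SyncedFlushResponse syncedFlushResponse) {
            // inspect totalShards(), successfulShards(), failedShards()
        }

        @Override
        public void onFailure(Exception e) {
            // handle the failure
        }
    });
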
@@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;

@@ -41,6 +42,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;

@@ -211,6 +213,14 @@ final class RequestConverters {
        return request;
    }

    static Request flushSynced(SyncedFlushRequest syncedFlushRequest) {
        String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush/synced"));
        Params parameters = new Params(request);
        parameters.withIndicesOptions(syncedFlushRequest.indicesOptions());
        return request;
    }

    static Request forceMerge(ForceMergeRequest forceMergeRequest) {
        String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge"));

@@ -738,6 +748,19 @@ final class RequestConverters {
        return request;
    }

    static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot")
            .addPathPart(verifyRepositoryRequest.name())
            .addPathPartAsIs("_verify")
            .build();
        Request request = new Request(HttpPost.METHOD_NAME, endpoint);

        Params parameters = new Params(request);
        parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout());
        parameters.withTimeout(verifyRepositoryRequest.timeout());
        return request;
    }

    static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
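
To make the output of the flushSynced converter concrete, a small sketch that mirrors RequestConvertersTests.testSyncedFlush further down in this diff (the index names are illustrative):

    SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest("index1", "index2");
    Request request = RequestConverters.flushSynced(syncedFlushRequest);
    // request.getMethod()   -> "POST"
    // request.getEndpoint() -> "/index1,index2/_flush/synced"
    // request.getEntity()   -> null; only the indices options become query parameters
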
@@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;

import java.io.IOException;

@@ -116,4 +118,28 @@ public final class SnapshotClient {
        restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository,
            DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers);
    }

    /**
     * Verifies a snapshot repository.
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
     * API on elastic.co</a>
     */
    public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, Header... headers)
        throws IOException {
        return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository,
            VerifyRepositoryResponse::fromXContent, emptySet(), headers);
    }

    /**
     * Asynchronously verifies a snapshot repository.
     * <p>
     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
     * API on elastic.co</a>
     */
    public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest,
                                      ActionListener<VerifyRepositoryResponse> listener, Header... headers) {
        restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository,
            VerifyRepositoryResponse::fromXContent, listener, emptySet(), headers);
    }
}
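
A hedged usage sketch for the pair of methods just added, assuming a RestHighLevelClient named "client" and a repository named "test" that has already been registered (as in SnapshotIT.testVerifyRepository later in this diff):

    VerifyRepositoryRequest request = new VerifyRepositoryRequest("test");
    VerifyRepositoryResponse response = client.snapshot().verifyRepository(request);
    // Each NodeView describes a node that successfully verified the repository.
    List<VerifyRepositoryResponse.NodeView> nodes = response.getNodes();
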
@@ -0,0 +1,344 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

import java.io.IOException;
import java.util.Map;
import java.util.HashMap;
import java.util.Collections;
import java.util.List;
import java.util.ArrayList;

public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment {

    public static final String SHARDS_FIELD = "_shards";

    private ShardCounts totalCounts;
    private Map<String, IndexResult> indexResults;

    SyncedFlushResponse(ShardCounts totalCounts, Map<String, IndexResult> indexResults) {
        this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed);
        this.indexResults = Collections.unmodifiableMap(indexResults);
    }

    /**
     * @return The total number of shard copies that were processed across all indexes
     */
    public int totalShards() {
        return totalCounts.total;
    }

    /**
     * @return The number of successful shard copies that were processed across all indexes
     */
    public int successfulShards() {
        return totalCounts.successful;
    }

    /**
     * @return The number of failed shard copies that were processed across all indexes
     */
    public int failedShards() {
        return totalCounts.failed;
    }

    /**
     * @return A map of results for each index where the keys of the map are the index names
     *         and the values are the results encapsulated in {@link IndexResult}.
     */
    public Map<String, IndexResult> getIndexResults() {
        return indexResults;
    }

    ShardCounts getShardCounts() {
        return totalCounts;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(SHARDS_FIELD);
        totalCounts.toXContent(builder, params);
        builder.endObject();
        for (Map.Entry<String, IndexResult> entry: indexResults.entrySet()) {
            String indexName = entry.getKey();
            IndexResult indexResult = entry.getValue();
            builder.startObject(indexName);
            indexResult.toXContent(builder, params);
            builder.endObject();
        }
        return builder;
    }

    public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
        ShardCounts totalCounts = null;
        Map<String, IndexResult> indexResults = new HashMap<>();
        XContentLocation startLoc = parser.getTokenLocation();
        while (parser.nextToken().equals(Token.FIELD_NAME)) {
            if (parser.currentName().equals(SHARDS_FIELD)) {
                ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
                totalCounts = ShardCounts.fromXContent(parser);
            } else {
                String indexName = parser.currentName();
                IndexResult indexResult = IndexResult.fromXContent(parser);
                indexResults.put(indexName, indexResult);
            }
        }
        if (totalCounts != null) {
            return new SyncedFlushResponse(totalCounts, indexResults);
        } else {
            throw new ParsingException(
                startLoc,
                "Unable to reconstruct object. Total counts for shards couldn't be parsed."
            );
        }
    }

    /**
     * Encapsulates the number of total, successful and failed shard copies
     */
    public static final class ShardCounts implements ToXContentFragment {

        public static final String TOTAL_FIELD = "total";
        public static final String SUCCESSFUL_FIELD = "successful";
        public static final String FAILED_FIELD = "failed";

        private static final ConstructingObjectParser<ShardCounts, Void> PARSER =
            new ConstructingObjectParser<>(
                "shardcounts",
                a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2])
            );
        static {
            PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
            PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
            PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
        }

        private int total;
        private int successful;
        private int failed;

        ShardCounts(int total, int successful, int failed) {
            this.total = total;
            this.successful = successful;
            this.failed = failed;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.field(TOTAL_FIELD, total);
            builder.field(SUCCESSFUL_FIELD, successful);
            builder.field(FAILED_FIELD, failed);
            return builder;
        }

        public static ShardCounts fromXContent(XContentParser parser) throws IOException {
            return PARSER.parse(parser, null);
        }

        public boolean equals(ShardCounts other) {
            if (other != null) {
                return
                    other.total == this.total &&
                    other.successful == this.successful &&
                    other.failed == this.failed;
            } else {
                return false;
            }
        }

    }

    /**
     * Description for the flush/synced results for a particular index.
     * This includes total, successful and failed copies along with failure description for each failed copy.
     */
    public static final class IndexResult implements ToXContentFragment {

        public static final String TOTAL_FIELD = "total";
        public static final String SUCCESSFUL_FIELD = "successful";
        public static final String FAILED_FIELD = "failed";
        public static final String FAILURES_FIELD = "failures";

        @SuppressWarnings("unchecked")
        private static final ConstructingObjectParser<IndexResult, Void> PARSER =
            new ConstructingObjectParser<>(
                "indexresult",
                a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List<ShardFailure>)a[3])
            );
        static {
            PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
            PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
            PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
            PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD));
        }

        private ShardCounts counts;
        private List<ShardFailure> failures;

        IndexResult(int total, int successful, int failed, List<ShardFailure> failures) {
            counts = new ShardCounts(total, successful, failed);
            if (failures != null) {
                this.failures = Collections.unmodifiableList(failures);
            } else {
                this.failures = Collections.unmodifiableList(new ArrayList<>());
            }
        }

        /**
         * @return The total number of shard copies that were processed for this index.
         */
        public int totalShards() {
            return counts.total;
        }

        /**
         * @return The number of successful shard copies that were processed for this index.
         */
        public int successfulShards() {
            return counts.successful;
        }

        /**
         * @return The number of failed shard copies that were processed for this index.
         */
        public int failedShards() {
            return counts.failed;
        }

        /**
         * @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index.
         */
        public List<ShardFailure> failures() {
            return failures;
        }

        ShardCounts getShardCounts() {
            return counts;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            counts.toXContent(builder, params);
            if (failures.size() > 0) {
                builder.startArray(FAILURES_FIELD);
                for (ShardFailure failure : failures) {
                    failure.toXContent(builder, params);
                }
                builder.endArray();
            }
            return builder;
        }

        public static IndexResult fromXContent(XContentParser parser) throws IOException {
            return PARSER.parse(parser, null);
        }
    }

    /**
     * Description of a failed shard copy for an index.
     */
    public static final class ShardFailure implements ToXContentFragment {

        public static String SHARD_ID_FIELD = "shard";
        public static String FAILURE_REASON_FIELD = "reason";
        public static String ROUTING_FIELD = "routing";

        private int shardId;
        private String failureReason;
        private Map<String, Object> routing;

        @SuppressWarnings("unchecked")
        static ConstructingObjectParser<ShardFailure, Void> PARSER = new ConstructingObjectParser<>(
            "shardfailure",
            a -> new ShardFailure((Integer)a[0], (String)a[1], (Map<String, Object>)a[2])
        );
        static {
            PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD));
            PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD));
            PARSER.declareObject(
                optionalConstructorArg(),
                (parser, c) -> parser.map(),
                new ParseField(ROUTING_FIELD)
            );
        }

        ShardFailure(int shardId, String failureReason, Map<String, Object> routing) {
            this.shardId = shardId;
            this.failureReason = failureReason;
            if (routing != null) {
                this.routing = Collections.unmodifiableMap(routing);
            } else {
                this.routing = Collections.unmodifiableMap(new HashMap<>());
            }
        }

        /**
         * @return Id of the shard whose copy failed
         */
        public int getShardId() {
            return shardId;
        }

        /**
         * @return Reason for failure of the shard copy
         */
        public String getFailureReason() {
            return failureReason;
        }

        /**
         * @return Additional information about the failure.
         */
        public Map<String, Object> getRouting() {
            return routing;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(SHARD_ID_FIELD, shardId);
            builder.field(FAILURE_REASON_FIELD, failureReason);
            if (routing.size() > 0) {
                builder.field(ROUTING_FIELD, routing);
            }
            builder.endObject();
            return builder;
        }

        public static ShardFailure fromXContent(XContentParser parser) throws IOException {
            return PARSER.parse(parser, null);
        }
    }
}
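
A rough sketch of how the parser above consumes a typical REST synced-flush body. The JSON literal and the parser construction below are illustrative assumptions, not part of the commit:

    String body = "{\"_shards\":{\"total\":2,\"successful\":2,\"failed\":0},"
            + "\"index1\":{\"total\":2,\"successful\":2,\"failed\":0}}";
    try (XContentParser parser = XContentType.JSON.xContent().createParser(
            NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, body)) {
        SyncedFlushResponse response = SyncedFlushResponse.fromXContent(parser);
        // response.totalShards() == 2, response.failedShards() == 0,
        // response.getIndexResults() holds a single entry keyed by "index1"
    }
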
@@ -38,6 +38,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;

@@ -563,6 +564,39 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
        }
    }

    public void testSyncedFlush() throws IOException {
        {
            String index = "index";
            Settings settings = Settings.builder()
                .put("number_of_shards", 1)
                .put("number_of_replicas", 0)
                .build();
            createIndex(index, settings);
            SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index);
            SyncedFlushResponse flushResponse =
                execute(syncedFlushRequest, highLevelClient().indices()::flushSynced, highLevelClient().indices()::flushSyncedAsync);
            assertThat(flushResponse.totalShards(), equalTo(1));
            assertThat(flushResponse.successfulShards(), equalTo(1));
            assertThat(flushResponse.failedShards(), equalTo(0));
        }
        {
            String nonExistentIndex = "non_existent_index";
            assertFalse(indexExists(nonExistentIndex));
            SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex);
            ElasticsearchException exception = expectThrows(
                ElasticsearchException.class,
                () ->
                    execute(
                        syncedFlushRequest,
                        highLevelClient().indices()::flushSynced,
                        highLevelClient().indices()::flushSyncedAsync
                    )
            );
            assertEquals(RestStatus.NOT_FOUND, exception.status());
        }
    }

    public void testClearCache() throws IOException {
        {
            String index = "index";
@@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;

@@ -43,6 +44,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;

@@ -645,6 +647,29 @@ public class RequestConvertersTests extends ESTestCase {
        assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
    }

    public void testSyncedFlush() {
        String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5);
        SyncedFlushRequest syncedFlushRequest;
        if (randomBoolean()) {
            syncedFlushRequest = new SyncedFlushRequest(indices);
        } else {
            syncedFlushRequest = new SyncedFlushRequest();
            syncedFlushRequest.indices(indices);
        }
        Map<String, String> expectedParams = new HashMap<>();
        setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions, expectedParams);
        Request request = RequestConverters.flushSynced(syncedFlushRequest);
        StringJoiner endpoint = new StringJoiner("/", "/", "");
        if (indices != null && indices.length > 0) {
            endpoint.add(String.join(",", indices));
        }
        endpoint.add("_flush/synced");
        assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
        assertThat(request.getParameters(), equalTo(expectedParams));
        assertThat(request.getEntity(), nullValue());
        assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
    }

    public void testForceMerge() {
        String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5);
        ForceMergeRequest forceMergeRequest;

@@ -1608,6 +1633,21 @@ public class RequestConvertersTests extends ESTestCase {
        assertNull(request.getEntity());
    }

    public void testVerifyRepository() {
        Map<String, String> expectedParams = new HashMap<>();
        String repository = randomIndicesNames(1, 1)[0];
        String endpoint = "/_snapshot/" + repository + "/_verify";

        VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(repository);
        setRandomMasterTimeout(verifyRepositoryRequest, expectedParams);
        setRandomTimeout(verifyRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);

        Request request = RequestConverters.verifyRepository(verifyRepositoryRequest);
        assertThat(endpoint, equalTo(request.getEndpoint()));
        assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod()));
        assertThat(expectedParams, equalTo(request.getParameters()));
    }

    public void testPutTemplateRequest() throws Exception {
        Map<String, String> names = new HashMap<>();
        names.put("log", "log");
@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;

@@ -86,10 +88,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {

    public void testSnapshotDeleteRepository() throws IOException {
        String repository = "test";
        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";

        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository,
            Collections.emptyMap(), new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));
        assertTrue(createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());

        GetRepositoriesRequest request = new GetRepositoriesRequest();
        GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,

@@ -102,4 +101,14 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {

        assertTrue(deleteResponse.isAcknowledged());
    }

    public void testVerifyRepository() throws IOException {
        PutRepositoryResponse putRepositoryResponse = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}");
        assertTrue(putRepositoryResponse.isAcknowledged());

        VerifyRepositoryRequest request = new VerifyRepositoryRequest("test");
        VerifyRepositoryResponse response = execute(request, highLevelClient().snapshot()::verifyRepository,
            highLevelClient().snapshot()::verifyRepositoryAsync);
        assertThat(response.getNodes().size(), equalTo(1));
    }
}
@@ -0,0 +1,269 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.client;

import java.io.IOException;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntMap;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.test.ESTestCase;

public class SyncedFlushResponseTests extends ESTestCase {

    public void testXContentSerialization() throws IOException {
        final XContentType xContentType = randomFrom(XContentType.values());
        TestPlan plan = createTestPlan();

        XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent());
        assertNotNull(plan.result);
        serverResponsebuilder.startObject();
        plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS);
        serverResponsebuilder.endObject();
        XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent());
        assertNotNull(plan.result);
        clientResponsebuilder.startObject();
        plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS);
        clientResponsebuilder.endObject();
        Map<String, Object> serverContentMap = convertFailureListToSet(
            serverResponsebuilder
                .generator()
                .contentType()
                .xContent()
                .createParser(
                    xContentRegistry(),
                    LoggingDeprecationHandler.INSTANCE,
                    BytesReference.bytes(serverResponsebuilder).streamInput()
                ).map()
        );
        Map<String, Object> clientContentMap = convertFailureListToSet(
            clientResponsebuilder
                .generator()
                .contentType()
                .xContent()
                .createParser(
                    xContentRegistry(),
                    LoggingDeprecationHandler.INSTANCE,
                    BytesReference.bytes(clientResponsebuilder).streamInput()
                )
                .map()
        );
        assertEquals(serverContentMap, clientContentMap);
    }

    public void testXContentDeserialization() throws IOException {
        final XContentType xContentType = randomFrom(XContentType.values());
        TestPlan plan = createTestPlan();
        XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
        builder.startObject();
        plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        XContentParser parser = builder
            .generator()
            .contentType()
            .xContent()
            .createParser(
                xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()
            );
        SyncedFlushResponse originalResponse = plan.clientResult;
        SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser);
        assertNotNull(parsedResponse);
        assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts());
        for (Map.Entry<String, SyncedFlushResponse.IndexResult> entry: originalResponse.getIndexResults().entrySet()) {
            String index = entry.getKey();
            SyncedFlushResponse.IndexResult responseResult = entry.getValue();
            SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index);
            assertNotNull(responseResult);
            assertNotNull(parsedResult);
            assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts());
            assertEquals(responseResult.failures().size(), parsedResult.failures().size());
            for (SyncedFlushResponse.ShardFailure responseShardFailure: responseResult.failures()) {
                assertTrue(containsFailure(parsedResult.failures(), responseShardFailure));
            }
        }
    }

    static class TestPlan {
        SyncedFlushResponse.ShardCounts totalCounts;
        Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
        ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
        org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse result;
        SyncedFlushResponse clientResult;
    }

    TestPlan createTestPlan() throws IOException {
        final TestPlan testPlan = new TestPlan();
        final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
        Map<String, SyncedFlushResponse.IndexResult> indexResults = new HashMap<>();
        final XContentType xContentType = randomFrom(XContentType.values());
        final int indexCount = randomIntBetween(1, 10);
        int totalShards = 0;
        int totalSuccessful = 0;
        int totalFailed = 0;
        for (int i = 0; i < indexCount; i++) {
            final String index = "index_" + i;
            int shards = randomIntBetween(1, 4);
            int replicas = randomIntBetween(0, 2);
            int successful = 0;
            int failed = 0;
            int failures = 0;
            List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
            List<SyncedFlushResponse.ShardFailure> shardFailures = new ArrayList<>();
            for (int shard = 0; shard < shards; shard++) {
                final ShardId shardId = new ShardId(index, "_na_", shard);
                if (randomInt(5) < 2) {
                    // total shard failure
                    failed += replicas + 1;
                    failures++;
                    shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
                    shardFailures.add(
                        new SyncedFlushResponse.ShardFailure(
                            shardId.id(),
                            "simulated total failure",
                            new HashMap<>()
                        )
                    );
                } else {
                    Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
                    for (int copy = 0; copy < replicas + 1; copy++) {
                        final ShardRouting shardRouting =
                            TestShardRouting.newShardRouting(
                                index, shard, "node_" + shardId + "_" + copy, null,
                                copy == 0, ShardRoutingState.STARTED
                            );
                        if (randomInt(5) < 2) {
                            // shard copy failure
                            failed++;
                            failures++;
                            shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
                            // Building the shardRouting map here.
                            XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
                            Map<String, Object> routing =
                                shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS)
                                    .generator()
                                    .contentType()
                                    .xContent()
                                    .createParser(
                                        xContentRegistry(), LoggingDeprecationHandler.INSTANCE,
                                        BytesReference.bytes(builder).streamInput()
                                    )
                                    .map();
                            shardFailures.add(
                                new SyncedFlushResponse.ShardFailure(
                                    shardId.id(),
                                    "copy failure " + shardId,
                                    routing
                                )
                            );
                        } else {
                            successful++;
                            shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse());
                        }
                    }
                    shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
                }
            }
            indicesResults.put(index, shardsResults);
            indexResults.put(
                index,
                new SyncedFlushResponse.IndexResult(
                    shards * (replicas + 1),
                    successful,
                    failed,
                    shardFailures
                )
            );
            testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
            testPlan.expectedFailuresPerIndex.put(index, failures);
            totalFailed += failed;
            totalShards += shards * (replicas + 1);
            totalSuccessful += successful;
        }
        testPlan.result = new org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults);
        testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed);
        testPlan.clientResult = new SyncedFlushResponse(
            new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed),
            indexResults
        );
        return testPlan;
    }

    public boolean containsFailure(List<SyncedFlushResponse.ShardFailure> failures, SyncedFlushResponse.ShardFailure origFailure) {
        for (SyncedFlushResponse.ShardFailure failure: failures) {
            if (failure.getShardId() == origFailure.getShardId() &&
                failure.getFailureReason().equals(origFailure.getFailureReason()) &&
                failure.getRouting().equals(origFailure.getRouting())) {
                return true;
            }
        }
        return false;
    }

    public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) {
        if (first == null) {
            assertNull(second);
        } else {
            assertTrue(first.equals(second));
        }
    }

    public Map<String, Object> convertFailureListToSet(Map<String, Object> input) {
        Map<String, Object> retMap = new HashMap<>();
        for (Map.Entry<String, Object> entry: input.entrySet()) {
            if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) {
                retMap.put(entry.getKey(), entry.getValue());
            } else {
                // This was an index entry.
                @SuppressWarnings("unchecked")
                Map<String, Object> indexResult = (Map<String, Object>)entry.getValue();
                Map<String, Object> retResult = new HashMap<>();
                for (Map.Entry<String, Object> entry2: indexResult.entrySet()) {
                    if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) {
                        @SuppressWarnings("unchecked")
                        List<Object> failures = (List<Object>)entry2.getValue();
                        Set<Object> retSet = new HashSet<>(failures);
                        retResult.put(entry.getKey(), retSet);
                    } else {
                        retResult.put(entry2.getKey(), entry2.getValue());
                    }
                }
                retMap.put(entry.getKey(), retResult);
            }
        }
        return retMap;
    }
}
@@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;

@@ -55,8 +56,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.action.support.ActiveShardCount;

@@ -64,6 +63,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.SyncedFlushResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

@@ -81,8 +81,6 @@ import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;

/**
 * This class is used to generate the Java Indices API documentation.
 * You need to wrap your code between two tags like:

@@ -784,6 +782,89 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
        }
    }

    public void testSyncedFlushIndex() throws Exception {
        RestHighLevelClient client = highLevelClient();

        {
            createIndex("index1", Settings.EMPTY);
        }

        {
            // tag::flush-synced-request
            SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1>
            SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2>
            SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3>
            // end::flush-synced-request

            // tag::flush-synced-request-indicesOptions
            request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
            // end::flush-synced-request-indicesOptions

            // tag::flush-synced-execute
            SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request);
            // end::flush-synced-execute

            // tag::flush-synced-response
            int totalShards = flushSyncedResponse.totalShards(); // <1>
            int successfulShards = flushSyncedResponse.successfulShards(); // <2>
            int failedShards = flushSyncedResponse.failedShards(); // <3>

            for (Map.Entry<String, SyncedFlushResponse.IndexResult> responsePerIndexEntry:
                flushSyncedResponse.getIndexResults().entrySet()) {
                String indexName = responsePerIndexEntry.getKey(); // <4>
                SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue();
                int totalShardsForIndex = indexResult.totalShards(); // <5>
                int successfulShardsForIndex = indexResult.successfulShards(); // <6>
                int failedShardsForIndex = indexResult.failedShards(); // <7>
                if (failedShardsForIndex > 0) {
                    for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) {
                        int shardId = failureEntry.getShardId(); // <8>
                        String failureReason = failureEntry.getFailureReason(); // <9>
                        Map<String, Object> routing = failureEntry.getRouting(); // <10>
                    }
                }
            }
            // end::flush-synced-response

            // tag::flush-synced-execute-listener
            ActionListener<SyncedFlushResponse> listener = new ActionListener<SyncedFlushResponse>() {
                @Override
                public void onResponse(SyncedFlushResponse refreshResponse) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::flush-synced-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::flush-synced-execute-async
            client.indices().flushSyncedAsync(request, listener); // <1>
            // end::flush-synced-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }

        {
            // tag::flush-synced-notfound
            try {
                SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist");
                client.indices().flushSynced(request);
            } catch (ElasticsearchException exception) {
                if (exception.status() == RestStatus.NOT_FOUND) {
                    // <1>
                }
            }
            // end::flush-synced-notfound
        }
    }

    public void testGetSettings() throws Exception {
        RestHighLevelClient client = highLevelClient();
@@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;

@@ -297,6 +299,66 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
        }
    }

    public void testSnapshotVerifyRepository() throws IOException {
        RestHighLevelClient client = highLevelClient();
        createTestRepositories();

        // tag::verify-repository-request
        VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName);
        // end::verify-repository-request

        // tag::verify-repository-request-masterTimeout
        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
        request.masterNodeTimeout("1m"); // <2>
        // end::verify-repository-request-masterTimeout
        // tag::verify-repository-request-timeout
        request.timeout(TimeValue.timeValueMinutes(1)); // <1>
        request.timeout("1m"); // <2>
        // end::verify-repository-request-timeout

        // tag::verify-repository-execute
        VerifyRepositoryResponse response = client.snapshot().verifyRepository(request);
        // end::verify-repository-execute

        // tag::verify-repository-response
        List<VerifyRepositoryResponse.NodeView> repositoryMetaDataResponse = response.getNodes();
        // end::verify-repository-response
        assertThat(1, equalTo(repositoryMetaDataResponse.size()));
        assertThat("node-0", equalTo(repositoryMetaDataResponse.get(0).getName()));
    }

    public void testSnapshotVerifyRepositoryAsync() throws InterruptedException {
        RestHighLevelClient client = highLevelClient();
        {
            VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName);

            // tag::verify-repository-execute-listener
            ActionListener<VerifyRepositoryResponse> listener =
                new ActionListener<VerifyRepositoryResponse>() {
                    @Override
                    public void onResponse(VerifyRepositoryResponse verifyRepositoryRestResponse) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::verify-repository-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::verify-repository-execute-async
            client.snapshot().verifyRepositoryAsync(request, listener); // <1>
            // end::verify-repository-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    private void createTestRepositories() throws IOException {
        PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
        request.type(FsRepository.TYPE);
@@ -106,15 +106,23 @@ tasks.withType(AbstractArchiveTask) {
  baseName = "elasticsearch${ subdir.contains('oss') ? '-oss' : ''}"
}

Closure commonZipConfig = {
  dirMode 0755
  fileMode 0644
}

task buildIntegTestZip(type: Zip) {
  configure(commonZipConfig)
  with archiveFiles(transportModulesFiles, 'zip', false)
}

task buildZip(type: Zip) {
  configure(commonZipConfig)
  with archiveFiles(modulesFiles(false), 'zip', false)
}

task buildOssZip(type: Zip) {
  configure(commonZipConfig)
  with archiveFiles(modulesFiles(true), 'zip', true)
}
@@ -0,0 +1,91 @@
[[java-rest-high-flush-synced]]
=== Flush Synced API

[[java-rest-high-flush-synced-request]]
==== Flush Synced Request

A `SyncedFlushRequest` can be applied to one or more indices, or even on `_all` the indices:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-request]
--------------------------------------------------
<1> Flush synced one index
<2> Flush synced multiple indices
<3> Flush synced all the indices

==== Optional arguments

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded

[[java-rest-high-flush-synced-sync]]
==== Synchronous Execution

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute]
--------------------------------------------------

[[java-rest-high-flush-synced-async]]
==== Asynchronous Execution

The asynchronous execution of a flush request requires both the `SyncedFlushRequest`
instance and an `ActionListener` instance to be passed to the asynchronous
method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute-async]
--------------------------------------------------
<1> The `SyncedFlushRequest` to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.

A typical listener for `SyncedFlushResponse` looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument

[[java-rest-high-flush-synced-response]]
==== Flush Synced Response

The returned `SyncedFlushResponse` allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-response]
--------------------------------------------------
<1> Total number of shards hit by the flush request
<2> Number of shards where the flush has succeeded
<3> Number of shards where the flush has failed
<4> Name of the index whose results we are about to calculate.
<5> Total number of shards for the index mentioned in 4.
<6> Successful shards for the index mentioned in 4.
<7> Failed shards for the index mentioned in 4.
<8> One of the failed shard ids of the failed index mentioned in 4.
<9> Reason for failure of copies of the shard mentioned in 8.
<10> JSON represented by a Map<String, Object>. Contains shard related information like id, state, version etc.
for the failed shard copies. If the entire shard failed then this returns an empty map.

By default, if the indices were not found, an `ElasticsearchException` will be thrown:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-notfound]
--------------------------------------------------
<1> Do something if the indices to be flushed were not found
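
As a compact companion to the callouts above, a hedged end-to-end sketch (the client construction is omitted and the names "client" and "index1" are illustrative):

    SyncedFlushRequest request = new SyncedFlushRequest("index1");
    request.indicesOptions(IndicesOptions.lenientExpandOpen());
    SyncedFlushResponse response = client.indices().flushSynced(request);
    for (Map.Entry<String, SyncedFlushResponse.IndexResult> entry : response.getIndexResults().entrySet()) {
        SyncedFlushResponse.IndexResult result = entry.getValue();
        if (result.failedShards() > 0) {
            for (SyncedFlushResponse.ShardFailure failure : result.failures()) {
                // failure.getShardId(), failure.getFailureReason(), failure.getRouting()
            }
        }
    }
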
@ -0,0 +1,81 @@
|
|||
[[java-rest-high-snapshot-verify-repository]]
|
||||
=== Snapshot Verify Repository API
|
||||
|
||||
The Snapshot Verify Repository API allows to verify a registered repository.
|
||||
|
||||
[[java-rest-high-snapshot-verify-repository-request]]
|
||||
==== Snapshot Verify Repository Request
|
||||
|
||||
A `VerifyRepositoryRequest`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request]
|
||||
--------------------------------------------------
|
||||
|
||||
==== Optional Arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for the all the nodes to acknowledge the settings were applied
|
||||
as a `TimeValue`
|
||||
<2> Timeout to wait for the all the nodes to acknowledge the settings were applied
|
||||
as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
[[java-rest-high-snapshot-verify-repository-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-snapshot-verify-repository-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a snapshot verify repository request requires both the
|
||||
`VerifyRepositoryRequest` instance and an `ActionListener` instance to be
|
||||
passed to the asynchronous method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `VerifyRepositoryRequest` to execute and the `ActionListener`
|
||||
to use when the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `VerifyRepositoryResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of a failure. The raised exception is provided as an argument
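As a sketch, such a listener could be written like this, with the callback bodies left as placeholders:

["source","java"]
--------------------------------------------------
ActionListener<VerifyRepositoryResponse> listener = new ActionListener<VerifyRepositoryResponse>() {
    @Override
    public void onResponse(VerifyRepositoryResponse verifyRepositoryResponse) {
        // called when the execution successfully completes
    }

    @Override
    public void onFailure(Exception e) {
        // called when the execution fails; e is the raised exception
    }
};
--------------------------------------------------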
|
||||
|
||||
[[java-rest-high-cluster-verify-repository-response]]
|
||||
==== Snapshot Verify Repository Response
|
||||
|
||||
The returned `VerifyRepositoryResponse` allows you to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-response]
|
||||
--------------------------------------------------
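For illustration only, the response is expected to expose the nodes on which the repository was verified; the accessor name below is an assumption rather than the documented API.

["source","java"]
--------------------------------------------------
// Assumed accessor for illustration; refer to the tagged snippet above for the real usage.
List<DiscoveryNode> verifiedNodes = verifyRepositoryResponse.getNodes();
--------------------------------------------------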
|
|
@ -67,6 +67,7 @@ Index Management::
|
|||
* <<java-rest-high-split-index>>
|
||||
* <<java-rest-high-refresh>>
|
||||
* <<java-rest-high-flush>>
|
||||
* <<java-rest-high-flush-synced>>
|
||||
* <<java-rest-high-clear-cache>>
|
||||
* <<java-rest-high-force-merge>>
|
||||
* <<java-rest-high-rollover-index>>
|
||||
|
@ -89,6 +90,7 @@ include::indices/shrink_index.asciidoc[]
|
|||
include::indices/split_index.asciidoc[]
|
||||
include::indices/refresh.asciidoc[]
|
||||
include::indices/flush.asciidoc[]
|
||||
include::indices/flush_synced.asciidoc[]
|
||||
include::indices/clear_cache.asciidoc[]
|
||||
include::indices/force_merge.asciidoc[]
|
||||
include::indices/rollover.asciidoc[]
|
||||
|
@ -116,10 +118,12 @@ The Java High Level REST Client supports the following Snapshot APIs:
|
|||
* <<java-rest-high-snapshot-get-repository>>
|
||||
* <<java-rest-high-snapshot-create-repository>>
|
||||
* <<java-rest-high-snapshot-delete-repository>>
|
||||
* <<java-rest-high-snapshot-verify-repository>>
|
||||
|
||||
include::snapshot/get_repository.asciidoc[]
|
||||
include::snapshot/create_repository.asciidoc[]
|
||||
include::snapshot/delete_repository.asciidoc[]
|
||||
include::snapshot/verify_repository.asciidoc[]
|
||||
|
||||
== Tasks APIs
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ Which shows that the class of `doc.first` is
|
|||
"java_class": "org.elasticsearch.index.fielddata.ScriptDocValues$Longs",
|
||||
...
|
||||
},
|
||||
"status": 500
|
||||
"status": 400
|
||||
}
|
||||
---------------------------------------------------------
|
||||
// TESTRESPONSE[s/\.\.\./"script_stack": $body.error.script_stack, "script": $body.error.script, "lang": $body.error.lang, "caused_by": $body.error.caused_by, "root_cause": $body.error.root_cause, "reason": $body.error.reason/]
|
||||
|
|
|
@ -348,6 +348,34 @@ GET /_search
|
|||
\... will sort the composite bucket in descending order when comparing values from the `date_histogram` source
|
||||
and in ascending order when comparing values from the `terms` source.
|
||||
|
||||
====== Missing bucket
|
||||
|
||||
By default documents without a value for a given source are ignored.
|
||||
It is possible to include them in the response by setting `missing_bucket` to
|
||||
`true` (defaults to `false`):
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_search
|
||||
{
|
||||
"aggs" : {
|
||||
"my_buckets": {
|
||||
"composite" : {
|
||||
"sources" : [
|
||||
{ "product_name": { "terms" : { "field": "product", "missing_bucket": true } } }
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
In the example above the source `product_name` will emit an explicit `null` value
|
||||
for documents without a value for the field `product`.
|
||||
The `order` specified in the source dictates whether the `null` values should rank
|
||||
first (ascending order, `asc`) or last (descending order, `desc`).
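The same request can be sketched through the Java API; the builder and method names below are assumptions for illustration, not a documented example.

["source","java"]
--------------------------------------------------
// Assumed builder API, sketching missing_bucket together with an explicit order.
TermsValuesSourceBuilder productName = new TermsValuesSourceBuilder("product_name")
        .field("product")
        .missingBucket(true) // emit an explicit null bucket for documents without a value
        .order("asc");       // null values rank first in ascending order
CompositeAggregationBuilder composite =
        new CompositeAggregationBuilder("my_buckets", Collections.singletonList(productName));
--------------------------------------------------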
|
||||
|
||||
==== Size
|
||||
|
||||
The `size` parameter can be set to define how many composite buckets should be returned.
|
||||
|
|
|
@ -46,7 +46,7 @@ response.
|
|||
|
||||
==== Script
|
||||
|
||||
The `min` aggregation can also calculate the maximum of a script. The example
|
||||
The `min` aggregation can also calculate the minimum of a script. The example
|
||||
below computes the minimum price:
|
||||
|
||||
[source,js]
|
||||
|
|
|
@ -89,7 +89,7 @@ The following parameters are accepted by `text` fields:
|
|||
What information should be stored in the index, for search and highlighting purposes.
|
||||
Defaults to `positions`.
|
||||
|
||||
<<index-prefix-config,`index_prefix`>>::
|
||||
<<index-prefix-config,`index_prefixes`>>::
|
||||
|
||||
If enabled, term prefixes of between 2 and 5 characters are indexed into a
|
||||
separate field. This allows prefix searches to run more efficiently, at
|
||||
|
@ -138,7 +138,7 @@ The following parameters are accepted by `text` fields:
|
|||
[[index-prefix-config]]
|
||||
==== Index Prefix configuration
|
||||
|
||||
Text fields may also index term prefixes to speed up prefix searches. The `index_prefix`
|
||||
Text fields may also index term prefixes to speed up prefix searches. The `index_prefixes`
|
||||
parameter is configured as below. Either or both of `min_chars` and `max_chars` may be excluded.
|
||||
Both values are treated as inclusive.
|
||||
|
||||
|
@ -151,7 +151,7 @@ PUT my_index
|
|||
"properties": {
|
||||
"full_name": {
|
||||
"type": "text",
|
||||
"index_prefix" : {
|
||||
"index_prefixes" : {
|
||||
"min_chars" : 1, <1>
|
||||
"max_chars" : 10 <2>
|
||||
}
|
||||
|
|
|
@ -9,4 +9,9 @@ These `execution_hint` are removed and should be replaced by `global_ordinals`.
|
|||
|
||||
The dynamic cluster setting named `search.max_buckets` now defaults
|
||||
to 10,000 (instead of unlimited in the previous version).
|
||||
Requests that try to return more than the limit will fail with an exception.
|
||||
Requests that try to return more than the limit will fail with an exception.
|
||||
|
||||
==== `missing` option of the `composite` aggregation has been removed
|
||||
|
||||
The `missing` option of the `composite` aggregation, deprecated in 6.x,
|
||||
has been removed. `missing_bucket` should be used instead.
|
|
@ -11,3 +11,9 @@ the getter methods for date objects were deprecated. These methods have
|
|||
now been removed. Instead, use `.value` on `date` fields, or explicitly
|
||||
parse `long` fields into a date object using
|
||||
`Instant.ofEpochMilli(doc["myfield"].value)`.
|
||||
|
||||
==== Script errors will return as `400` error codes
|
||||
|
||||
Malformed scripts, either in search templates, ingest pipelines or search
|
||||
requests, now return `400 - Bad request` where they would previously have returned
|
||||
`500 - Internal Server Error`. This also applies to stored scripts.
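With the low-level REST client, the new status code can be observed roughly as follows; the endpoint and the malformed mustache body are placeholders taken from the script examples in this change.

["source","java"]
--------------------------------------------------
try {
    restClient.performRequest("PUT", "/_scripts/my_script", Collections.emptyMap(),
            new NStringEntity("{\"script\":{\"lang\":\"mustache\",\"source\":\"{{#join}}{{/join}}\"}}",
                    ContentType.APPLICATION_JSON));
} catch (ResponseException e) {
    int statusCode = e.getResponse().getStatusLine().getStatusCode(); // now 400 instead of 500
}
--------------------------------------------------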
|
||||
|
|
|
@ -43,7 +43,7 @@ The Search API returns `400 - Bad request` while it would previously return
|
|||
* the number of slices is too large
|
||||
* keep alive for scroll is too large
|
||||
* number of filters in the adjacency matrix aggregation is too large
|
||||
|
||||
* script compilation errors
|
||||
|
||||
==== Scroll queries cannot use the `request_cache` anymore
|
||||
|
||||
|
|
|
@ -20,12 +20,14 @@
|
|||
package org.elasticsearch.script.mustache;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
@ -107,12 +109,14 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera
|
|||
}
|
||||
|
||||
private Item[] items;
|
||||
|
||||
private long tookInMillis;
|
||||
|
||||
MultiSearchTemplateResponse() {
|
||||
}
|
||||
|
||||
public MultiSearchTemplateResponse(Item[] items) {
|
||||
public MultiSearchTemplateResponse(Item[] items, long tookInMillis) {
|
||||
this.items = items;
|
||||
this.tookInMillis = tookInMillis;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -126,6 +130,13 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera
|
|||
public Item[] getResponses() {
|
||||
return this.items;
|
||||
}
|
||||
|
||||
/**
|
||||
* How long the msearch_template took.
|
||||
*/
|
||||
public TimeValue getTook() {
|
||||
return new TimeValue(tookInMillis);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
|
@ -134,6 +145,9 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera
|
|||
for (int i = 0; i < items.length; i++) {
|
||||
items[i] = Item.readItem(in);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
|
||||
tookInMillis = in.readVLong();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -143,11 +157,15 @@ public class MultiSearchTemplateResponse extends ActionResponse implements Itera
|
|||
for (Item item : items) {
|
||||
item.writeTo(out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
|
||||
out.writeVLong(tookInMillis);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
builder.startObject();
|
||||
builder.field("took", tookInMillis);
|
||||
builder.startArray(Fields.RESPONSES);
|
||||
for (Item item : items) {
|
||||
if (item.isFailure()) {
|
||||
|
|
|
@ -19,9 +19,9 @@
|
|||
package org.elasticsearch.script.mustache;
|
||||
|
||||
import com.github.mustachejava.Mustache;
|
||||
import com.github.mustachejava.MustacheException;
|
||||
import com.github.mustachejava.MustacheFactory;
|
||||
|
||||
import java.io.StringReader;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
|
@ -31,12 +31,15 @@ import org.elasticsearch.script.GeneralScriptException;
|
|||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
import org.elasticsearch.script.ScriptEngine;
|
||||
import org.elasticsearch.script.ScriptException;
|
||||
import org.elasticsearch.script.TemplateScript;
|
||||
|
||||
import java.io.Reader;
|
||||
import java.io.StringReader;
|
||||
import java.io.StringWriter;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
|
@ -66,9 +69,14 @@ public final class MustacheScriptEngine implements ScriptEngine {
|
|||
}
|
||||
final MustacheFactory factory = createMustacheFactory(options);
|
||||
Reader reader = new StringReader(templateSource);
|
||||
Mustache template = factory.compile(reader, "query-template");
|
||||
TemplateScript.Factory compiled = params -> new MustacheExecutableScript(template, params);
|
||||
return context.factoryClazz.cast(compiled);
|
||||
try {
|
||||
Mustache template = factory.compile(reader, "query-template");
|
||||
TemplateScript.Factory compiled = params -> new MustacheExecutableScript(template, params);
|
||||
return context.factoryClazz.cast(compiled);
|
||||
} catch (MustacheException ex) {
|
||||
throw new ScriptException(ex.getMessage(), ex, Collections.emptyList(), templateSource, NAME);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private CustomMustacheFactory createMustacheFactory(Map<String, String> options) {
|
||||
|
|
|
@ -94,7 +94,7 @@ public class TransportMultiSearchTemplateAction extends HandledTransportAction<M
|
|||
items[originalSlot].getResponse().setResponse(item.getResponse());
|
||||
}
|
||||
}
|
||||
listener.onResponse(new MultiSearchTemplateResponse(items));
|
||||
listener.onResponse(new MultiSearchTemplateResponse(items, r.getTook().millis()));
|
||||
}, listener::onFailure));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,6 +37,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
|||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.hamcrest.Matchers.arrayWithSize;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.core.Is.is;
|
||||
|
||||
|
@ -140,6 +141,7 @@ public class MultiSearchTemplateIT extends ESIntegTestCase {
|
|||
|
||||
MultiSearchTemplateResponse response = client().execute(MultiSearchTemplateAction.INSTANCE, multiRequest).get();
|
||||
assertThat(response.getResponses(), arrayWithSize(5));
|
||||
assertThat(response.getTook().millis(), greaterThan(0L));
|
||||
|
||||
MultiSearchTemplateResponse.Item response1 = response.getResponses()[0];
|
||||
assertThat(response1.isFailure(), is(false));
|
||||
|
|
|
@ -18,6 +18,15 @@
|
|||
*/
|
||||
package org.elasticsearch.script.mustache;
|
||||
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.script.ScriptEngine;
|
||||
import org.elasticsearch.script.ScriptException;
|
||||
import org.elasticsearch.script.TemplateScript;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.hamcrest.Matcher;
|
||||
|
||||
import java.net.URLEncoder;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Arrays;
|
||||
|
@ -29,15 +38,6 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import com.github.mustachejava.MustacheException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.script.ScriptEngine;
|
||||
import org.elasticsearch.script.TemplateScript;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.hamcrest.Matcher;
|
||||
|
||||
import static java.util.Collections.singleton;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
|
@ -225,11 +225,17 @@ public class MustacheTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testsUnsupportedTagsToJson() {
|
||||
MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{foo}}{{bar}}{{/toJson}}"));
|
||||
final String script = "{{#toJson}}{{foo}}{{bar}}{{/toJson}}";
|
||||
ScriptException e = expectThrows(ScriptException.class, () -> compile(script));
|
||||
assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier"));
|
||||
assertEquals(MustacheScriptEngine.NAME, e.getLang());
|
||||
assertEquals(script, e.getScript());
|
||||
|
||||
e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{/toJson}}"));
|
||||
final String script2 = "{{#toJson}}{{/toJson}}";
|
||||
e = expectThrows(ScriptException.class, () -> compile(script2));
|
||||
assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier"));
|
||||
assertEquals(MustacheScriptEngine.NAME, e.getLang());
|
||||
assertEquals(script2, e.getScript());
|
||||
}
|
||||
|
||||
public void testEmbeddedToJSON() throws Exception {
|
||||
|
@ -312,11 +318,17 @@ public class MustacheTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testsUnsupportedTagsJoin() {
|
||||
MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#join}}{{/join}}"));
|
||||
final String script = "{{#join}}{{/join}}";
|
||||
ScriptException e = expectThrows(ScriptException.class, () -> compile(script));
|
||||
assertThat(e.getMessage(), containsString("Mustache function [join] must contain one and only one identifier"));
|
||||
assertEquals(MustacheScriptEngine.NAME, e.getLang());
|
||||
assertEquals(script, e.getScript());
|
||||
|
||||
e = expectThrows(MustacheException.class, () -> compile("{{#join delimiter='a'}}{{/join delimiter='b'}}"));
|
||||
final String script2 = "{{#join delimiter='a'}}{{/join delimiter='b'}}";
|
||||
e = expectThrows(ScriptException.class, () -> compile(script2));
|
||||
assertThat(e.getMessage(), containsString("Mismatched start/end tags"));
|
||||
assertEquals(MustacheScriptEngine.NAME, e.getLang());
|
||||
assertEquals(script2, e.getScript());
|
||||
}
|
||||
|
||||
public void testJoinWithCustomDelimiter() {
|
||||
|
|
|
@ -35,7 +35,7 @@
|
|||
id: "non_existing"
|
||||
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
put_script:
|
||||
id: "1"
|
||||
context: "search"
|
||||
|
|
|
@ -133,7 +133,7 @@ setup:
|
|||
---
|
||||
"Scripted Field with script error":
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
search:
|
||||
body:
|
||||
script_fields:
|
||||
|
|
|
@ -38,7 +38,6 @@ import java.util.ArrayList;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Vector;
|
||||
|
||||
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
|
||||
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
|
||||
|
@ -146,7 +145,7 @@ public class MeanReciprocalRankTests extends ESTestCase {
|
|||
|
||||
public void testCombine() {
|
||||
MeanReciprocalRank reciprocalRank = new MeanReciprocalRank();
|
||||
Vector<EvalQueryQuality> partialResults = new Vector<>(3);
|
||||
List<EvalQueryQuality> partialResults = new ArrayList<>(3);
|
||||
partialResults.add(new EvalQueryQuality("id1", 0.5));
|
||||
partialResults.add(new EvalQueryQuality("id2", 1.0));
|
||||
partialResults.add(new EvalQueryQuality("id3", 0.75));
|
||||
|
|
|
@ -38,7 +38,6 @@ import java.util.ArrayList;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Vector;
|
||||
|
||||
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
|
||||
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
|
||||
|
@ -163,7 +162,7 @@ public class PrecisionAtKTests extends ESTestCase {
|
|||
|
||||
public void testCombine() {
|
||||
PrecisionAtK metric = new PrecisionAtK();
|
||||
Vector<EvalQueryQuality> partialResults = new Vector<>(3);
|
||||
List<EvalQueryQuality> partialResults = new ArrayList<>(3);
|
||||
partialResults.add(new EvalQueryQuality("a", 0.1));
|
||||
partialResults.add(new EvalQueryQuality("b", 0.2));
|
||||
partialResults.add(new EvalQueryQuality("c", 0.6));
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
reindex:
|
||||
body:
|
||||
source:
|
||||
|
|
|
@ -446,7 +446,7 @@
|
|||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
reindex:
|
||||
refresh: true
|
||||
body:
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
update_by_query:
|
||||
index: source
|
||||
body:
|
||||
|
|
|
@ -434,7 +434,7 @@
|
|||
indices.refresh: {}
|
||||
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
update_by_query:
|
||||
index: twitter
|
||||
refresh: true
|
||||
|
|
|
@ -332,7 +332,7 @@
|
|||
wait_for_status: green
|
||||
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
ingest.put_pipeline:
|
||||
id: "my_pipeline_1"
|
||||
body: >
|
||||
|
@ -348,5 +348,5 @@
|
|||
]
|
||||
}
|
||||
- match: { error.header.processor_type: "set" }
|
||||
- match: { error.type: "general_script_exception" }
|
||||
- match: { error.reason: "Failed to compile inline script [{{#join}}{{/join}}] using lang [mustache]" }
|
||||
- match: { error.type: "script_exception" }
|
||||
- match: { error.reason: "Mustache function [join] must contain one and only one identifier" }
|
||||
|
|
|
@ -89,7 +89,7 @@
|
|||
---
|
||||
"Test script processor with syntax error in inline script":
|
||||
- do:
|
||||
catch: request
|
||||
catch: bad_request
|
||||
ingest.put_pipeline:
|
||||
id: "my_pipeline"
|
||||
body: >
|
||||
|
|
|
@ -82,38 +82,26 @@ In general it's probably best to avoid running external commands when a good
|
|||
Java alternative exists. For example most filesystem operations can be done with
|
||||
the java.nio.file APIs. For those that aren't, use an instance of [Shell](src/main/java/org/elasticsearch/packaging/util/Shell.java)
|
||||
|
||||
Despite the name, commands run with this class are not run in a shell, and any
|
||||
familiar features of shells like variables or expansion won't work.
|
||||
|
||||
If you do need the shell, you must explicitly invoke the shell's command. For
|
||||
example to run a command with Bash, use the `bash -c command` syntax. Note that
|
||||
the entire script must be in a single string argument
|
||||
This class runs scripts in either bash with the `bash -c <script>` syntax,
|
||||
or in powershell with the `powershell.exe -Command <script>` syntax.
|
||||
|
||||
```java
|
||||
Shell sh = new Shell();
|
||||
sh.run("bash", "-c", "echo $foo; echo $bar");
|
||||
|
||||
// equivalent to `bash -c 'echo $foo; echo $bar'`
|
||||
sh.bash("echo $foo; echo $bar");
|
||||
|
||||
// equivalent to `powershell.exe -Command 'Write-Host $foo; Write-Host $bar'`
|
||||
sh.powershell("Write-Host $foo; Write-Host $bar");
|
||||
```
|
||||
|
||||
Similary for powershell - again, the entire powershell script must go in a
|
||||
single string argument
|
||||
### Notes about powershell
|
||||
|
||||
```java
|
||||
sh.run("powershell.exe", "-Command", "Write-Host $foo; Write-Host $bar");
|
||||
```
|
||||
Powershell scripts for the most part have backwards compatibility with legacy
|
||||
cmd.exe commands and their syntax. Most of the commands you'll want to use
|
||||
in powershell are [Cmdlets](https://msdn.microsoft.com/en-us/library/ms714395.aspx)
|
||||
which generally don't have a one-to-one mapping with an executable file.
|
||||
|
||||
On Linux, most commands you'll want to use will be executable files and will
|
||||
work fine without a shell
|
||||
|
||||
```java
|
||||
sh.run("tar", "-xzpf", "elasticsearch-6.1.0.tar.gz");
|
||||
```
|
||||
|
||||
On Windows you'll mostly want to use powershell as it can do a lot more and
|
||||
gives much better feedback than Windows' legacy command line. Unfortunately that
|
||||
means that you'll need to use the `powershell.exe -Command` syntax as
|
||||
powershell's [Cmdlets](https://msdn.microsoft.com/en-us/library/ms714395.aspx)
|
||||
don't correspond to executable files and are not runnable by `Runtime` directly.
|
||||
|
||||
When writing powershell commands this way, make sure to test them as some types
|
||||
of formatting can cause it to return a successful exit code but not run
|
||||
anything.
|
||||
When writing powershell commands in this project, it's worth testing them by
|
||||
hand, as sometimes when a script can't be interpreted correctly it will
|
||||
fail silently.
|
||||
|
|
|
@ -64,7 +64,7 @@ public class Archives {
|
|||
if (distribution.packaging == Distribution.Packaging.TAR) {
|
||||
|
||||
if (Platforms.LINUX) {
|
||||
sh.run("tar", "-C", baseInstallPath.toString(), "-xzpf", distributionFile.toString());
|
||||
sh.bash("tar -C " + baseInstallPath + " -xzpf " + distributionFile);
|
||||
} else {
|
||||
throw new RuntimeException("Distribution " + distribution + " is not supported on windows");
|
||||
}
|
||||
|
@ -72,11 +72,12 @@ public class Archives {
|
|||
} else if (distribution.packaging == Distribution.Packaging.ZIP) {
|
||||
|
||||
if (Platforms.LINUX) {
|
||||
sh.run("unzip", distributionFile.toString(), "-d", baseInstallPath.toString());
|
||||
sh.bash("unzip " + distributionFile + " -d " + baseInstallPath);
|
||||
} else {
|
||||
sh.run("powershell.exe", "-Command",
|
||||
sh.powershell(
|
||||
"Add-Type -AssemblyName 'System.IO.Compression.Filesystem'; " +
|
||||
"[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')");
|
||||
"[IO.Compression.ZipFile]::ExtractToDirectory('" + distributionFile + "', '" + baseInstallPath + "')"
|
||||
);
|
||||
}
|
||||
|
||||
} else {
|
||||
|
@ -102,35 +103,35 @@ public class Archives {
|
|||
private static void setupArchiveUsersLinux(Path installPath) {
|
||||
final Shell sh = new Shell();
|
||||
|
||||
if (sh.runIgnoreExitCode("getent", "group", "elasticsearch").isSuccess() == false) {
|
||||
if (sh.bashIgnoreExitCode("getent group elasticsearch").isSuccess() == false) {
|
||||
if (isDPKG()) {
|
||||
sh.run("addgroup", "--system", "elasticsearch");
|
||||
sh.bash("addgroup --system elasticsearch");
|
||||
} else {
|
||||
sh.run("groupadd", "-r", "elasticsearch");
|
||||
sh.bash("groupadd -r elasticsearch");
|
||||
}
|
||||
}
|
||||
|
||||
if (sh.runIgnoreExitCode("id", "elasticsearch").isSuccess() == false) {
|
||||
if (sh.bashIgnoreExitCode("id elasticsearch").isSuccess() == false) {
|
||||
if (isDPKG()) {
|
||||
sh.run("adduser",
|
||||
"--quiet",
|
||||
"--system",
|
||||
"--no-create-home",
|
||||
"--ingroup", "elasticsearch",
|
||||
"--disabled-password",
|
||||
"--shell", "/bin/false",
|
||||
sh.bash("adduser " +
|
||||
"--quiet " +
|
||||
"--system " +
|
||||
"--no-create-home " +
|
||||
"--ingroup elasticsearch " +
|
||||
"--disabled-password " +
|
||||
"--shell /bin/false " +
|
||||
"elasticsearch");
|
||||
} else {
|
||||
sh.run("useradd",
|
||||
"--system",
|
||||
"-M",
|
||||
"--gid", "elasticsearch",
|
||||
"--shell", "/sbin/nologin",
|
||||
"--comment", "elasticsearch user",
|
||||
sh.bash("useradd " +
|
||||
"--system " +
|
||||
"-M " +
|
||||
"--gid elasticsearch " +
|
||||
"--shell /sbin/nologin " +
|
||||
"--comment 'elasticsearch user' " +
|
||||
"elasticsearch");
|
||||
}
|
||||
}
|
||||
sh.run("chown", "-R", "elasticsearch:elasticsearch", installPath.toString());
|
||||
sh.bash("chown -R elasticsearch:elasticsearch " + installPath);
|
||||
}
|
||||
|
||||
public static void verifyArchiveInstallation(Installation installation, Distribution distribution) {
|
||||
|
|
|
@ -59,16 +59,16 @@ public class Cleanup {
|
|||
if (Platforms.WINDOWS) {
|
||||
|
||||
// the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here
|
||||
sh.runIgnoreExitCode("powershell.exe", "-Command",
|
||||
sh.powershellIgnoreExitCode(
|
||||
"Get-WmiObject Win32_Process | " +
|
||||
"Where-Object { $_.CommandLine -Match 'org.elasticsearch.bootstrap.Elasticsearch' } | " +
|
||||
"ForEach-Object { $_.Terminate() }");
|
||||
"ForEach-Object { $_.Terminate() }"
|
||||
);
|
||||
|
||||
} else {
|
||||
|
||||
sh.runIgnoreExitCode("pkill", "-u", "elasticsearch");
|
||||
sh.runIgnoreExitCode("bash", "-c",
|
||||
"ps aux | grep -i 'org.elasticsearch.bootstrap.Elasticsearch' | awk {'print $2'} | xargs kill -9");
|
||||
sh.bashIgnoreExitCode("pkill -u elasticsearch");
|
||||
sh.bashIgnoreExitCode("ps aux | grep -i 'org.elasticsearch.bootstrap.Elasticsearch' | awk {'print $2'} | xargs kill -9");
|
||||
|
||||
}
|
||||
|
||||
|
@ -78,8 +78,8 @@ public class Cleanup {
|
|||
|
||||
// remove elasticsearch users
|
||||
if (Platforms.LINUX) {
|
||||
sh.runIgnoreExitCode("userdel", "elasticsearch");
|
||||
sh.runIgnoreExitCode("groupdel", "elasticsearch");
|
||||
sh.bashIgnoreExitCode("userdel elasticsearch");
|
||||
sh.bashIgnoreExitCode("groupdel elasticsearch");
|
||||
}
|
||||
|
||||
// delete files that may still exist
|
||||
|
@ -95,7 +95,7 @@ public class Cleanup {
|
|||
// disable elasticsearch service
|
||||
// todo add this for windows when adding tests for service intallation
|
||||
if (Platforms.LINUX && isSystemd()) {
|
||||
sh.run("systemctl", "unmask", "systemd-sysctl.service");
|
||||
sh.bash("systemctl unmask systemd-sysctl.service");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -103,19 +103,19 @@ public class Cleanup {
|
|||
final Shell sh = new Shell();
|
||||
|
||||
if (isRPM()) {
|
||||
sh.runIgnoreExitCode("rpm", "--quiet", "-e", "elasticsearch", "elasticsearch-oss");
|
||||
sh.bashIgnoreExitCode("rpm --quiet -e elasticsearch elasticsearch-oss");
|
||||
}
|
||||
|
||||
if (isYUM()) {
|
||||
sh.runIgnoreExitCode("yum", "remove", "-y", "elasticsearch", "elasticsearch-oss");
|
||||
sh.bashIgnoreExitCode("yum remove -y elasticsearch elasticsearch-oss");
|
||||
}
|
||||
|
||||
if (isDPKG()) {
|
||||
sh.runIgnoreExitCode("dpkg", "--purge", "elasticsearch", "elasticsearch-oss");
|
||||
sh.bashIgnoreExitCode("dpkg --purge elasticsearch elasticsearch-oss");
|
||||
}
|
||||
|
||||
if (isAptGet()) {
|
||||
sh.runIgnoreExitCode("apt-get", "--quiet", "--yes", "purge", "elasticsearch", "elasticsearch-oss");
|
||||
sh.bashIgnoreExitCode("apt-get --quiet --yes purge elasticsearch elasticsearch-oss");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,41 +28,41 @@ public class Platforms {
|
|||
if (WINDOWS) {
|
||||
return false;
|
||||
}
|
||||
return new Shell().runIgnoreExitCode("which", "dpkg").isSuccess();
|
||||
return new Shell().bashIgnoreExitCode("which dpkg").isSuccess();
|
||||
}
|
||||
|
||||
public static boolean isAptGet() {
|
||||
if (WINDOWS) {
|
||||
return false;
|
||||
}
|
||||
return new Shell().runIgnoreExitCode("which", "apt-get").isSuccess();
|
||||
return new Shell().bashIgnoreExitCode("which apt-get").isSuccess();
|
||||
}
|
||||
|
||||
public static boolean isRPM() {
|
||||
if (WINDOWS) {
|
||||
return false;
|
||||
}
|
||||
return new Shell().runIgnoreExitCode("which", "rpm").isSuccess();
|
||||
return new Shell().bashIgnoreExitCode("which rpm").isSuccess();
|
||||
}
|
||||
|
||||
public static boolean isYUM() {
|
||||
if (WINDOWS) {
|
||||
return false;
|
||||
}
|
||||
return new Shell().runIgnoreExitCode("which", "yum").isSuccess();
|
||||
return new Shell().bashIgnoreExitCode("which yum").isSuccess();
|
||||
}
|
||||
|
||||
public static boolean isSystemd() {
|
||||
if (WINDOWS) {
|
||||
return false;
|
||||
}
|
||||
return new Shell().runIgnoreExitCode("which", "systemctl").isSuccess();
|
||||
return new Shell().bashIgnoreExitCode("which systemctl").isSuccess();
|
||||
}
|
||||
|
||||
public static boolean isSysVInit() {
|
||||
if (WINDOWS) {
|
||||
return false;
|
||||
}
|
||||
return new Shell().runIgnoreExitCode("which", "service").isSuccess();
|
||||
return new Shell().bashIgnoreExitCode("which service").isSuccess();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,6 +29,7 @@ import java.nio.file.Path;
|
|||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
|
||||
|
@ -57,7 +58,47 @@ public class Shell {
|
|||
this.workingDirectory = workingDirectory;
|
||||
}
|
||||
|
||||
public Result run(String... command) {
|
||||
/**
|
||||
* Runs a script in a bash shell, throwing an exception if its exit code is nonzero
|
||||
*/
|
||||
public Result bash(String script) {
|
||||
return run(bashCommand(script));
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs a script in a bash shell
|
||||
*/
|
||||
public Result bashIgnoreExitCode(String script) {
|
||||
return runIgnoreExitCode(bashCommand(script));
|
||||
}
|
||||
|
||||
private static String[] bashCommand(String script) {
|
||||
return Stream.concat(Stream.of("bash", "-c"), Stream.of(script)).toArray(String[]::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs a script in a powershell shell, throwing an exception if its exit code is nonzero
|
||||
*/
|
||||
public Result powershell(String script) {
|
||||
return run(powershellCommand(script));
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs a script in a powershell shell
|
||||
*/
|
||||
public Result powershellIgnoreExitCode(String script) {
|
||||
return runIgnoreExitCode(powershellCommand(script));
|
||||
}
|
||||
|
||||
private static String[] powershellCommand(String script) {
|
||||
return Stream.concat(Stream.of("powershell.exe", "-Command"), Stream.of(script)).toArray(String[]::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs an executable file, passing all elements of {@code command} after the first as arguments. Throws an exception if the process'
|
||||
* exit code is nonzero
|
||||
*/
|
||||
private Result run(String[] command) {
|
||||
Result result = runIgnoreExitCode(command);
|
||||
if (result.isSuccess() == false) {
|
||||
throw new RuntimeException("Command was not successful: [" + String.join(" ", command) + "] result: " + result.toString());
|
||||
|
@ -65,7 +106,10 @@ public class Shell {
|
|||
return result;
|
||||
}
|
||||
|
||||
public Result runIgnoreExitCode(String... command) {
|
||||
/**
|
||||
* Runs an executable file, passing all elements of {@code command} after the first as arguments
|
||||
*/
|
||||
private Result runIgnoreExitCode(String[] command) {
|
||||
ProcessBuilder builder = new ProcessBuilder();
|
||||
builder.command(command);
|
||||
|
||||
|
|
|
@ -323,3 +323,32 @@ setup:
|
|||
- length: { aggregations.test.buckets: 2 }
|
||||
- length: { aggregations.test.after_key: 1 }
|
||||
- match: { aggregations.test.after_key.keyword: "foo" }
|
||||
|
||||
---
|
||||
"Composite aggregation and array size":
|
||||
- skip:
|
||||
version: " - 6.3.99"
|
||||
reason: starting in 6.4 the composite sources do not allocate arrays eagerly.
|
||||
|
||||
- do:
|
||||
search:
|
||||
index: test
|
||||
body:
|
||||
aggregations:
|
||||
test:
|
||||
composite:
|
||||
size: 1000000000
|
||||
sources: [
|
||||
{
|
||||
"keyword": {
|
||||
"terms": {
|
||||
"field": "keyword",
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
- match: {hits.total: 6}
|
||||
- length: { aggregations.test.buckets: 2 }
|
||||
- length: { aggregations.test.after_key: 1 }
|
||||
- match: { aggregations.test.after_key.keyword: "foo" }
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
"search with index prefixes":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: index_prefix is only available as of 6.3.0
|
||||
reason: index_prefixes is only available as of 6.3.0
|
||||
- do:
|
||||
indices.create:
|
||||
index: test
|
||||
|
@ -12,7 +12,7 @@
|
|||
properties:
|
||||
text:
|
||||
type: text
|
||||
index_prefix:
|
||||
index_prefixes:
|
||||
min_chars: 1
|
||||
max_chars: 10
|
||||
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.elasticsearch.common.transport.TransportAddress;
|
|||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
|
@ -129,6 +130,12 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
private ClusterName clusterName;
|
||||
|
||||
|
||||
private static final ObjectParser<VerifyRepositoryResponse, Void> PARSER =
|
||||
new ObjectParser<>(VerifyRepositoryResponse.class.getName(), VerifyRepositoryResponse::new);
|
||||
static {
|
||||
PARSER.declareNamedObjects(VerifyRepositoryResponse::setNodes, NodeView.PARSER, new ParseField("nodes"));
|
||||
}
|
||||
|
||||
VerifyRepositoryResponse() {
|
||||
}
|
||||
|
||||
|
@ -167,6 +174,10 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
return clusterName;
|
||||
}
|
||||
|
||||
protected void setNodes(List<NodeView> nodes) {
|
||||
this.nodes = nodes.stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
|
@ -187,8 +198,29 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
|
|||
return builder;
|
||||
}
|
||||
|
||||
public static VerifyRepositoryResponse fromXContent(XContentParser parser) {
|
||||
return PARSER.apply(parser, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return Strings.toString(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (obj == null) {
|
||||
return false;
|
||||
}
|
||||
if (getClass() != obj.getClass()) {
|
||||
return false;
|
||||
}
|
||||
VerifyRepositoryResponse other = (VerifyRepositoryResponse) obj;
|
||||
return nodes.equals(other.nodes);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return nodes.hashCode();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,10 +30,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
|
|||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentFragment;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
|
@ -42,7 +44,7 @@ import java.util.Set;
|
|||
|
||||
import static java.util.Collections.emptySet;
|
||||
|
||||
public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
||||
public class AliasMetaData extends AbstractDiffable<AliasMetaData> implements ToXContentFragment {
|
||||
|
||||
private final String alias;
|
||||
|
||||
|
@ -199,6 +201,17 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
|||
return readDiffFrom(AliasMetaData::new, in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return Strings.toString(this, true, true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
AliasMetaData.Builder.toXContent(this, builder, params);
|
||||
return builder;
|
||||
}
|
||||
|
||||
public static class Builder {
|
||||
|
||||
private final String alias;
|
||||
|
@ -314,6 +327,8 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
|||
if ("filter".equals(currentFieldName)) {
|
||||
Map<String, Object> filter = parser.mapOrdered();
|
||||
builder.filter(filter);
|
||||
} else {
|
||||
parser.skipChildren();
|
||||
}
|
||||
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
|
||||
if ("filter".equals(currentFieldName)) {
|
||||
|
@ -327,6 +342,8 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
|
|||
} else if ("search_routing".equals(currentFieldName) || "searchRouting".equals(currentFieldName)) {
|
||||
builder.searchRouting(parser.text());
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_ARRAY) {
|
||||
parser.skipChildren();
|
||||
}
|
||||
}
|
||||
return builder.build();
|
||||
|
|
|
@ -141,9 +141,10 @@ public abstract class MetaDataStateFormat<T> {
|
|||
Path finalPath = stateLocation.resolve(fileName);
|
||||
try {
|
||||
Files.copy(finalStatePath, tmpPath);
|
||||
IOUtils.fsync(tmpPath, false); // fsync the state file
|
||||
// we are on the same FileSystem / Partition here we can do an atomic move
|
||||
Files.move(tmpPath, finalPath, StandardCopyOption.ATOMIC_MOVE);
|
||||
IOUtils.fsync(stateLocation, true); // we just fsync the dir here..
|
||||
IOUtils.fsync(stateLocation, true);
|
||||
} finally {
|
||||
Files.deleteIfExists(tmpPath);
|
||||
}
|
||||
|
|
|
@ -156,7 +156,7 @@ public class TextFieldMapper extends FieldMapper {
|
|||
PrefixFieldMapper prefixMapper = null;
|
||||
if (prefixFieldType != null) {
|
||||
if (fieldType().isSearchable() == false) {
|
||||
throw new IllegalArgumentException("Cannot set index_prefix on unindexed field [" + name() + "]");
|
||||
throw new IllegalArgumentException("Cannot set index_prefixes on unindexed field [" + name() + "]");
|
||||
}
|
||||
if (fieldType.indexOptions() == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
|
||||
prefixFieldType.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
|
||||
|
@ -203,7 +203,7 @@ public class TextFieldMapper extends FieldMapper {
|
|||
builder.fielddataFrequencyFilter(minFrequency, maxFrequency, minSegmentSize);
|
||||
DocumentMapperParser.checkNoRemainingFields(propName, frequencyFilter, parserContext.indexVersionCreated());
|
||||
iterator.remove();
|
||||
} else if (propName.equals("index_prefix")) {
|
||||
} else if (propName.equals("index_prefixes")) {
|
||||
Map<?, ?> indexPrefix = (Map<?, ?>) propNode;
|
||||
int minChars = XContentMapValues.nodeIntegerValue(indexPrefix.remove("min_chars"),
|
||||
Defaults.INDEX_PREFIX_MIN_CHARS);
|
||||
|
@ -243,7 +243,7 @@ public class TextFieldMapper extends FieldMapper {
|
|||
}
|
||||
}
|
||||
|
||||
private static final class PrefixFieldType extends StringFieldType {
|
||||
static final class PrefixFieldType extends StringFieldType {
|
||||
|
||||
final int minChars;
|
||||
final int maxChars;
|
||||
|
@ -268,14 +268,14 @@ public class TextFieldMapper extends FieldMapper {
|
|||
}
|
||||
|
||||
void doXContent(XContentBuilder builder) throws IOException {
|
||||
builder.startObject("index_prefix");
|
||||
builder.startObject("index_prefixes");
|
||||
builder.field("min_chars", minChars);
|
||||
builder.field("max_chars", maxChars);
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MappedFieldType clone() {
|
||||
public PrefixFieldType clone() {
|
||||
return new PrefixFieldType(name(), minChars, maxChars);
|
||||
}
|
||||
|
||||
|
@ -305,6 +305,22 @@ public class TextFieldMapper extends FieldMapper {
|
|||
public Query existsQuery(QueryShardContext context) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
if (!super.equals(o)) return false;
|
||||
PrefixFieldType that = (PrefixFieldType) o;
|
||||
return minChars == that.minChars &&
|
||||
maxChars == that.maxChars;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
|
||||
return Objects.hash(super.hashCode(), minChars, maxChars);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class PrefixFieldMapper extends FieldMapper {
|
||||
|
@ -355,6 +371,9 @@ public class TextFieldMapper extends FieldMapper {
|
|||
this.fielddataMinFrequency = ref.fielddataMinFrequency;
|
||||
this.fielddataMaxFrequency = ref.fielddataMaxFrequency;
|
||||
this.fielddataMinSegmentSize = ref.fielddataMinSegmentSize;
|
||||
if (ref.prefixFieldType != null) {
|
||||
this.prefixFieldType = ref.prefixFieldType.clone();
|
||||
}
|
||||
}
|
||||
|
||||
public TextFieldType clone() {
|
||||
|
@ -368,6 +387,7 @@ public class TextFieldMapper extends FieldMapper {
|
|||
}
|
||||
TextFieldType that = (TextFieldType) o;
|
||||
return fielddata == that.fielddata
|
||||
&& Objects.equals(prefixFieldType, that.prefixFieldType)
|
||||
&& fielddataMinFrequency == that.fielddataMinFrequency
|
||||
&& fielddataMaxFrequency == that.fielddataMaxFrequency
|
||||
&& fielddataMinSegmentSize == that.fielddataMinSegmentSize;
|
||||
|
@ -375,7 +395,7 @@ public class TextFieldMapper extends FieldMapper {
|
|||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(super.hashCode(), fielddata,
|
||||
return Objects.hash(super.hashCode(), fielddata, prefixFieldType,
|
||||
fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize);
|
||||
}
|
||||
|
||||
|
@ -420,6 +440,10 @@ public class TextFieldMapper extends FieldMapper {
|
|||
this.prefixFieldType = prefixFieldType;
|
||||
}
|
||||
|
||||
public PrefixFieldType getPrefixFieldType() {
|
||||
return this.prefixFieldType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String typeName() {
|
||||
return CONTENT_TYPE;
|
||||
|
|
|
@ -103,15 +103,19 @@ public class AllocatedPersistentTask extends CancellableTask {
|
|||
}
|
||||
|
||||
/**
|
||||
* Waits for this persistent task to have the desired state.
|
||||
* Waits for a given persistent task to comply with a given predicate, then calls back the listener accordingly.
|
||||
*
|
||||
* @param predicate the persistent task predicate to evaluate
|
||||
* @param timeout a timeout for waiting
|
||||
* @param listener the callback listener
|
||||
*/
|
||||
public void waitForPersistentTaskStatus(Predicate<PersistentTasksCustomMetaData.PersistentTask<?>> predicate,
|
||||
@Nullable TimeValue timeout,
|
||||
PersistentTasksService.WaitForPersistentTaskStatusListener<?> listener) {
|
||||
persistentTasksService.waitForPersistentTaskStatus(persistentTaskId, predicate, timeout, listener);
|
||||
public void waitForPersistentTask(final Predicate<PersistentTasksCustomMetaData.PersistentTask<?>> predicate,
|
||||
final @Nullable TimeValue timeout,
|
||||
final PersistentTasksService.WaitForPersistentTaskListener<?> listener) {
|
||||
persistentTasksService.waitForPersistentTaskCondition(persistentTaskId, predicate, timeout, listener);
|
||||
}
|
||||
|
||||
final boolean isCompleted() {
|
||||
protected final boolean isCompleted() {
|
||||
return state.get() == State.COMPLETED;
|
||||
}
|
||||
|
||||
|
@ -143,7 +147,7 @@ public class AllocatedPersistentTask extends CancellableTask {
|
|||
this.failure = failure;
|
||||
if (prevState == State.STARTED) {
|
||||
logger.trace("sending notification for completed task [{}] with id [{}]", getAction(), getPersistentTaskId());
|
||||
persistentTasksService.sendCompletionNotification(getPersistentTaskId(), getAllocationId(), failure, new
|
||||
persistentTasksService.sendCompletionRequest(getPersistentTaskId(), getAllocationId(), failure, new
|
||||
ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
|
||||
@Override
|
||||
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> persistentTask) {
|
||||
|
|
|
@ -196,7 +196,8 @@ public class PersistentTasksNodeService extends AbstractComponent implements Clu
|
|||
AllocatedPersistentTask task = runningTasks.remove(allocationId);
|
||||
if (task.markAsCancelled()) {
|
||||
// Cancel the local task using the task manager
|
||||
persistentTasksService.sendTaskManagerCancellation(task.getId(), new ActionListener<CancelTasksResponse>() {
|
||||
String reason = "task has been removed, cancelling locally";
|
||||
persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener<CancelTasksResponse>() {
|
||||
@Override
|
||||
public void onResponse(CancelTasksResponse cancelTasksResponse) {
|
||||
logger.trace("Persistent task [{}] with id [{}] and allocation id [{}] was cancelled", task.getAction(),
|
||||
|
|
|
@ -22,14 +22,12 @@ import org.elasticsearch.action.Action;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse;
|
||||
import org.elasticsearch.action.support.ContextPreservingActionListener;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
|
@ -37,20 +35,24 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.node.NodeClosedException;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.tasks.TaskId;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
|
||||
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* This service is used by persistent actions to propagate changes in the action state and notify about completion
|
||||
* This service is used by persistent tasks and allocated persistent tasks to communicate changes
|
||||
* to the master node so that the master can update the cluster state and can track of the states
|
||||
* of the persistent tasks.
|
||||
*/
|
||||
public class PersistentTasksService extends AbstractComponent {
|
||||
|
||||
private static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin";
|
||||
private static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks";
|
||||
|
||||
private final Client client;
|
||||
private final ClusterService clusterService;
|
||||
private final ThreadPool threadPool;
|
||||
|
@ -63,92 +65,115 @@ public class PersistentTasksService extends AbstractComponent {
|
|||
}
|
||||
|
||||
/**
|
||||
* Creates the specified persistent task and attempts to assign it to a node.
|
||||
* Notifies the master node to create new persistent task and to assign it to a node.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public <Params extends PersistentTaskParams> void startPersistentTask(String taskId, String taskName, @Nullable Params params,
|
||||
ActionListener<PersistentTask<Params>> listener) {
|
||||
StartPersistentTaskAction.Request createPersistentActionRequest =
|
||||
new StartPersistentTaskAction.Request(taskId, taskName, params);
|
||||
public <Params extends PersistentTaskParams> void sendStartRequest(final String taskId,
|
||||
final String taskName,
|
||||
final @Nullable Params taskParams,
|
||||
final ActionListener<PersistentTask<Params>> listener) {
|
||||
@SuppressWarnings("unchecked")
|
||||
final ActionListener<PersistentTask<?>> wrappedListener =
|
||||
ActionListener.wrap(t -> listener.onResponse((PersistentTask<Params>) t), listener::onFailure);
|
||||
StartPersistentTaskAction.Request request = new StartPersistentTaskAction.Request(taskId, taskName, taskParams);
|
||||
execute(request, StartPersistentTaskAction.INSTANCE, wrappedListener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Notifies the master node about the completion of a persistent task.
|
||||
* <p>
|
||||
* When {@code failure} is {@code null}, the persistent task is considered as successfully completed.
|
||||
*/
|
||||
public void sendCompletionRequest(final String taskId,
|
||||
final long taskAllocationId,
|
||||
final @Nullable Exception taskFailure,
|
||||
final ActionListener<PersistentTask<?>> listener) {
|
||||
CompletionPersistentTaskAction.Request request = new CompletionPersistentTaskAction.Request(taskId, taskAllocationId, taskFailure);
|
||||
execute(request, CompletionPersistentTaskAction.INSTANCE, listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels a locally running task using the Task Manager API
|
||||
*/
|
||||
void sendCancelRequest(final long taskId, final String reason, final ActionListener<CancelTasksResponse> listener) {
|
||||
CancelTasksRequest request = new CancelTasksRequest();
|
||||
request.setTaskId(new TaskId(clusterService.localNode().getId(), taskId));
|
||||
request.setReason(reason);
|
||||
try {
|
||||
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, StartPersistentTaskAction.INSTANCE, createPersistentActionRequest,
|
||||
ActionListener.wrap(o -> listener.onResponse((PersistentTask<Params>) o.getTask()), listener::onFailure));
|
||||
final ThreadContext threadContext = client.threadPool().getThreadContext();
|
||||
final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
|
||||
|
||||
try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, PERSISTENT_TASK_ORIGIN)) {
|
||||
client.admin().cluster().cancelTasks(request, new ContextPreservingActionListener<>(supplier, listener));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Notifies the PersistentTasksClusterService about successful (failure == null) completion of a task or its failure
|
||||
*/
|
||||
public void sendCompletionNotification(String taskId, long allocationId, Exception failure,
|
||||
ActionListener<PersistentTask<?>> listener) {
|
||||
CompletionPersistentTaskAction.Request restartRequest = new CompletionPersistentTaskAction.Request(taskId, allocationId, failure);
|
||||
try {
|
||||
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, CompletionPersistentTaskAction.INSTANCE, restartRequest,
|
||||
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels a locally running task using the task manager
|
||||
*/
|
||||
void sendTaskManagerCancellation(long taskId, ActionListener<CancelTasksResponse> listener) {
|
||||
DiscoveryNode localNode = clusterService.localNode();
|
||||
CancelTasksRequest cancelTasksRequest = new CancelTasksRequest();
|
||||
cancelTasksRequest.setTaskId(new TaskId(localNode.getId(), taskId));
|
||||
cancelTasksRequest.setReason("persistent action was removed");
|
||||
try {
|
||||
executeAsyncWithOrigin(client.threadPool().getThreadContext(), PERSISTENT_TASK_ORIGIN, cancelTasksRequest, listener,
|
||||
client.admin().cluster()::cancelTasks);
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates status of the persistent task.
|
||||
* Notifies the master node that the state of a persistent task has changed.
|
||||
* <p>
|
||||
* Persistent task implementers shouldn't call this method directly and use
|
||||
* {@link AllocatedPersistentTask#updatePersistentStatus} instead
|
||||
*/
|
||||
void updateStatus(String taskId, long allocationId, Task.Status status, ActionListener<PersistentTask<?>> listener) {
|
||||
UpdatePersistentTaskStatusAction.Request updateStatusRequest =
|
||||
new UpdatePersistentTaskStatusAction.Request(taskId, allocationId, status);
|
||||
try {
|
||||
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, UpdatePersistentTaskStatusAction.INSTANCE, updateStatusRequest,
|
||||
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
void updateStatus(final String taskId,
|
||||
final long taskAllocationID,
|
||||
final Task.Status status,
|
||||
final ActionListener<PersistentTask<?>> listener) {
|
||||
UpdatePersistentTaskStatusAction.Request request = new UpdatePersistentTaskStatusAction.Request(taskId, taskAllocationID, status);
|
||||
execute(request, UpdatePersistentTaskStatusAction.INSTANCE, listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels if needed and removes a persistent task
|
||||
* Notifies the master node to remove a persistent task from the cluster state
|
||||
*/
|
||||
public void cancelPersistentTask(String taskId, ActionListener<PersistentTask<?>> listener) {
|
||||
RemovePersistentTaskAction.Request removeRequest = new RemovePersistentTaskAction.Request(taskId);
|
||||
try {
|
||||
executeAsyncWithOrigin(client, PERSISTENT_TASK_ORIGIN, RemovePersistentTaskAction.INSTANCE, removeRequest,
|
||||
ActionListener.wrap(o -> listener.onResponse(o.getTask()), listener::onFailure));
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
public void sendRemoveRequest(final String taskId, final ActionListener<PersistentTask<?>> listener) {
|
||||
RemovePersistentTaskAction.Request request = new RemovePersistentTaskAction.Request(taskId);
|
||||
execute(request, RemovePersistentTaskAction.INSTANCE, listener);
|
||||
}
|
||||
|
||||
/**
|
||||
     * Checks if the persistent task with the given id (taskId) has the desired state and, if it doesn't,
     * waits for it.
|
||||
* Executes an asynchronous persistent task action using the client.
|
||||
* <p>
|
||||
* The origin is set in the context and the listener is wrapped to ensure the proper context is restored
|
||||
*/
|
||||
public void waitForPersistentTaskStatus(String taskId, Predicate<PersistentTask<?>> predicate, @Nullable TimeValue timeout,
|
||||
WaitForPersistentTaskStatusListener<?> listener) {
|
||||
ClusterStateObserver stateObserver = new ClusterStateObserver(clusterService, timeout, logger, threadPool.getThreadContext());
|
||||
if (predicate.test(PersistentTasksCustomMetaData.getTaskWithId(stateObserver.setAndGetObservedState(), taskId))) {
|
||||
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(stateObserver.setAndGetObservedState(), taskId));
|
||||
private <Req extends ActionRequest, Resp extends PersistentTaskResponse, Builder extends ActionRequestBuilder<Req, Resp, Builder>>
|
||||
void execute(final Req request, final Action<Req, Resp, Builder> action, final ActionListener<PersistentTask<?>> listener) {
|
||||
try {
|
||||
final ThreadContext threadContext = client.threadPool().getThreadContext();
|
||||
final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
|
||||
|
||||
try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, PERSISTENT_TASK_ORIGIN)) {
|
||||
client.execute(action, request,
|
||||
new ContextPreservingActionListener<>(supplier,
|
||||
ActionListener.wrap(r -> listener.onResponse(r.getTask()), listener::onFailure)));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
listener.onFailure(e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Waits for a given persistent task to comply with a given predicate, then call back the listener accordingly.
|
||||
*
|
||||
* @param taskId the persistent task id
|
||||
* @param predicate the persistent task predicate to evaluate
|
||||
* @param timeout a timeout for waiting
|
||||
* @param listener the callback listener
|
||||
*/
|
||||
public void waitForPersistentTaskCondition(final String taskId,
|
||||
final Predicate<PersistentTask<?>> predicate,
|
||||
final @Nullable TimeValue timeout,
|
||||
final WaitForPersistentTaskListener<?> listener) {
|
||||
final Predicate<ClusterState> clusterStatePredicate = clusterState ->
|
||||
predicate.test(PersistentTasksCustomMetaData.getTaskWithId(clusterState, taskId));
|
||||
|
||||
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, timeout, logger, threadPool.getThreadContext());
|
||||
final ClusterState clusterState = observer.setAndGetObservedState();
|
||||
if (clusterStatePredicate.test(clusterState)) {
|
||||
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(clusterState, taskId));
|
||||
} else {
|
||||
stateObserver.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
observer.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
@Override
|
||||
public void onNewClusterState(ClusterState state) {
|
||||
listener.onResponse(PersistentTasksCustomMetaData.getTaskWithId(state, taskId));
|
||||
|
@ -163,18 +188,28 @@ public class PersistentTasksService extends AbstractComponent {
|
|||
public void onTimeout(TimeValue timeout) {
|
||||
listener.onTimeout(timeout);
|
||||
}
|
||||
}, clusterState -> predicate.test(PersistentTasksCustomMetaData.getTaskWithId(clusterState, taskId)));
|
||||
}, clusterStatePredicate);
|
||||
}
|
||||
}
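A hedged usage sketch of the wait helper above: the caller supplies a predicate over the task in the cluster state and a listener that fires once the predicate holds (or on timeout). The method and listener names come from this change; the task id, the 30-second timeout, and the "assigned" predicate are illustrative, and an Elasticsearch server dependency on the classpath is assumed.

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksService;

public class WaitForAssignmentSketch {
    // Wait until the given persistent task is assigned to a node, or time out after 30 seconds.
    static void waitForAssignment(PersistentTasksService service, String taskId) {
        service.waitForPersistentTaskCondition(taskId,
            task -> task != null && task.isAssigned(),
            TimeValue.timeValueSeconds(30),
            new PersistentTasksService.WaitForPersistentTaskListener<PersistentTaskParams>() {
                @Override
                public void onResponse(PersistentTask<PersistentTaskParams> task) {
                    System.out.println("task " + taskId + " assigned to node " + task.getExecutorNode());
                }

                @Override
                public void onFailure(Exception e) {
                    e.printStackTrace();
                }
            });
    }
}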
|
||||
|
||||
public void waitForPersistentTasksStatus(Predicate<PersistentTasksCustomMetaData> predicate,
|
||||
@Nullable TimeValue timeout, ActionListener<Boolean> listener) {
|
||||
ClusterStateObserver stateObserver = new ClusterStateObserver(clusterService, timeout,
|
||||
logger, threadPool.getThreadContext());
|
||||
if (predicate.test(stateObserver.setAndGetObservedState().metaData().custom(PersistentTasksCustomMetaData.TYPE))) {
|
||||
/**
|
||||
* Waits for persistent tasks to comply with a given predicate, then call back the listener accordingly.
|
||||
*
|
||||
* @param predicate the predicate to evaluate
|
||||
* @param timeout a timeout for waiting
|
||||
* @param listener the callback listener
|
||||
*/
|
||||
public void waitForPersistentTasksCondition(final Predicate<PersistentTasksCustomMetaData> predicate,
|
||||
final @Nullable TimeValue timeout,
|
||||
final ActionListener<Boolean> listener) {
|
||||
final Predicate<ClusterState> clusterStatePredicate = clusterState ->
|
||||
predicate.test(clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE));
|
||||
|
||||
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, timeout, logger, threadPool.getThreadContext());
|
||||
if (clusterStatePredicate.test(observer.setAndGetObservedState())) {
|
||||
listener.onResponse(true);
|
||||
} else {
|
||||
stateObserver.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
observer.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
@Override
|
||||
public void onNewClusterState(ClusterState state) {
|
||||
listener.onResponse(true);
|
||||
|
@ -187,45 +222,15 @@ public class PersistentTasksService extends AbstractComponent {
|
|||
|
||||
@Override
|
||||
public void onTimeout(TimeValue timeout) {
|
||||
listener.onFailure(new IllegalStateException("timed out after " + timeout));
|
||||
listener.onFailure(new IllegalStateException("Timed out when waiting for persistent tasks after " + timeout));
|
||||
}
|
||||
}, clusterState -> predicate.test(clusterState.metaData().custom(PersistentTasksCustomMetaData.TYPE)), timeout);
|
||||
}, clusterStatePredicate, timeout);
|
||||
}
|
||||
}
|
||||
|
||||
    public interface WaitForPersistentTaskStatusListener<Params extends PersistentTaskParams>
            extends ActionListener<PersistentTask<Params>> {
    public interface WaitForPersistentTaskListener<P extends PersistentTaskParams> extends ActionListener<PersistentTask<P>> {
        default void onTimeout(TimeValue timeout) {
            onFailure(new IllegalStateException("timed out after " + timeout));
        }
    }

    private static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin";
    private static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks";

    /**
     * Executes a consumer after setting the origin and wrapping the listener so that the proper context is restored
     */
    public static <Request extends ActionRequest, Response extends ActionResponse> void executeAsyncWithOrigin(
            ThreadContext threadContext, String origin, Request request, ActionListener<Response> listener,
            BiConsumer<Request, ActionListener<Response>> consumer) {
        final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
            consumer.accept(request, new ContextPreservingActionListener<>(supplier, listener));
        }
    }
    /**
     * Executes an asynchronous action using the provided client. The origin is set in the context and the listener
     * is wrapped to ensure the proper context is restored
     */
    public static <Request extends ActionRequest, Response extends ActionResponse,
            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void executeAsyncWithOrigin(
            Client client, String origin, Action<Request, Response, RequestBuilder> action, Request request,
            ActionListener<Response> listener) {
        final ThreadContext threadContext = client.threadPool().getThreadContext();
        final Supplier<ThreadContext.StoredContext> supplier = threadContext.newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
            client.execute(action, request, new ContextPreservingActionListener<>(supplier, listener));
            onFailure(new IllegalStateException("Timed out when waiting for persistent task after " + timeout));
        }
    }

@ -234,5 +239,4 @@ public class PersistentTasksService extends AbstractComponent {
        threadContext.putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin);
        return storedContext;
    }

}
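The two executeAsyncWithOrigin helpers exist so that the "action.origin" transient is only visible while the action runs, and the caller's own thread context is restored before its listener is invoked. A minimal sketch of that stash-and-restore pattern, not part of this change and assuming an Elasticsearch server dependency on the classpath:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ContextPreservingActionListener;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

import java.util.function.Supplier;

public class OriginContextSketch {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        // capture the caller's context before stashing, as the helpers above do
        Supplier<ThreadContext.StoredContext> restore = threadContext.newRestorableContext(false);
        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            // inside the stashed context the origin is set, e.g. "persistent_tasks"
            threadContext.putTransient("action.origin", "persistent_tasks");
            // the wrapped listener restores the caller's context before onResponse/onFailure run
            ActionListener<String> listener = new ContextPreservingActionListener<>(restore,
                ActionListener.wrap(
                    r -> System.out.println("origin seen by listener: " + threadContext.getTransient("action.origin")),
                    Throwable::printStackTrace));
            listener.onResponse("done"); // prints "null": the caller's context was restored
        }
    }
}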
@ -1,5 +1,14 @@
package org.elasticsearch.script;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.rest.RestStatus;

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
@ -25,14 +34,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Objects;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

/**
 * Exception from a scripting engine.
 * <p>
@ -132,4 +133,9 @@ public class ScriptException extends ElasticsearchException {
            throw new RuntimeException(e);
        }
    }

    @Override
    public RestStatus status() {
        return RestStatus.BAD_REQUEST;
    }
}
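A quick, hedged illustration of the new status() override: a ScriptException now maps to HTTP 400 instead of the ElasticsearchException default of 500. The constructor arguments below (message, cause, script stack, script source, language) follow the existing public constructor, but treat the snippet as a sketch assuming an Elasticsearch server dependency on the classpath.

import org.elasticsearch.script.ScriptException;

import java.util.Collections;

public class ScriptExceptionStatusSketch {
    public static void main(String[] args) {
        ScriptException e = new ScriptException("compile error", new IllegalArgumentException("bad token"),
            Collections.singletonList("doc['field'].value +"), "doc['field'].value +", "painless");
        System.out.println(e.status()); // BAD_REQUEST (HTTP 400)
    }
}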
@ -24,49 +24,93 @@ import org.apache.lucene.index.LeafReaderContext;
|
|||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.BytesRefBuilder;
|
||||
import org.elasticsearch.common.CheckedFunction;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.ObjectArray;
|
||||
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
|
||||
import org.elasticsearch.index.mapper.KeywordFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.StringFieldType;
|
||||
import org.elasticsearch.index.mapper.TextFieldMapper;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.LongConsumer;
|
||||
|
||||
/**
|
||||
* A {@link SingleDimensionValuesSource} for binary source ({@link BytesRef}).
|
||||
*/
|
||||
class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
||||
private final LongConsumer breakerConsumer;
|
||||
private final CheckedFunction<LeafReaderContext, SortedBinaryDocValues, IOException> docValuesFunc;
|
||||
private final BytesRef[] values;
|
||||
private ObjectArray<BytesRef> values;
|
||||
private ObjectArray<BytesRefBuilder> valueBuilders;
|
||||
private BytesRef currentValue;
|
||||
|
||||
BinaryValuesSource(MappedFieldType fieldType, CheckedFunction<LeafReaderContext, SortedBinaryDocValues, IOException> docValuesFunc,
|
||||
DocValueFormat format, Object missing, int size, int reverseMul) {
|
||||
super(format, fieldType, missing, size, reverseMul);
|
||||
BinaryValuesSource(BigArrays bigArrays, LongConsumer breakerConsumer,
|
||||
MappedFieldType fieldType, CheckedFunction<LeafReaderContext, SortedBinaryDocValues, IOException> docValuesFunc,
|
||||
DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
|
||||
super(bigArrays, format, fieldType, missingBucket, size, reverseMul);
|
||||
this.breakerConsumer = breakerConsumer;
|
||||
this.docValuesFunc = docValuesFunc;
|
||||
this.values = new BytesRef[size];
|
||||
this.values = bigArrays.newObjectArray(Math.min(size, 100));
|
||||
this.valueBuilders = bigArrays.newObjectArray(Math.min(size, 100));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void copyCurrent(int slot) {
|
||||
values[slot] = BytesRef.deepCopyOf(currentValue);
|
||||
void copyCurrent(int slot) {
|
||||
values = bigArrays.grow(values, slot+1);
|
||||
valueBuilders = bigArrays.grow(valueBuilders, slot+1);
|
||||
BytesRefBuilder builder = valueBuilders.get(slot);
|
||||
int byteSize = builder == null ? 0 : builder.bytes().length;
|
||||
if (builder == null) {
|
||||
builder = new BytesRefBuilder();
|
||||
valueBuilders.set(slot, builder);
|
||||
}
|
||||
if (missingBucket && currentValue == null) {
|
||||
values.set(slot, null);
|
||||
} else {
|
||||
assert currentValue != null;
|
||||
builder.copyBytes(currentValue);
|
||||
breakerConsumer.accept(builder.bytes().length - byteSize);
|
||||
values.set(slot, builder.get());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compare(int from, int to) {
|
||||
return compareValues(values[from], values[to]);
|
||||
int compare(int from, int to) {
|
||||
if (missingBucket) {
|
||||
if (values.get(from) == null) {
|
||||
return values.get(to) == null ? 0 : -1 * reverseMul;
|
||||
} else if (values.get(to) == null) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(values.get(from), values.get(to));
|
||||
}
|
||||
|
||||
@Override
|
||||
int compareCurrent(int slot) {
|
||||
return compareValues(currentValue, values[slot]);
|
||||
if (missingBucket) {
|
||||
if (currentValue == null) {
|
||||
return values.get(slot) == null ? 0 : -1 * reverseMul;
|
||||
} else if (values.get(slot) == null) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(currentValue, values.get(slot));
|
||||
}
|
||||
|
||||
@Override
|
||||
int compareCurrentWithAfter() {
|
||||
if (missingBucket) {
|
||||
if (currentValue == null) {
|
||||
return afterValue == null ? 0 : -1 * reverseMul;
|
||||
} else if (afterValue == null) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(currentValue, afterValue);
|
||||
}
|
||||
|
||||
|
@ -76,7 +120,9 @@ class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
|
||||
@Override
|
||||
void setAfter(Comparable<?> value) {
|
||||
if (value.getClass() == String.class) {
|
||||
if (missingBucket && value == null) {
|
||||
afterValue = null;
|
||||
} else if (value.getClass() == String.class) {
|
||||
afterValue = format.parseBytesRef(value.toString());
|
||||
} else {
|
||||
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
|
||||
|
@ -85,7 +131,7 @@ class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
|
||||
@Override
|
||||
BytesRef toComparable(int slot) {
|
||||
return values[slot];
|
||||
return values.get(slot);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -100,6 +146,9 @@ class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
currentValue = dvs.nextValue();
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
} else if (missingBucket) {
|
||||
currentValue = null;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -130,5 +179,7 @@ class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void close() {}
|
||||
public void close() {
|
||||
Releasables.close(values, valueBuilders);
|
||||
}
|
||||
}
@ -0,0 +1,68 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.aggregations.bucket.composite;

import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;

/**
 * A bit array that is implemented using a growing {@link LongArray}
 * created from {@link BigArrays}.
 * The underlying long array grows lazily based on the biggest index
 * that needs to be set.
 */
final class BitArray implements Releasable {
    private final BigArrays bigArrays;
    private LongArray bits;

    BitArray(BigArrays bigArrays, int initialSize) {
        this.bigArrays = bigArrays;
        this.bits = bigArrays.newLongArray(initialSize, true);
    }

    public void set(int index) {
        fill(index, true);
    }

    public void clear(int index) {
        fill(index, false);
    }

    public boolean get(int index) {
        int wordNum = index >> 6;
        long bitmask = 1L << index;
        return (bits.get(wordNum) & bitmask) != 0;
    }

    private void fill(int index, boolean bit) {
        int wordNum = index >> 6;
        bits = bigArrays.grow(bits, wordNum + 1);
        long bitmask = 1L << index;
        long value = bit ? bits.get(wordNum) | bitmask : bits.get(wordNum) & ~bitmask;
        bits.set(wordNum, value);
    }

    @Override
    public void close() {
        Releasables.close(bits);
    }
}
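The word and mask arithmetic above is easy to check in isolation: index >> 6 selects the 64-bit word and 1L << index selects the bit within it, because Java masks long shift counts to the low six bits. The following self-contained sketch (plain arrays instead of BigArrays, a hypothetical class name) mirrors that logic and can be run directly.

import java.util.Arrays;

public class SimpleBitArraySketch {
    private long[] words = new long[1];

    public void set(int index)   { fill(index, true); }
    public void clear(int index) { fill(index, false); }

    public boolean get(int index) {
        int wordNum = index >> 6;            // which 64-bit word
        long bitmask = 1L << index;          // same as 1L << (index & 63)
        return wordNum < words.length && (words[wordNum] & bitmask) != 0;
    }

    private void fill(int index, boolean bit) {
        int wordNum = index >> 6;
        if (wordNum >= words.length) {       // grow lazily, like bigArrays.grow
            words = Arrays.copyOf(words, wordNum + 1);
        }
        long bitmask = 1L << index;
        words[wordNum] = bit ? words[wordNum] | bitmask : words[wordNum] & ~bitmask;
    }

    public static void main(String[] args) {
        SimpleBitArraySketch bits = new SimpleBitArraySketch();
        bits.set(3);
        bits.set(130);                       // forces growth to three words
        System.out.println(bits.get(3) + " " + bits.get(4) + " " + bits.get(130)); // true false true
    }
}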
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.search.aggregations.bucket.composite;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation;
|
||||
|
||||
|
@ -66,11 +65,7 @@ public interface CompositeAggregation extends MultiBucketsAggregation {
|
|||
static void buildCompositeMap(String fieldName, Map<String, Object> composite, XContentBuilder builder) throws IOException {
|
||||
builder.startObject(fieldName);
|
||||
for (Map.Entry<String, Object> entry : composite.entrySet()) {
|
||||
if (entry.getValue().getClass() == BytesRef.class) {
|
||||
builder.field(entry.getKey(), ((BytesRef) entry.getValue()).utf8ToString());
|
||||
} else {
|
||||
builder.field(entry.getKey(), entry.getValue());
|
||||
}
|
||||
builder.field(entry.getKey(), entry.getValue());
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
|
|
@ -170,7 +170,9 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder<Comp
|
|||
throw new IllegalArgumentException("Missing value for [after." + sources.get(i).name() + "]");
|
||||
}
|
||||
Object obj = after.get(sourceName);
|
||||
if (obj instanceof Comparable) {
|
||||
if (configs[i].missingBucket() && obj == null) {
|
||||
values[i] = null;
|
||||
} else if (obj instanceof Comparable) {
|
||||
values[i] = (Comparable<?>) obj;
|
||||
} else {
|
||||
throw new IllegalArgumentException("Invalid value for [after." + sources.get(i).name() +
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.util.RoaringDocIdSet;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
|
@ -50,6 +51,7 @@ import java.util.Arrays;
|
|||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.LongUnaryOperator;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
final class CompositeAggregator extends BucketsAggregator {
|
||||
|
@ -59,9 +61,10 @@ final class CompositeAggregator extends BucketsAggregator {
|
|||
private final int[] reverseMuls;
|
||||
private final List<DocValueFormat> formats;
|
||||
|
||||
private final SingleDimensionValuesSource<?>[] sources;
|
||||
private final CompositeValuesCollectorQueue queue;
|
||||
|
||||
private final List<Entry> entries;
|
||||
private final List<Entry> entries = new ArrayList<>();
|
||||
private LeafReaderContext currentLeaf;
|
||||
private RoaringDocIdSet.Builder docIdSetBuilder;
|
||||
private BucketCollector deferredCollectors;
|
||||
|
@ -74,19 +77,19 @@ final class CompositeAggregator extends BucketsAggregator {
|
|||
this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
|
||||
this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
|
||||
this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
|
||||
final SingleDimensionValuesSource<?>[] sources =
|
||||
createValuesSources(context.bigArrays(), context.searcher().getIndexReader(), context.query(), sourceConfigs, size);
|
||||
this.queue = new CompositeValuesCollectorQueue(sources, size);
|
||||
this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
|
||||
if (rawAfterKey != null) {
|
||||
queue.setAfter(rawAfterKey.values());
|
||||
this.sources = new SingleDimensionValuesSource[sourceConfigs.length];
|
||||
for (int i = 0; i < sourceConfigs.length; i++) {
|
||||
this.sources[i] = createValuesSource(context.bigArrays(), context.searcher().getIndexReader(),
|
||||
context.query(), sourceConfigs[i], size, i);
|
||||
}
|
||||
this.entries = new ArrayList<>();
|
||||
this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey);
|
||||
this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
Releasables.close(queue);
|
||||
Releasables.close(sources);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -256,94 +259,88 @@ final class CompositeAggregator extends BucketsAggregator {
|
|||
};
|
||||
}
|
||||
|
||||
private static SingleDimensionValuesSource<?>[] createValuesSources(BigArrays bigArrays, IndexReader reader, Query query,
|
||||
CompositeValuesSourceConfig[] configs, int size) {
|
||||
final SingleDimensionValuesSource<?>[] sources = new SingleDimensionValuesSource[configs.length];
|
||||
for (int i = 0; i < sources.length; i++) {
|
||||
final int reverseMul = configs[i].reverseMul();
|
||||
if (configs[i].valuesSource() instanceof ValuesSource.Bytes.WithOrdinals && reader instanceof DirectoryReader) {
|
||||
ValuesSource.Bytes.WithOrdinals vs = (ValuesSource.Bytes.WithOrdinals) configs[i].valuesSource();
|
||||
sources[i] = new GlobalOrdinalValuesSource(
|
||||
private SingleDimensionValuesSource<?> createValuesSource(BigArrays bigArrays, IndexReader reader, Query query,
|
||||
CompositeValuesSourceConfig config, int sortRank, int size) {
|
||||
|
||||
final int reverseMul = config.reverseMul();
|
||||
if (config.valuesSource() instanceof ValuesSource.Bytes.WithOrdinals && reader instanceof DirectoryReader) {
|
||||
ValuesSource.Bytes.WithOrdinals vs = (ValuesSource.Bytes.WithOrdinals) config.valuesSource();
|
||||
SingleDimensionValuesSource<?> source = new GlobalOrdinalValuesSource(
|
||||
bigArrays,
|
||||
config.fieldType(),
|
||||
vs::globalOrdinalsValues,
|
||||
config.format(),
|
||||
config.missingBucket(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
if (sortRank == 0 && source.createSortedDocsProducerOrNull(reader, query) != null) {
|
||||
// this is the leading source and we can optimize it with the sorted docs producer but
|
||||
// we don't want to use global ordinals because the number of visited documents
|
||||
// should be low and global ordinals need one lookup per visited term.
|
||||
Releasables.close(source);
|
||||
return new BinaryValuesSource(
|
||||
bigArrays,
|
||||
configs[i].fieldType(),
|
||||
vs::globalOrdinalsValues,
|
||||
configs[i].format(),
|
||||
configs[i].missing(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
if (i == 0 && sources[i].createSortedDocsProducerOrNull(reader, query) != null) {
|
||||
// this is the leading source and we can optimize it with the sorted docs producer but
|
||||
// we don't want to use global ordinals because the number of visited documents
|
||||
// should be low and global ordinals need one lookup per visited term.
|
||||
Releasables.close(sources[i]);
|
||||
sources[i] = new BinaryValuesSource(
|
||||
configs[i].fieldType(),
|
||||
vs::bytesValues,
|
||||
configs[i].format(),
|
||||
configs[i].missing(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
}
|
||||
} else if (configs[i].valuesSource() instanceof ValuesSource.Bytes) {
|
||||
ValuesSource.Bytes vs = (ValuesSource.Bytes) configs[i].valuesSource();
|
||||
sources[i] = new BinaryValuesSource(
|
||||
configs[i].fieldType(),
|
||||
this::addRequestCircuitBreakerBytes,
|
||||
config.fieldType(),
|
||||
vs::bytesValues,
|
||||
configs[i].format(),
|
||||
configs[i].missing(),
|
||||
config.format(),
|
||||
config.missingBucket(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
} else {
|
||||
return source;
|
||||
}
|
||||
} else if (config.valuesSource() instanceof ValuesSource.Bytes) {
|
||||
ValuesSource.Bytes vs = (ValuesSource.Bytes) config.valuesSource();
|
||||
return new BinaryValuesSource(
|
||||
bigArrays,
|
||||
this::addRequestCircuitBreakerBytes,
|
||||
config.fieldType(),
|
||||
vs::bytesValues,
|
||||
config.format(),
|
||||
config.missingBucket(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
} else if (config.valuesSource() instanceof ValuesSource.Numeric) {
|
||||
final ValuesSource.Numeric vs = (ValuesSource.Numeric) config.valuesSource();
|
||||
if (vs.isFloatingPoint()) {
|
||||
return new DoubleValuesSource(
|
||||
bigArrays,
|
||||
config.fieldType(),
|
||||
vs::doubleValues,
|
||||
config.format(),
|
||||
config.missingBucket(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
} else if (configs[i].valuesSource() instanceof ValuesSource.Numeric) {
|
||||
final ValuesSource.Numeric vs = (ValuesSource.Numeric) configs[i].valuesSource();
|
||||
if (vs.isFloatingPoint()) {
|
||||
sources[i] = new DoubleValuesSource(
|
||||
bigArrays,
|
||||
configs[i].fieldType(),
|
||||
vs::doubleValues,
|
||||
configs[i].format(),
|
||||
configs[i].missing(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
} else {
|
||||
if (vs instanceof RoundingValuesSource) {
|
||||
sources[i] = new LongValuesSource(
|
||||
bigArrays,
|
||||
configs[i].fieldType(),
|
||||
vs::longValues,
|
||||
((RoundingValuesSource) vs)::round,
|
||||
configs[i].format(),
|
||||
configs[i].missing(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
} else {
|
||||
sources[i] = new LongValuesSource(
|
||||
bigArrays,
|
||||
configs[i].fieldType(),
|
||||
vs::longValues,
|
||||
(value) -> value,
|
||||
configs[i].format(),
|
||||
configs[i].missing(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
|
||||
}
|
||||
}
|
||||
} else {
|
||||
throw new IllegalArgumentException("Unknown value source: " + configs[i].valuesSource().getClass().getName() +
|
||||
" for field: " + sources[i].fieldType.name());
|
||||
final LongUnaryOperator rounding;
|
||||
if (vs instanceof RoundingValuesSource) {
|
||||
rounding = ((RoundingValuesSource) vs)::round;
|
||||
} else {
|
||||
rounding = LongUnaryOperator.identity();
|
||||
}
|
||||
return new LongValuesSource(
|
||||
bigArrays,
|
||||
config.fieldType(),
|
||||
vs::longValues,
|
||||
rounding,
|
||||
config.format(),
|
||||
config.missingBucket(),
|
||||
size,
|
||||
reverseMul
|
||||
);
|
||||
}
|
||||
} else {
|
||||
throw new IllegalArgumentException("Unknown values source type: " + config.valuesSource().getClass().getName() +
|
||||
" for source: " + config.name());
|
||||
}
|
||||
return sources;
|
||||
}
|
||||
|
||||
private static class Entry {
|
||||
|
|
|
@ -22,10 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.composite;
|
|||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.IntArray;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
|
||||
|
@ -36,29 +37,33 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
// the slot for the current candidate
|
||||
private static final int CANDIDATE_SLOT = Integer.MAX_VALUE;
|
||||
|
||||
private final BigArrays bigArrays;
|
||||
private final int maxSize;
|
||||
private final TreeMap<Integer, Integer> keys;
|
||||
private final SingleDimensionValuesSource<?>[] arrays;
|
||||
private final int[] docCounts;
|
||||
private boolean afterValueSet = false;
|
||||
private IntArray docCounts;
|
||||
private boolean afterKeyIsSet = false;
|
||||
|
||||
/**
|
||||
* Constructs a composite queue with the specified size and sources.
|
||||
*
|
||||
* @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets.
|
||||
* @param size The number of composite buckets to keep.
|
||||
* @param afterKey The key returned at the end of the previous round, or null if this is the first round.
|
||||
*/
|
||||
CompositeValuesCollectorQueue(SingleDimensionValuesSource<?>[] sources, int size) {
|
||||
CompositeValuesCollectorQueue(BigArrays bigArrays, SingleDimensionValuesSource<?>[] sources, int size, CompositeKey afterKey) {
|
||||
this.bigArrays = bigArrays;
|
||||
this.maxSize = size;
|
||||
this.arrays = sources;
|
||||
this.docCounts = new int[size];
|
||||
this.keys = new TreeMap<>(this::compare);
|
||||
}
|
||||
|
||||
void clear() {
|
||||
keys.clear();
|
||||
Arrays.fill(docCounts, 0);
|
||||
afterValueSet = false;
|
||||
if (afterKey != null) {
|
||||
assert afterKey.size() == sources.length;
|
||||
afterKeyIsSet = true;
|
||||
for (int i = 0; i < afterKey.size(); i++) {
|
||||
sources[i].setAfter(afterKey.get(i));
|
||||
}
|
||||
}
|
||||
this.docCounts = bigArrays.newIntArray(1, false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -94,7 +99,7 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
* Returns the lowest value (exclusive) of the leading source.
|
||||
*/
|
||||
Comparable<?> getLowerValueLeadSource() {
|
||||
return afterValueSet ? arrays[0].getAfter() : null;
|
||||
return afterKeyIsSet ? arrays[0].getAfter() : null;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -107,7 +112,7 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
* Returns the document count in <code>slot</code>.
|
||||
*/
|
||||
int getDocCount(int slot) {
|
||||
return docCounts[slot];
|
||||
return docCounts.get(slot);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -117,7 +122,8 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
for (int i = 0; i < arrays.length; i++) {
|
||||
arrays[i].copyCurrent(slot);
|
||||
}
|
||||
docCounts[slot] = 1;
|
||||
docCounts = bigArrays.grow(docCounts, slot+1);
|
||||
docCounts.set(slot, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -134,17 +140,6 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the after values for this comparator.
|
||||
*/
|
||||
void setAfter(Comparable<?>[] values) {
|
||||
assert values.length == arrays.length;
|
||||
afterValueSet = true;
|
||||
for (int i = 0; i < arrays.length; i++) {
|
||||
arrays[i].setAfter(values[i]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compares the after values with the values in <code>slot</code>.
|
||||
*/
|
||||
|
@ -207,10 +202,10 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
Integer topSlot = compareCurrent();
|
||||
if (topSlot != null) {
|
||||
// this key is already in the top N, skip it
|
||||
docCounts[topSlot] += 1;
|
||||
docCounts.increment(topSlot, 1);
|
||||
return topSlot;
|
||||
}
|
||||
if (afterValueSet && compareCurrentWithAfter() <= 0) {
|
||||
if (afterKeyIsSet && compareCurrentWithAfter() <= 0) {
|
||||
// this key is greater than the top value collected in the previous round, skip it
|
||||
return -1;
|
||||
}
|
||||
|
@ -239,9 +234,8 @@ final class CompositeValuesCollectorQueue implements Releasable {
|
|||
return newSlot;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
Releasables.close(arrays);
|
||||
Releasables.close(docCounts);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,11 +40,12 @@ import java.util.Objects;
|
|||
* A {@link ValuesSource} builder for {@link CompositeAggregationBuilder}
|
||||
*/
|
||||
public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSourceBuilder<AB>> implements Writeable, ToXContentFragment {
|
||||
|
||||
protected final String name;
|
||||
private String field = null;
|
||||
private Script script = null;
|
||||
private ValueType valueType = null;
|
||||
private Object missing = null;
|
||||
private boolean missingBucket = false;
|
||||
private SortOrder order = SortOrder.ASC;
|
||||
private String format = null;
|
||||
|
||||
|
@ -66,7 +67,15 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
if (in.readBoolean()) {
|
||||
this.valueType = ValueType.readFromStream(in);
|
||||
}
|
||||
this.missing = in.readGenericValue();
|
||||
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
|
||||
this.missingBucket = in.readBoolean();
|
||||
} else {
|
||||
this.missingBucket = false;
|
||||
}
|
||||
if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
|
||||
// skip missing value for BWC
|
||||
in.readGenericValue();
|
||||
}
|
||||
this.order = SortOrder.readFromStream(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
|
||||
this.format = in.readOptionalString();
|
||||
|
@ -89,7 +98,13 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
if (hasValueType) {
|
||||
valueType.writeTo(out);
|
||||
}
|
||||
out.writeGenericValue(missing);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
|
||||
out.writeBoolean(missingBucket);
|
||||
}
|
||||
if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
|
||||
// write missing value for BWC
|
||||
out.writeGenericValue(null);
|
||||
}
|
||||
order.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
|
||||
out.writeOptionalString(format);
|
||||
|
@ -110,9 +125,7 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
if (script != null) {
|
||||
builder.field("script", script);
|
||||
}
|
||||
if (missing != null) {
|
||||
builder.field("missing", missing);
|
||||
}
|
||||
builder.field("missing_bucket", missingBucket);
|
||||
if (valueType != null) {
|
||||
builder.field("value_type", valueType.getPreferredName());
|
||||
}
|
||||
|
@ -127,7 +140,7 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
|
||||
@Override
|
||||
public final int hashCode() {
|
||||
return Objects.hash(field, missing, script, valueType, order, format, innerHashCode());
|
||||
return Objects.hash(field, missingBucket, script, valueType, order, format, innerHashCode());
|
||||
}
|
||||
|
||||
protected abstract int innerHashCode();
|
||||
|
@ -142,7 +155,7 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
return Objects.equals(field, that.field()) &&
|
||||
Objects.equals(script, that.script()) &&
|
||||
Objects.equals(valueType, that.valueType()) &&
|
||||
Objects.equals(missing, that.missing()) &&
|
||||
Objects.equals(missingBucket, that.missingBucket()) &&
|
||||
Objects.equals(order, that.order()) &&
|
||||
Objects.equals(format, that.format()) &&
|
||||
innerEquals(that);
|
||||
|
@ -214,20 +227,20 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
}
|
||||
|
||||
/**
|
||||
* Sets the value to use when the source finds a missing value in a
|
||||
* document
|
||||
* If true, an explicit `null` bucket will represent documents with missing values.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public AB missing(Object missing) {
|
||||
if (missing == null) {
|
||||
throw new IllegalArgumentException("[missing] must not be null");
|
||||
}
|
||||
this.missing = missing;
|
||||
public AB missingBucket(boolean missingBucket) {
|
||||
this.missingBucket = missingBucket;
|
||||
return (AB) this;
|
||||
}
|
||||
|
||||
public Object missing() {
|
||||
return missing;
|
||||
/**
|
||||
* Returns false if documents with missing values are ignored; otherwise missing values are
* represented by an explicit `null` value.
|
||||
*/
|
||||
public boolean missingBucket() {
|
||||
return missingBucket;
|
||||
}
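A hedged usage sketch of the new option from the request side: switching a composite source from the removed per-source `missing` value to `missing_bucket`. The builder classes and the missingBucket(boolean) setter come from this change; the aggregation name and the "product.keyword" field are illustrative, and an Elasticsearch client/server dependency on the classpath is assumed.

import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.util.Collections;

public class MissingBucketSketch {
    public static SearchSourceBuilder searchWithMissingBucket() {
        TermsValuesSourceBuilder byProduct = new TermsValuesSourceBuilder("product")
            .field("product.keyword")
            .missingBucket(true);   // documents without the field land in an explicit null bucket
        CompositeAggregationBuilder composite =
            new CompositeAggregationBuilder("my_buckets", Collections.singletonList(byProduct));
        return new SearchSourceBuilder().size(0).aggregation(composite);
    }
}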
|
||||
|
||||
/**
|
||||
|
@ -290,13 +303,13 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
|
|||
|
||||
public final CompositeValuesSourceConfig build(SearchContext context) throws IOException {
|
||||
ValuesSourceConfig<?> config = ValuesSourceConfig.resolve(context.getQueryShardContext(),
|
||||
valueType, field, script, missing, null, format);
|
||||
valueType, field, script, null,null, format);
|
||||
|
||||
if (config.unmapped() && field != null && config.missing() == null) {
|
||||
if (config.unmapped() && field != null && missingBucket == false) {
|
||||
// this source cannot produce any values so we refuse to build
|
||||
// since composite buckets are not created on null values
|
||||
// since composite buckets are not created on null values by default.
|
||||
throw new QueryShardException(context.getQueryShardContext(),
|
||||
"failed to find field [" + field + "] and [missing] is not provided");
|
||||
"failed to find field [" + field + "] and [missing_bucket] is not set");
|
||||
}
|
||||
return innerBuild(context, config);
|
||||
}
|
||||
|
|
|
@ -32,7 +32,7 @@ class CompositeValuesSourceConfig {
|
|||
private final ValuesSource vs;
|
||||
private final DocValueFormat format;
|
||||
private final int reverseMul;
|
||||
private final Object missing;
|
||||
private final boolean missingBucket;
|
||||
|
||||
/**
|
||||
* Creates a new {@link CompositeValuesSourceConfig}.
|
||||
|
@ -41,16 +41,15 @@ class CompositeValuesSourceConfig {
|
|||
* @param vs The underlying {@link ValuesSource}.
|
||||
* @param format The {@link DocValueFormat} of this source.
|
||||
* @param order The sort order associated with this source.
|
||||
* @param missing The missing value or null if documents with missing value should be ignored.
|
||||
*/
|
||||
CompositeValuesSourceConfig(String name, @Nullable MappedFieldType fieldType, ValuesSource vs, DocValueFormat format,
|
||||
SortOrder order, @Nullable Object missing) {
|
||||
SortOrder order, boolean missingBucket) {
|
||||
this.name = name;
|
||||
this.fieldType = fieldType;
|
||||
this.vs = vs;
|
||||
this.format = format;
|
||||
this.reverseMul = order == SortOrder.ASC ? 1 : -1;
|
||||
this.missing = missing;
|
||||
this.missingBucket = missingBucket;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -83,10 +82,10 @@ class CompositeValuesSourceConfig {
|
|||
}
|
||||
|
||||
/**
|
||||
* The missing value for this configuration or null if documents with missing value should be ignored.
|
||||
* If true, an explicit `null` bucket represents documents with missing values.
|
||||
*/
|
||||
Object missing() {
|
||||
return missing;
|
||||
boolean missingBucket() {
|
||||
return missingBucket;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -38,9 +38,7 @@ class CompositeValuesSourceParserHelper {
|
|||
ValueType targetValueType) {
|
||||
objectParser.declareField(VB::field, XContentParser::text,
|
||||
new ParseField("field"), ObjectParser.ValueType.STRING);
|
||||
|
||||
objectParser.declareField(VB::missing, XContentParser::objectText,
|
||||
new ParseField("missing"), ObjectParser.ValueType.VALUE);
|
||||
objectParser.declareBoolean(VB::missingBucket, new ParseField("missing_bucket"));
|
||||
|
||||
objectParser.declareField(VB::valueType, p -> {
|
||||
ValueType valueType = ValueType.resolveForScript(p.text());
|
||||
|
|
|
@ -226,7 +226,7 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild
|
|||
// is specified in the builder.
|
||||
final DocValueFormat docValueFormat = format() == null ? DocValueFormat.RAW : config.format();
|
||||
final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
|
||||
return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order(), missing());
|
||||
return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order(), missingBucket());
|
||||
} else {
|
||||
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
|
||||
}
|
||||
|
|
|
@ -38,34 +38,67 @@ import java.io.IOException;
|
|||
*/
|
||||
class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
|
||||
private final CheckedFunction<LeafReaderContext, SortedNumericDoubleValues, IOException> docValuesFunc;
|
||||
private final DoubleArray values;
|
||||
private final BitArray bits;
|
||||
private DoubleArray values;
|
||||
private double currentValue;
|
||||
private boolean missingCurrentValue;
|
||||
|
||||
DoubleValuesSource(BigArrays bigArrays, MappedFieldType fieldType,
|
||||
CheckedFunction<LeafReaderContext, SortedNumericDoubleValues, IOException> docValuesFunc,
|
||||
DocValueFormat format, Object missing, int size, int reverseMul) {
|
||||
super(format, fieldType, missing, size, reverseMul);
|
||||
DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
|
||||
super(bigArrays, format, fieldType, missingBucket, size, reverseMul);
|
||||
this.docValuesFunc = docValuesFunc;
|
||||
this.values = bigArrays.newDoubleArray(size, false);
|
||||
this.bits = missingBucket ? new BitArray(bigArrays, 100) : null;
|
||||
this.values = bigArrays.newDoubleArray(Math.min(size, 100), false);
|
||||
}
|
||||
|
||||
@Override
|
||||
void copyCurrent(int slot) {
|
||||
values.set(slot, currentValue);
|
||||
values = bigArrays.grow(values, slot+1);
|
||||
if (missingBucket && missingCurrentValue) {
|
||||
bits.clear(slot);
|
||||
} else {
|
||||
assert missingCurrentValue == false;
|
||||
if (missingBucket) {
|
||||
bits.set(slot);
|
||||
}
|
||||
values.set(slot, currentValue);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
int compare(int from, int to) {
|
||||
if (missingBucket) {
|
||||
if (bits.get(from) == false) {
|
||||
return bits.get(to) ? -1 * reverseMul : 0;
|
||||
} else if (bits.get(to) == false) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(values.get(from), values.get(to));
|
||||
}
|
||||
|
||||
@Override
|
||||
int compareCurrent(int slot) {
|
||||
if (missingBucket) {
|
||||
if (missingCurrentValue) {
|
||||
return bits.get(slot) ? -1 * reverseMul : 0;
|
||||
} else if (bits.get(slot) == false) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(currentValue, values.get(slot));
|
||||
}
|
||||
|
||||
@Override
|
||||
int compareCurrentWithAfter() {
|
||||
if (missingBucket) {
|
||||
if (missingCurrentValue) {
|
||||
return afterValue != null ? -1 * reverseMul : 0;
|
||||
} else if (afterValue == null) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(currentValue, afterValue);
|
||||
}
|
||||
|
||||
|
@ -75,7 +108,9 @@ class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
|
|||
|
||||
@Override
|
||||
void setAfter(Comparable<?> value) {
|
||||
if (value instanceof Number) {
|
||||
if (missingBucket && value == null) {
|
||||
afterValue = null;
|
||||
} else if (value instanceof Number) {
|
||||
afterValue = ((Number) value).doubleValue();
|
||||
} else {
|
||||
afterValue = format.parseDouble(value.toString(), false, () -> {
|
||||
|
@ -86,6 +121,10 @@ class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
|
|||
|
||||
@Override
|
||||
Double toComparable(int slot) {
|
||||
if (missingBucket && bits.get(slot) == false) {
|
||||
return null;
|
||||
}
|
||||
assert missingBucket == false || bits.get(slot);
|
||||
return values.get(slot);
|
||||
}
|
||||
|
||||
|
@ -99,8 +138,12 @@ class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
|
|||
int num = dvs.docValueCount();
|
||||
for (int i = 0; i < num; i++) {
|
||||
currentValue = dvs.nextValue();
|
||||
missingCurrentValue = false;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
} else if (missingBucket) {
|
||||
missingCurrentValue = true;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -127,6 +170,6 @@ class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
|
|||
|
||||
@Override
|
||||
public void close() {
|
||||
Releasables.close(values);
|
||||
Releasables.close(values, bits);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
|
|||
*/
|
||||
class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
||||
private final CheckedFunction<LeafReaderContext, SortedSetDocValues, IOException> docValuesFunc;
|
||||
private final LongArray values;
|
||||
private LongArray values;
|
||||
private SortedSetDocValues lookup;
|
||||
private long currentValue;
|
||||
private Long afterValueGlobalOrd;
|
||||
|
@ -52,16 +52,17 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
private long lastLookupOrd = -1;
|
||||
private BytesRef lastLookupValue;
|
||||
|
||||
GlobalOrdinalValuesSource(BigArrays bigArrays,
|
||||
MappedFieldType type, CheckedFunction<LeafReaderContext, SortedSetDocValues, IOException> docValuesFunc,
|
||||
DocValueFormat format, Object missing, int size, int reverseMul) {
|
||||
super(format, type, missing, size, reverseMul);
|
||||
GlobalOrdinalValuesSource(BigArrays bigArrays, MappedFieldType type,
|
||||
CheckedFunction<LeafReaderContext, SortedSetDocValues, IOException> docValuesFunc,
|
||||
DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
|
||||
super(bigArrays, format, type, missingBucket, size, reverseMul);
|
||||
this.docValuesFunc = docValuesFunc;
|
||||
this.values = bigArrays.newLongArray(size, false);
|
||||
this.values = bigArrays.newLongArray(Math.min(size, 100), false);
|
||||
}
|
||||
|
||||
@Override
|
||||
void copyCurrent(int slot) {
|
||||
values = bigArrays.grow(values, slot+1);
|
||||
values.set(slot, currentValue);
|
||||
}
|
||||
|
||||
|
@ -89,7 +90,10 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
|
||||
@Override
|
||||
void setAfter(Comparable<?> value) {
|
||||
if (value.getClass() == String.class) {
|
||||
if (missingBucket && value == null) {
|
||||
afterValue = null;
|
||||
afterValueGlobalOrd = -1L;
|
||||
} else if (value.getClass() == String.class) {
|
||||
afterValue = format.parseBytesRef(value.toString());
|
||||
} else {
|
||||
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
|
||||
|
@ -99,10 +103,12 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
@Override
|
||||
BytesRef toComparable(int slot) throws IOException {
|
||||
long globalOrd = values.get(slot);
|
||||
if (globalOrd == lastLookupOrd) {
|
||||
if (missingBucket && globalOrd == -1) {
|
||||
return null;
|
||||
} else if (globalOrd == lastLookupOrd) {
|
||||
return lastLookupValue;
|
||||
} else {
|
||||
lastLookupOrd= globalOrd;
|
||||
lastLookupOrd = globalOrd;
|
||||
lastLookupValue = BytesRef.deepCopyOf(lookup.lookupOrd(values.get(slot)));
|
||||
return lastLookupValue;
|
||||
}
|
||||
|
@ -123,6 +129,9 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
currentValue = ord;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
} else if (missingBucket) {
|
||||
currentValue = -1;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -143,7 +152,7 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
|
|||
|
||||
@Override
|
||||
public void collect(int doc, long bucket) throws IOException {
|
||||
if (!currentValueIsSet) {
|
||||
if (currentValueIsSet == false) {
|
||||
if (dvs.advanceExact(doc)) {
|
||||
long ord;
|
||||
while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) {
|
||||
|
|
|
@ -115,7 +115,7 @@ public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<H
|
|||
ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig;
|
||||
final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval);
|
||||
final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
|
||||
return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order(), missing());
|
||||
return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order(), missingBucket());
|
||||
} else {
|
||||
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
|
||||
}
|
||||
|
|
|
@ -332,6 +332,14 @@ public class InternalComposite
|
|||
@Override
|
||||
public int compareKey(InternalBucket other) {
|
||||
for (int i = 0; i < key.size(); i++) {
|
||||
if (key.get(i) == null) {
|
||||
if (other.key.get(i) == null) {
|
||||
continue;
|
||||
}
|
||||
return -1 * reverseMuls[i];
|
||||
} else if (other.key.get(i) == null) {
|
||||
return reverseMuls[i];
|
||||
}
|
||||
assert key.get(i).getClass() == other.key.get(i).getClass();
|
||||
@SuppressWarnings("unchecked")
|
||||
int cmp = ((Comparable) key.get(i)).compareTo(other.key.get(i)) * reverseMuls[i];
|
||||
|
@ -357,26 +365,29 @@ public class InternalComposite
|
|||
* for numbers and a string for {@link BytesRef}s.
|
||||
*/
|
||||
static Object formatObject(Object obj, DocValueFormat format) {
|
||||
if (obj == null) {
|
||||
return null;
|
||||
}
|
||||
if (obj.getClass() == BytesRef.class) {
|
||||
BytesRef value = (BytesRef) obj;
|
||||
if (format == DocValueFormat.RAW) {
|
||||
return value.utf8ToString();
|
||||
} else {
|
||||
return format.format((BytesRef) obj);
|
||||
return format.format(value);
|
||||
}
|
||||
} else if (obj.getClass() == Long.class) {
|
||||
Long value = (Long) obj;
|
||||
long value = (long) obj;
|
||||
if (format == DocValueFormat.RAW) {
|
||||
return value;
|
||||
} else {
|
||||
return format.format(value);
|
||||
}
|
||||
} else if (obj.getClass() == Double.class) {
|
||||
Double value = (Double) obj;
|
||||
double value = (double) obj;
|
||||
if (format == DocValueFormat.RAW) {
|
||||
return value;
|
||||
} else {
|
||||
return format.format((Double) obj);
|
||||
return format.format(value);
|
||||
}
|
||||
}
|
||||
return obj;
|
||||
|
|
|
@ -45,38 +45,73 @@ import java.util.function.ToLongFunction;
|
|||
* A {@link SingleDimensionValuesSource} for longs.
|
||||
*/
|
||||
class LongValuesSource extends SingleDimensionValuesSource<Long> {
|
||||
private final BigArrays bigArrays;
|
||||
private final CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc;
|
||||
private final LongUnaryOperator rounding;
|
||||
|
||||
private final LongArray values;
|
||||
private BitArray bits;
|
||||
private LongArray values;
|
||||
private long currentValue;
|
||||
private boolean missingCurrentValue;
|
||||
|
||||
LongValuesSource(BigArrays bigArrays, MappedFieldType fieldType,
|
||||
CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc,
|
||||
LongUnaryOperator rounding, DocValueFormat format, Object missing, int size, int reverseMul) {
|
||||
super(format, fieldType, missing, size, reverseMul);
|
||||
LongValuesSource(BigArrays bigArrays,
|
||||
MappedFieldType fieldType, CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc,
|
||||
LongUnaryOperator rounding, DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
|
||||
super(bigArrays, format, fieldType, missingBucket, size, reverseMul);
|
||||
this.bigArrays = bigArrays;
|
||||
this.docValuesFunc = docValuesFunc;
|
||||
this.rounding = rounding;
|
||||
this.values = bigArrays.newLongArray(size, false);
|
||||
this.bits = missingBucket ? new BitArray(bigArrays, Math.min(size, 100)) : null;
|
||||
this.values = bigArrays.newLongArray(Math.min(size, 100), false);
|
||||
}
|
||||
|
||||
@Override
|
||||
void copyCurrent(int slot) {
|
||||
values.set(slot, currentValue);
|
||||
values = bigArrays.grow(values, slot+1);
|
||||
if (missingBucket && missingCurrentValue) {
|
||||
bits.clear(slot);
|
||||
} else {
|
||||
assert missingCurrentValue == false;
|
||||
if (missingBucket) {
|
||||
bits.set(slot);
|
||||
}
|
||||
values.set(slot, currentValue);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
int compare(int from, int to) {
|
||||
if (missingBucket) {
|
||||
if (bits.get(from) == false) {
|
||||
return bits.get(to) ? -1 * reverseMul : 0;
|
||||
} else if (bits.get(to) == false) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(values.get(from), values.get(to));
|
||||
}
|
||||
|
||||
@Override
|
||||
int compareCurrent(int slot) {
|
||||
if (missingBucket) {
|
||||
if (missingCurrentValue) {
|
||||
return bits.get(slot) ? -1 * reverseMul : 0;
|
||||
} else if (bits.get(slot) == false) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(currentValue, values.get(slot));
|
||||
}
|
||||
|
||||
@Override
|
||||
int compareCurrentWithAfter() {
|
||||
if (missingBucket) {
|
||||
if (missingCurrentValue) {
|
||||
return afterValue != null ? -1 * reverseMul : 0;
|
||||
} else if (afterValue == null) {
|
||||
return reverseMul;
|
||||
}
|
||||
}
|
||||
return compareValues(currentValue, afterValue);
|
||||
}
|
||||
|
||||
|
@ -86,7 +121,9 @@ class LongValuesSource extends SingleDimensionValuesSource<Long> {
|
|||
|
||||
@Override
|
||||
void setAfter(Comparable<?> value) {
|
||||
if (value instanceof Number) {
|
||||
if (missingBucket && value == null) {
|
||||
afterValue = null;
|
||||
} else if (value instanceof Number) {
|
||||
afterValue = ((Number) value).longValue();
|
||||
} else {
|
||||
// for date histogram source with "format", the after value is formatted
|
||||
|
@ -99,6 +136,9 @@ class LongValuesSource extends SingleDimensionValuesSource<Long> {
|
|||
|
||||
@Override
|
||||
Long toComparable(int slot) {
|
||||
if (missingBucket && bits.get(slot) == false) {
|
||||
return null;
|
||||
}
|
||||
return values.get(slot);
|
||||
}
|
||||
|
||||
|
@ -112,8 +152,12 @@ class LongValuesSource extends SingleDimensionValuesSource<Long> {
|
|||
int num = dvs.docValueCount();
|
||||
for (int i = 0; i < num; i++) {
|
||||
currentValue = dvs.nextValue();
|
||||
missingCurrentValue = false;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
} else if (missingBucket) {
|
||||
missingCurrentValue = true;
|
||||
next.collect(doc, bucket);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -182,6 +226,6 @@ class LongValuesSource extends SingleDimensionValuesSource<Long> {
|
|||
|
||||
@Override
|
||||
public void close() {
|
||||
Releasables.close(values);
|
||||
Releasables.close(values, bits);
|
||||
}
|
||||
}
|
||||
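In the LongValuesSource changes above, a per-slot bit records whether a real value was collected, and a slot with the bit cleared (the missing bucket) compares as smaller than any value under ascending order, with reverseMul flipping the result for descending sorts. A minimal, self-contained sketch of that comparison rule, using a plain long[] and java.util.BitSet instead of the BigArrays-backed structures; the class and field names below are illustrative, not the actual Elasticsearch types:

import java.util.BitSet;

// Illustrative sketch: a missing value compares as smaller than any long,
// mirroring the bits/values pair added to LongValuesSource above.
final class SlotLongs {
    private final long[] values;
    private final BitSet present;  // set bit == slot holds a real value
    private final int reverseMul;  // 1 for ascending, -1 for descending

    SlotLongs(int size, int reverseMul) {
        this.values = new long[size];
        this.present = new BitSet(size);
        this.reverseMul = reverseMul;
    }

    void set(int slot, Long value) {
        if (value == null) {
            present.clear(slot);
        } else {
            present.set(slot);
            values[slot] = value;
        }
    }

    int compare(int from, int to) {
        if (present.get(from) == false) {
            return present.get(to) ? -1 * reverseMul : 0;  // missing sorts first when ascending
        } else if (present.get(to) == false) {
            return reverseMul;
        }
        return Long.compare(values[from], values[to]) * reverseMul;
    }
}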
|
|
|
@ -25,6 +25,7 @@ import org.apache.lucene.index.LeafReaderContext;
|
|||
import org.apache.lucene.search.Query;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.search.DocValueFormat;
|
||||
import org.elasticsearch.search.aggregations.LeafBucketCollector;
|
||||
|
@ -36,11 +37,11 @@ import java.io.IOException;
|
|||
* A source that can record and compare values of similar type.
|
||||
*/
|
||||
abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements Releasable {
|
||||
protected final BigArrays bigArrays;
|
||||
protected final DocValueFormat format;
|
||||
@Nullable
|
||||
protected final MappedFieldType fieldType;
|
||||
@Nullable
|
||||
protected final Object missing;
|
||||
protected final boolean missingBucket;
|
||||
|
||||
protected final int size;
|
||||
protected final int reverseMul;
|
||||
|
@ -50,17 +51,20 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R
|
|||
/**
|
||||
* Creates a new {@link SingleDimensionValuesSource}.
|
||||
*
|
||||
* @param bigArrays The big arrays object.
|
||||
* @param format The format of the source.
|
||||
* @param fieldType The field type or null if the source is a script.
|
||||
     * @param missing The missing value or null if documents with a missing value should be ignored.
     * @param missingBucket If true, an explicit null bucket represents documents with missing values.
     * @param size The number of values to record.
     * @param reverseMul -1 if the natural order ({@link SortOrder#ASC}) should be reversed.
     */
|
||||
SingleDimensionValuesSource(DocValueFormat format, @Nullable MappedFieldType fieldType, @Nullable Object missing,
|
||||
SingleDimensionValuesSource(BigArrays bigArrays, DocValueFormat format,
|
||||
@Nullable MappedFieldType fieldType, boolean missingBucket,
|
||||
int size, int reverseMul) {
|
||||
this.bigArrays = bigArrays;
|
||||
this.format = format;
|
||||
this.fieldType = fieldType;
|
||||
this.missing = missing;
|
||||
this.missingBucket = missingBucket;
|
||||
this.size = size;
|
||||
this.reverseMul = reverseMul;
|
||||
this.afterValue = null;
|
||||
|
@ -138,7 +142,7 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R
|
|||
*/
|
||||
protected boolean checkIfSortedDocsIsApplicable(IndexReader reader, MappedFieldType fieldType) {
|
||||
if (fieldType == null ||
|
||||
missing != null ||
|
||||
(missingBucket && afterValue == null) ||
|
||||
fieldType.indexOptions() == IndexOptions.NONE ||
|
||||
// inverse of the natural order
|
||||
reverseMul == -1) {
|
||||
|
|
|
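For context on the guard above: the sorted-docs producer may skip documents outside the current range, so it is only safe when ordering is ascending, the field is indexed, and no missing values can still appear in the result, which is why the check now also bails out while missingBucket is set but no after key has been seen. A hedged restatement of that guard as a standalone predicate (names and parameters are illustrative, not the real API):

// Sketch of the applicability check behind checkIfSortedDocsIsApplicable.
final class SortedDocsGuard {
    static boolean sortedDocsApplicable(boolean fieldIndexed, boolean hasMissingValue,
                                        boolean missingBucket, boolean afterKeySeen,
                                        int reverseMul) {
        if (fieldIndexed == false) return false;
        if (hasMissingValue) return false;                          // "missing" substitutes values per doc
        if (missingBucket && afterKeySeen == false) return false;   // the null bucket is still pending
        return reverseMul == 1;                                     // natural (ascending) order only
    }
}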
@ -61,8 +61,9 @@ class TermsSortedDocsProducer extends SortedDocsProducer {
|
|||
        DocIdSetBuilder builder = fillDocIdSet ? new DocIdSetBuilder(context.reader().maxDoc(), terms) : null;
        PostingsEnum reuse = null;
        boolean first = true;
        final BytesRef upper = upperValue == null ? null : BytesRef.deepCopyOf(upperValue);
        do {
            if (upperValue != null && upperValue.compareTo(te.term()) < 0) {
            if (upper != null && upper.compareTo(te.term()) < 0) {
                break;
            }
            reuse = te.postings(reuse, PostingsEnum.NONE);
|
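The change above deep-copies the upper bound before iterating the TermsEnum: te.term() typically returns a reusable BytesRef whose backing bytes are overwritten on the next advance, so comparing against the original, aliased upperValue could silently compare against mutated bytes. A small, self-contained Lucene example of the aliasing hazard and of BytesRef.deepCopyOf as the fix (assumes lucene-core on the classpath; the builder stands in for a reused term):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public class DeepCopyExample {
    public static void main(String[] args) {
        BytesRefBuilder scratch = new BytesRefBuilder();       // stands in for a reused te.term()
        scratch.copyChars("zzz");

        BytesRef aliased = scratch.get();                      // shares the builder's byte[]
        BytesRef copied = BytesRef.deepCopyOf(scratch.get());  // owns its bytes

        scratch.copyChars("aaa");                              // the enum advances, buffer is reused

        System.out.println(aliased.utf8ToString());            // "aaa", silently changed
        System.out.println(copied.utf8ToString());             // "zzz", stable upper bound
    }
}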
|
|
@ -93,6 +93,6 @@ public class TermsValuesSourceBuilder extends CompositeValuesSourceBuilder<Terms
|
|||
} else {
|
||||
format = config.format();
|
||||
}
|
||||
return new CompositeValuesSourceConfig(name, fieldType, vs, format, order(), missing());
|
||||
return new CompositeValuesSourceConfig(name, fieldType, vs, format, order(), missingBucket());
|
||||
}
|
||||
}
|
||||
@ -36,10 +36,6 @@ public class InternalDateRange extends InternalRange<InternalDateRange.Bucket, I
|
|||
|
||||
public static class Bucket extends InternalRange.Bucket {
|
||||
|
||||
public Bucket(boolean keyed, DocValueFormat formatter) {
|
||||
super(keyed, formatter);
|
||||
}
|
||||
|
||||
public Bucket(String key, double from, double to, long docCount, List<InternalAggregation> aggregations, boolean keyed,
|
||||
DocValueFormat formatter) {
|
||||
super(key, from, to, docCount, new InternalAggregations(aggregations), keyed, formatter);
|
||||
|
|
|
@ -35,10 +35,6 @@ public class InternalGeoDistance extends InternalRange<InternalGeoDistance.Bucke
|
|||
|
||||
static class Bucket extends InternalRange.Bucket {
|
||||
|
||||
Bucket(boolean keyed) {
|
||||
super(keyed, DocValueFormat.RAW);
|
||||
}
|
||||
|
||||
Bucket(String key, double from, double to, long docCount, List<InternalAggregation> aggregations, boolean keyed) {
|
||||
this(key, from, to, docCount, new InternalAggregations(aggregations), keyed);
|
||||
}
|
||||
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.search.aggregations.bucket.range;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
@ -44,21 +45,17 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
|
||||
protected final transient boolean keyed;
|
||||
protected final transient DocValueFormat format;
|
||||
protected double from;
|
||||
protected double to;
|
||||
private long docCount;
|
||||
InternalAggregations aggregations;
|
||||
private String key;
|
||||
|
||||
public Bucket(boolean keyed, DocValueFormat formatter) {
|
||||
this.keyed = keyed;
|
||||
this.format = formatter;
|
||||
}
|
||||
protected final double from;
|
||||
protected final double to;
|
||||
private final long docCount;
|
||||
private final InternalAggregations aggregations;
|
||||
private final String key;
|
||||
|
||||
public Bucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed,
|
||||
DocValueFormat formatter) {
|
||||
this(keyed, formatter);
|
||||
this.key = key != null ? key : generateKey(from, to, formatter);
|
||||
DocValueFormat format) {
|
||||
this.keyed = keyed;
|
||||
this.format = format;
|
||||
this.key = key != null ? key : generateKey(from, to, format);
|
||||
this.from = from;
|
||||
this.to = to;
|
||||
this.docCount = docCount;
|
||||
|
@ -162,16 +159,25 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
return builder;
|
||||
}
|
||||
|
||||
    protected String generateKey(double from, double to, DocValueFormat formatter) {
        StringBuilder sb = new StringBuilder();
        sb.append(Double.isInfinite(from) ? "*" : formatter.format(from));
        sb.append("-");
        sb.append(Double.isInfinite(to) ? "*" : formatter.format(to));
        return sb.toString();
    private static String generateKey(double from, double to, DocValueFormat format) {
        StringBuilder builder = new StringBuilder()
            .append(Double.isInfinite(from) ? "*" : format.format(from))
            .append("-")
            .append(Double.isInfinite(to) ? "*" : format.format(to));
        return builder.toString();
    }
|
||||
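The rewritten generateKey above produces the same keys as before, just via a chained StringBuilder: an unbounded lower or upper edge is rendered as "*". A tiny standalone illustration of the expected key shape (plain Double.toString stands in for DocValueFormat here):

public class RangeKeyExample {
    static String key(double from, double to) {
        return (Double.isInfinite(from) ? "*" : Double.toString(from))
            + "-"
            + (Double.isInfinite(to) ? "*" : Double.toString(to));
    }

    public static void main(String[] args) {
        System.out.println(key(Double.NEGATIVE_INFINITY, 100.0)); // *-100.0
        System.out.println(key(100.0, 200.0));                    // 100.0-200.0
        System.out.println(key(200.0, Double.POSITIVE_INFINITY)); // 200.0-*
    }
}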
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
|
||||
out.writeString(key);
|
||||
} else {
|
||||
out.writeOptionalString(key);
|
||||
}
|
||||
out.writeDouble(from);
|
||||
out.writeDouble(to);
|
||||
out.writeVLong(docCount);
|
||||
aggregations.writeTo(out);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -206,15 +212,15 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public R create(String name, List<B> ranges, DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,
|
||||
public R create(String name, List<B> ranges, DocValueFormat format, boolean keyed, List<PipelineAggregator> pipelineAggregators,
|
||||
Map<String, Object> metaData) {
|
||||
return (R) new InternalRange<B, R>(name, ranges, formatter, keyed, pipelineAggregators, metaData);
|
||||
return (R) new InternalRange<B, R>(name, ranges, format, keyed, pipelineAggregators, metaData);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed,
|
||||
DocValueFormat formatter) {
|
||||
return (B) new Bucket(key, from, to, docCount, aggregations, keyed, formatter);
|
||||
DocValueFormat format) {
|
||||
return (B) new Bucket(key, from, to, docCount, aggregations, keyed, format);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
|
@ -230,9 +236,9 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
}
|
||||
}
|
||||
|
||||
private List<B> ranges;
|
||||
protected DocValueFormat format;
|
||||
protected boolean keyed;
|
||||
private final List<B> ranges;
|
||||
protected final DocValueFormat format;
|
||||
protected final boolean keyed;
|
||||
|
||||
public InternalRange(String name, List<B> ranges, DocValueFormat format, boolean keyed,
|
||||
List<PipelineAggregator> pipelineAggregators,
|
||||
|
@ -253,7 +259,9 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
int size = in.readVInt();
|
||||
List<B> ranges = new ArrayList<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
String key = in.readOptionalString();
|
||||
String key = in.getVersion().onOrAfter(Version.V_6_4_0)
|
||||
? in.readString()
|
||||
: in.readOptionalString();
|
||||
ranges.add(getFactory().createBucket(key, in.readDouble(), in.readDouble(), in.readVLong(),
|
||||
InternalAggregations.readAggregations(in), keyed, format));
|
||||
}
|
||||
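The read side above mirrors the writeTo change earlier in this file: streams from 6.4.0 onwards always carry the bucket key and use writeString/readString, while older streams keep the optional encoding, so mixed-version clusters can still exchange InternalRange buckets. A condensed sketch of that wire-compatibility pattern against the StreamInput/StreamOutput API (the helper class and method names are illustrative):

import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Sketch of the version-gated key serialization used by InternalRange.Bucket above.
class BucketKeyWireFormat {
    static void writeKey(StreamOutput out, String key) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeString(key);          // key is guaranteed non-null on new streams
        } else {
            out.writeOptionalString(key);  // preserve the pre-6.4.0 encoding
        }
    }

    static String readKey(StreamInput in) throws IOException {
        return in.getVersion().onOrAfter(Version.V_6_4_0)
            ? in.readString()
            : in.readOptionalString();
    }
}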
|
@ -266,11 +274,7 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
|
|||
out.writeBoolean(keyed);
|
||||
out.writeVInt(ranges.size());
|
||||
for (B bucket : ranges) {
|
||||
out.writeOptionalString(((Bucket) bucket).key);
|
||||
out.writeDouble(bucket.from);
|
||||
out.writeDouble(bucket.to);
|
||||
out.writeVLong(((Bucket) bucket).docCount);
|
||||
bucket.aggregations.writeTo(out);
|
||||
bucket.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.transport;
|
||||
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
|
@ -27,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
|
|||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -36,6 +35,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.core.internal.io.IOUtils;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.Closeable;
|
||||
|
@ -97,6 +97,9 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
Setting.affixKeySetting("search.remote.", "skip_unavailable",
|
||||
key -> boolSetting(key, false, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS);
|
||||
|
||||
private static final Predicate<DiscoveryNode> DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion())
|
||||
&& (node.isMasterNode() == false || node.isDataNode() || node.isIngestNode());
|
||||
|
||||
private final TransportService transportService;
|
||||
private final int numRemoteConnections;
|
||||
private volatile Map<String, RemoteClusterConnection> remoteClusters = Collections.emptyMap();
|
||||
|
@ -121,13 +124,6 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
connectionListener.onResponse(null);
|
||||
} else {
|
||||
CountDown countDown = new CountDown(seeds.size());
|
||||
Predicate<DiscoveryNode> nodePredicate = (node) -> Version.CURRENT.isCompatible(node.getVersion());
|
||||
if (REMOTE_NODE_ATTRIBUTE.exists(settings)) {
|
||||
// nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for
|
||||
// cross cluster search
|
||||
String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
|
||||
nodePredicate = nodePredicate.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
|
||||
}
|
||||
remoteClusters.putAll(this.remoteClusters);
|
||||
for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {
|
||||
RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey());
|
||||
|
@ -143,7 +139,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
|
||||
if (remote == null) { // this is a new cluster we have to add a new representation
|
||||
remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections,
|
||||
nodePredicate);
|
||||
getNodePredicate(settings));
|
||||
remoteClusters.put(entry.getKey(), remote);
|
||||
}
|
||||
|
||||
|
@ -168,6 +164,15 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
|
|||
this.remoteClusters = Collections.unmodifiableMap(remoteClusters);
|
||||
}
|
||||
|
||||
    static Predicate<DiscoveryNode> getNodePredicate(Settings settings) {
        if (REMOTE_NODE_ATTRIBUTE.exists(settings)) {
            // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for cross cluster search
            String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
            return DEFAULT_NODE_PREDICATE.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
        }
        return DEFAULT_NODE_PREDICATE;
    }
|
||||
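The extracted getNodePredicate above makes the gateway-node selection testable: by default any version-compatible node that is not a dedicated master is eligible, and when the remote-node attribute setting is configured only nodes carrying that attribute with value true remain. A hedged sketch of the same composition over a simplified stand-in type (NodeInfo and the attribute name "gateway" are illustrative, not the real DiscoveryNode API):

import java.util.Map;
import java.util.function.Predicate;

// Minimal sketch of the node-selection rule factored out above.
class GatewayNodePredicateSketch {
    static class NodeInfo {
        final boolean versionCompatible;
        final boolean dedicatedMaster;
        final Map<String, String> attributes;
        NodeInfo(boolean versionCompatible, boolean dedicatedMaster, Map<String, String> attributes) {
            this.versionCompatible = versionCompatible;
            this.dedicatedMaster = dedicatedMaster;
            this.attributes = attributes;
        }
    }

    // Default rule: compatible and not a dedicated master node.
    static final Predicate<NodeInfo> DEFAULT =
        node -> node.versionCompatible && node.dedicatedMaster == false;

    // With the remote-node attribute set to e.g. "gateway" (assumed example), only nodes
    // started with node.attr.gateway: true remain eligible as cross-cluster gateway nodes.
    static Predicate<NodeInfo> restrictedTo(String attribute) {
        return DEFAULT.and(node ->
            Boolean.parseBoolean(node.attributes.getOrDefault(attribute, "false")));
    }
}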
|
||||
/**
|
||||
* Returns <code>true</code> if at least one remote cluster is configured
|
||||
*/
|
||||
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.admin.cluster.repositories.verify;
|
||||
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
public class VerifyRepositoryResponseTests extends AbstractXContentTestCase<VerifyRepositoryResponse> {
|
||||
|
||||
@Override
|
||||
protected VerifyRepositoryResponse doParseInstance(XContentParser parser) {
|
||||
return VerifyRepositoryResponse.fromXContent(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected VerifyRepositoryResponse createTestInstance() {
|
||||
VerifyRepositoryResponse response = new VerifyRepositoryResponse();
|
||||
List<VerifyRepositoryResponse.NodeView> nodes = new ArrayList<>();
|
||||
nodes.add(new VerifyRepositoryResponse.NodeView("node-id", "node-name"));
|
||||
response.setNodes(nodes);
|
||||
return response;
|
||||
}
|
||||
}
|
|
@ -19,18 +19,19 @@
|
|||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.cluster.metadata.AliasMetaData.Builder;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.common.xcontent.XContent;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.test.AbstractXContentTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class AliasMetaDataTests extends ESTestCase {
|
||||
public class AliasMetaDataTests extends AbstractXContentTestCase<AliasMetaData> {
|
||||
|
||||
public void testSerialization() throws IOException {
|
||||
final AliasMetaData before =
|
||||
|
@ -52,4 +53,49 @@ public class AliasMetaDataTests extends ESTestCase {
|
|||
|
||||
assertThat(after, equalTo(before));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AliasMetaData createTestInstance() {
|
||||
return createTestItem();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Predicate<String> getRandomFieldsExcludeFilter() {
|
||||
return p -> p.equals("") // do not add elements at the top-level as any element at this level is parsed as a new alias
|
||||
|| p.contains(".filter"); // do not insert random data into AliasMetaData#filter
|
||||
}
|
||||
|
||||
@Override
|
||||
protected AliasMetaData doParseInstance(XContentParser parser) throws IOException {
|
||||
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
|
||||
parser.nextToken();
|
||||
}
|
||||
assertEquals(XContentParser.Token.FIELD_NAME, parser.currentToken());
|
||||
AliasMetaData aliasMetaData = AliasMetaData.Builder.fromXContent(parser);
|
||||
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
|
||||
return aliasMetaData;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean supportsUnknownFields() {
|
||||
return true;
|
||||
}
|
||||
|
||||
private static AliasMetaData createTestItem() {
|
||||
Builder builder = AliasMetaData.builder(randomAlphaOfLengthBetween(3, 10));
|
||||
if (randomBoolean()) {
|
||||
builder.routing(randomAlphaOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.searchRouting(randomAlphaOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.indexRouting(randomAlphaOfLengthBetween(3, 10));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
builder.filter("{\"term\":{\"year\":2016}}");
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -607,7 +607,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix").endObject()
|
||||
.startObject("index_prefixes").endObject()
|
||||
.field("index_options", "offsets")
|
||||
.endObject().endObject().endObject().endObject());
|
||||
|
||||
|
@ -623,7 +623,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix").endObject()
|
||||
.startObject("index_prefixes").endObject()
|
||||
.field("index_options", "positions")
|
||||
.endObject().endObject().endObject().endObject());
|
||||
|
||||
|
@ -640,7 +640,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix").endObject()
|
||||
.startObject("index_prefixes").endObject()
|
||||
.field("term_vector", "with_positions_offsets")
|
||||
.endObject().endObject().endObject().endObject());
|
||||
|
||||
|
@ -657,7 +657,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix").endObject()
|
||||
.startObject("index_prefixes").endObject()
|
||||
.field("term_vector", "with_positions")
|
||||
.endObject().endObject().endObject().endObject());
|
||||
|
||||
|
@ -682,7 +682,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix")
|
||||
.startObject("index_prefixes")
|
||||
.field("min_chars", 1)
|
||||
.field("max_chars", 10)
|
||||
.endObject()
|
||||
|
@ -716,7 +716,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix").endObject()
|
||||
.startObject("index_prefixes").endObject()
|
||||
.endObject().endObject()
|
||||
.endObject().endObject());
|
||||
CompressedXContent json = new CompressedXContent(mapping);
|
||||
|
@ -741,7 +741,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix")
|
||||
.startObject("index_prefixes")
|
||||
.field("min_chars", 1)
|
||||
.field("max_chars", 10)
|
||||
.endObject()
|
||||
|
@ -760,7 +760,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix")
|
||||
.startObject("index_prefixes")
|
||||
.field("min_chars", 1)
|
||||
.field("max_chars", 10)
|
||||
.endObject()
|
||||
|
@ -783,7 +783,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix")
|
||||
.startObject("index_prefixes")
|
||||
.field("min_chars", 11)
|
||||
.field("max_chars", 10)
|
||||
.endObject()
|
||||
|
@ -800,7 +800,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix")
|
||||
.startObject("index_prefixes")
|
||||
.field("min_chars", 0)
|
||||
.field("max_chars", 10)
|
||||
.endObject()
|
||||
|
@ -817,7 +817,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.startObject("index_prefix")
|
||||
.startObject("index_prefixes")
|
||||
.field("min_chars", 1)
|
||||
.field("max_chars", 25)
|
||||
.endObject()
|
||||
|
@ -834,13 +834,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("analyzer", "english")
|
||||
.field("index_prefix", (String) null)
|
||||
.field("index_prefixes", (String) null)
|
||||
.endObject().endObject()
|
||||
.endObject().endObject());
|
||||
MapperParsingException e = expectThrows(MapperParsingException.class,
|
||||
() -> parser.parse("type", new CompressedXContent(badConfigMapping))
|
||||
);
|
||||
assertThat(e.getMessage(), containsString("[index_prefix] must not have a [null] value"));
|
||||
assertThat(e.getMessage(), containsString("[index_prefixes] must not have a [null] value"));
|
||||
}
|
||||
|
||||
{
|
||||
|
@ -848,13 +848,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
|
|||
.startObject("properties").startObject("field")
|
||||
.field("type", "text")
|
||||
.field("index", "false")
|
||||
.startObject("index_prefix").endObject()
|
||||
.startObject("index_prefixes").endObject()
|
||||
.endObject().endObject()
|
||||
.endObject().endObject());
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> parser.parse("type", new CompressedXContent(badConfigMapping))
|
||||
);
|
||||
assertThat(e.getMessage(), containsString("Cannot set index_prefix on unindexed field [field]"));
|
||||
assertThat(e.getMessage(), containsString("Cannot set index_prefixes on unindexed field [field]"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
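Across the TextFieldMapperTests hunks above the mapping parameter is renamed from index_prefix to index_prefixes, and the error messages follow the rename. For reference, a mapping using the renamed parameter can be built the same way the tests do, via XContentFactory; the field name and min/max values here are illustrative, and the snippet assumes the Elasticsearch server dependency on the classpath:

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class IndexPrefixesMappingExample {
    public static void main(String[] args) throws Exception {
        XContentBuilder mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("properties")
                    .startObject("field")
                        .field("type", "text")
                        .field("analyzer", "english")
                        .startObject("index_prefixes")   // renamed from index_prefix
                            .field("min_chars", 1)
                            .field("max_chars", 10)
                        .endObject()
                    .endObject()
                .endObject()
            .endObject();
        System.out.println(Strings.toString(mapping));
    }
}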
|
@ -71,6 +71,19 @@ public class TextFieldTypeTests extends FieldTypeTestCase {
|
|||
tft.setFielddataMinSegmentSize(1000);
|
||||
}
|
||||
});
|
||||
addModifier(new Modifier("index_prefixes", true) {
|
||||
@Override
|
||||
public void modify(MappedFieldType ft) {
|
||||
TextFieldMapper.TextFieldType tft = (TextFieldMapper.TextFieldType)ft;
|
||||
TextFieldMapper.PrefixFieldType pft = tft.getPrefixFieldType();
|
||||
if (pft == null) {
|
||||
tft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft.name(), 3, 3));
|
||||
}
|
||||
else {
|
||||
tft.setPrefixFieldType(null);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
public void testTermQuery() {
|
||||
|
|
|
@ -254,6 +254,7 @@ public class FlushIT extends ESIntegTestCase {
|
|||
result.totalShards(), result.failed(), result.failureReason(), detail);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
|
||||
@TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
|
||||
public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
|
||||
internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
|
||||
|
@ -296,6 +297,7 @@ public class FlushIT extends ESIntegTestCase {
|
|||
assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
|
||||
@TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
|
||||
public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
|
||||
internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
|
||||
|
|
|
@ -65,8 +65,7 @@ public class PersistentTasksExecutorFullRestartIT extends ESIntegTestCase {
|
|||
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
|
||||
futures.add(future);
|
||||
taskIds[i] = UUIDs.base64UUID();
|
||||
service.startPersistentTask(taskIds[i], TestPersistentTasksExecutor.NAME, randomBoolean() ? null : new TestParams("Blah"),
|
||||
future);
|
||||
service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, randomBoolean() ? null : new TestParams("Blah"), future);
|
||||
}
|
||||
|
||||
for (int i = 0; i < numberOfTasks; i++) {
|
||||
|
|
|
@ -30,7 +30,7 @@ import org.elasticsearch.tasks.TaskId;
|
|||
import org.elasticsearch.tasks.TaskInfo;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
|
||||
import org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskStatusListener;
|
||||
import org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener;
|
||||
import org.elasticsearch.persistent.TestPersistentTasksPlugin.Status;
|
||||
import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;
|
||||
import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams;
|
||||
|
@ -69,15 +69,15 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
assertNoRunningTasks();
|
||||
}
|
||||
|
||||
public static class WaitForPersistentTaskStatusFuture<Params extends PersistentTaskParams>
|
||||
public static class WaitForPersistentTaskFuture<Params extends PersistentTaskParams>
|
||||
extends PlainActionFuture<PersistentTask<Params>>
|
||||
implements WaitForPersistentTaskStatusListener<Params> {
|
||||
implements WaitForPersistentTaskListener<Params> {
|
||||
}
|
||||
|
||||
public void testPersistentActionFailure() throws Exception {
|
||||
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
|
||||
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
|
||||
persistentTasksService.startPersistentTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
long allocationId = future.get().getAllocationId();
|
||||
assertBusy(() -> {
|
||||
// Wait for the task to start
|
||||
|
@ -108,7 +108,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
|
||||
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
|
||||
String taskId = UUIDs.base64UUID();
|
||||
persistentTasksService.startPersistentTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
long allocationId = future.get().getAllocationId();
|
||||
assertBusy(() -> {
|
||||
// Wait for the task to start
|
||||
|
@ -127,7 +127,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
logger.info("Simulating errant completion notification");
|
||||
//try sending completion request with incorrect allocation id
|
||||
PlainActionFuture<PersistentTask<?>> failedCompletionNotificationFuture = new PlainActionFuture<>();
|
||||
persistentTasksService.sendCompletionNotification(taskId, Long.MAX_VALUE, null, failedCompletionNotificationFuture);
|
||||
persistentTasksService.sendCompletionRequest(taskId, Long.MAX_VALUE, null, failedCompletionNotificationFuture);
|
||||
assertThrows(failedCompletionNotificationFuture, ResourceNotFoundException.class);
|
||||
// Make sure that the task is still running
|
||||
assertThat(client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]")
|
||||
|
@ -142,7 +142,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
|
||||
TestParams testParams = new TestParams("Blah");
|
||||
testParams.setExecutorNodeAttr("test");
|
||||
persistentTasksService.startPersistentTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future);
|
||||
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, future);
|
||||
String taskId = future.get().getId();
|
||||
|
||||
Settings nodeSettings = Settings.builder().put(nodeSettings(0)).put("node.attr.test_attr", "test").build();
|
||||
|
@ -169,14 +169,14 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
|
||||
// Remove the persistent task
|
||||
PlainActionFuture<PersistentTask<?>> removeFuture = new PlainActionFuture<>();
|
||||
persistentTasksService.cancelPersistentTask(taskId, removeFuture);
|
||||
persistentTasksService.sendRemoveRequest(taskId, removeFuture);
|
||||
assertEquals(removeFuture.get().getId(), taskId);
|
||||
}
|
||||
|
||||
public void testPersistentActionStatusUpdate() throws Exception {
|
||||
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
|
||||
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
|
||||
persistentTasksService.startPersistentTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
String taskId = future.get().getId();
|
||||
|
||||
assertBusy(() -> {
|
||||
|
@ -200,16 +200,16 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
.get().getTasks().size(), equalTo(1));
|
||||
|
||||
int finalI = i;
|
||||
WaitForPersistentTaskStatusFuture<?> future1 = new WaitForPersistentTaskStatusFuture<>();
|
||||
persistentTasksService.waitForPersistentTaskStatus(taskId,
|
||||
WaitForPersistentTaskFuture<?> future1 = new WaitForPersistentTaskFuture<>();
|
||||
persistentTasksService.waitForPersistentTaskCondition(taskId,
|
||||
task -> task != null && task.getStatus() != null && task.getStatus().toString() != null &&
|
||||
task.getStatus().toString().equals("{\"phase\":\"phase " + (finalI + 1) + "\"}"),
|
||||
TimeValue.timeValueSeconds(10), future1);
|
||||
assertThat(future1.get().getId(), equalTo(taskId));
|
||||
}
|
||||
|
||||
WaitForPersistentTaskStatusFuture<?> future1 = new WaitForPersistentTaskStatusFuture<>();
|
||||
persistentTasksService.waitForPersistentTaskStatus(taskId,
|
||||
WaitForPersistentTaskFuture<?> future1 = new WaitForPersistentTaskFuture<>();
|
||||
persistentTasksService.waitForPersistentTaskCondition(taskId,
|
||||
task -> false, TimeValue.timeValueMillis(10), future1);
|
||||
|
||||
assertThrows(future1, IllegalStateException.class, "timed out after 10ms");
|
||||
|
@ -220,8 +220,8 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
" and allocation id -2 doesn't exist");
|
||||
|
||||
// Wait for the task to disappear
|
||||
WaitForPersistentTaskStatusFuture<?> future2 = new WaitForPersistentTaskStatusFuture<>();
|
||||
persistentTasksService.waitForPersistentTaskStatus(taskId, Objects::isNull, TimeValue.timeValueSeconds(10), future2);
|
||||
WaitForPersistentTaskFuture<?> future2 = new WaitForPersistentTaskFuture<>();
|
||||
persistentTasksService.waitForPersistentTaskCondition(taskId, Objects::isNull, TimeValue.timeValueSeconds(10), future2);
|
||||
|
||||
logger.info("Completing the running task");
|
||||
// Complete the running task and make sure it finishes properly
|
||||
|
@ -235,11 +235,11 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
|
|||
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
|
||||
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
|
||||
String taskId = UUIDs.base64UUID();
|
||||
persistentTasksService.startPersistentTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
|
||||
future.get();
|
||||
|
||||
PlainActionFuture<PersistentTask<TestParams>> future2 = new PlainActionFuture<>();
|
||||
persistentTasksService.startPersistentTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future2);
|
||||
persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future2);
|
||||
assertThrows(future2, ResourceAlreadyExistsException.class);
|
||||
|
||||
assertBusy(() -> {
|
||||
|
|
|
@ -235,14 +235,14 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
|
|||
AtomicReference<ActionListener<CancelTasksResponse>> capturedListener = new AtomicReference<>();
|
||||
PersistentTasksService persistentTasksService = new PersistentTasksService(Settings.EMPTY, null, null, null) {
|
||||
@Override
|
||||
public void sendTaskManagerCancellation(long taskId, ActionListener<CancelTasksResponse> listener) {
|
||||
void sendCancelRequest(final long taskId, final String reason, final ActionListener<CancelTasksResponse> listener) {
|
||||
capturedTaskId.set(taskId);
|
||||
capturedListener.set(listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sendCompletionNotification(String taskId, long allocationId, Exception failure,
|
||||
ActionListener<PersistentTask<?>> listener) {
|
||||
public void sendCompletionRequest(final String taskId, final long taskAllocationId,
|
||||
final Exception taskFailure, final ActionListener<PersistentTask<?>> listener) {
|
||||
fail("Shouldn't be called during Cluster State cancellation");
|
||||
}
|
||||
};
|
||||
|
|
|
@ -71,7 +71,7 @@ public class EnableAssignmentDeciderIT extends ESIntegTestCase {
|
|||
final CountDownLatch latch = new CountDownLatch(numberOfTasks);
|
||||
for (int i = 0; i < numberOfTasks; i++) {
|
||||
PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class);
|
||||
service.startPersistentTask("task_" + i, TestPersistentTasksExecutor.NAME, randomTaskParams(),
|
||||
service.sendStartRequest("task_" + i, TestPersistentTasksExecutor.NAME, randomTaskParams(),
|
||||
new ActionListener<PersistentTask<PersistentTaskParams>>() {
|
||||
@Override
|
||||
public void onResponse(PersistentTask<PersistentTaskParams> task) {
|
||||
|
|
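The persistent-tasks test changes above follow a rename of the PersistentTasksService client methods: startPersistentTask becomes sendStartRequest, sendCompletionNotification becomes sendCompletionRequest, cancelPersistentTask becomes sendRemoveRequest, and waitForPersistentTaskStatus becomes waitForPersistentTaskCondition. A condensed usage sketch against the renamed API, assuming an injected service instance and the test plugin's TestParams/TestPersistentTasksExecutor; treat it as an outline, not a drop-in test:

import java.util.Objects;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.persistent.PersistentTaskParams;
import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener;
import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams;
import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor;

// Condensed sketch of the renamed PersistentTasksService calls exercised above.
class PersistentTaskFlowSketch {

    // Same helper shape the tests define: a future that doubles as the wait listener.
    static class WaitFuture<P extends PersistentTaskParams>
            extends PlainActionFuture<PersistentTask<P>> implements WaitForPersistentTaskListener<P> {
    }

    static void runOnce(PersistentTasksService service) throws Exception {
        String taskId = UUIDs.base64UUID();

        // start (was startPersistentTask)
        PlainActionFuture<PersistentTask<TestParams>> started = new PlainActionFuture<>();
        service.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), started);
        started.get();

        // wait until the task disappears (was waitForPersistentTaskStatus)
        WaitFuture<?> gone = new WaitFuture<>();
        service.waitForPersistentTaskCondition(taskId, Objects::isNull, TimeValue.timeValueSeconds(10), gone);

        // remove (was cancelPersistentTask)
        PlainActionFuture<PersistentTask<?>> removed = new PlainActionFuture<>();
        service.sendRemoveRequest(taskId, removed);
        removed.get();
    }
}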
|
@ -0,0 +1,54 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.aggregations.bucket.composite;
|
||||
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
public class BitArrayTests extends ESTestCase {
    public void testRandom() {
        try (BitArray bitArray = new BitArray(BigArrays.NON_RECYCLING_INSTANCE, 1)) {
            int numBits = randomIntBetween(1000, 10000);
            for (int step = 0; step < 3; step++) {
                boolean[] bits = new boolean[numBits];
                List<Integer> slots = new ArrayList<>();
                for (int i = 0; i < numBits; i++) {
                    bits[i] = randomBoolean();
                    slots.add(i);
                }
                Collections.shuffle(slots, random());
                for (int i : slots) {
                    if (bits[i]) {
                        bitArray.set(i);
                    } else {
                        bitArray.clear(i);
                    }
                }
                for (int i = 0; i < numBits; i++) {
                    assertEquals(bitArray.get(i), bits[i]);
                }
            }
        }
    }
}
|
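The BitArray exercised by the new test above is a simple set/clear/get bitset that, judging from the constructor, is backed by BigArrays so its storage is accounted and releasable. A minimal fixed-capacity sketch of the same word-and-mask arithmetic over a plain long[] is shown below; it is illustrative only, not the actual implementation:

// Minimal sketch of a BitArray-like structure: one bit per slot, stored in 64-bit words.
final class SimpleBitArray {
    private final long[] words;

    SimpleBitArray(int numBits) {
        this.words = new long[(numBits + 63) >>> 6];
    }

    void set(int index) {
        words[index >>> 6] |= 1L << (index & 63);
    }

    void clear(int index) {
        words[index >>> 6] &= ~(1L << (index & 63));
    }

    boolean get(int index) {
        return (words[index >>> 6] & (1L << (index & 63))) != 0;
    }
}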
|
@ -44,6 +44,9 @@ public class CompositeAggregationBuilderTests extends BaseAggregationTestCase<Co
|
|||
if (randomBoolean()) {
|
||||
histo.timeZone(randomDateTimeZone());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
histo.missingBucket(true);
|
||||
}
|
||||
return histo;
|
||||
}
|
||||
|
||||
|
@ -55,6 +58,9 @@ public class CompositeAggregationBuilderTests extends BaseAggregationTestCase<Co
|
|||
terms.script(new Script(randomAlphaOfLengthBetween(10, 20)));
|
||||
}
|
||||
terms.order(randomFrom(SortOrder.values()));
|
||||
if (randomBoolean()) {
|
||||
terms.missingBucket(true);
|
||||
}
|
||||
return terms;
|
||||
}
|
||||
|
||||
|
@ -65,6 +71,9 @@ public class CompositeAggregationBuilderTests extends BaseAggregationTestCase<Co
|
|||
} else {
|
||||
histo.script(new Script(randomAlphaOfLengthBetween(10, 20)));
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
histo.missingBucket(true);
|
||||
}
|
||||
histo.interval(randomDoubleBetween(Math.nextUp(0), Double.MAX_VALUE, false));
|
||||
return histo;
|
||||
}
|
||||
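The builder tests above toggle the new missingBucket(true) flag on each composite source. Client code sets the flag the same way, and because a null key can now legitimately appear in a composite response, the after key used for pagination may itself contain null for that source. A request-building sketch mirroring the calls used in these tests (the aggregation, source, and field names are illustrative):

import java.util.Collections;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
import org.elasticsearch.search.sort.SortOrder;

class CompositeMissingBucketExample {
    static CompositeAggregationBuilder firstPage() {
        TermsValuesSourceBuilder byKeyword = new TermsValuesSourceBuilder("keyword")
            .field("keyword")
            .missingBucket(true)    // emit an explicit null bucket for missing values
            .order(SortOrder.ASC);  // the null bucket sorts first in ascending order
        return new CompositeAggregationBuilder("name", Collections.singletonList(byKeyword));
    }

    static CompositeAggregationBuilder pageAfterNullBucket() {
        TermsValuesSourceBuilder byKeyword = new TermsValuesSourceBuilder("keyword")
            .field("keyword")
            .missingBucket(true);
        // Paging past the null bucket: the after key value for that source is null.
        return new CompositeAggregationBuilder("name", Collections.singletonList(byKeyword))
            .aggregateAfter(Collections.singletonMap("keyword", null));
    }
}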
|
|
|
@ -136,9 +136,9 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
|
|||
IndexSearcher searcher = new IndexSearcher(new MultiReader());
|
||||
QueryShardException exc =
|
||||
expectThrows(QueryShardException.class, () -> createAggregatorFactory(builder, searcher));
|
||||
assertThat(exc.getMessage(), containsString("failed to find field [unknown] and [missing] is not provided"));
|
||||
// should work when missing is provided
|
||||
terms.missing("missing");
|
||||
assertThat(exc.getMessage(), containsString("failed to find field [unknown] and [missing_bucket] is not set"));
|
||||
// should work when missing_bucket is set
|
||||
terms.missingBucket(true);
|
||||
createAggregatorFactory(builder, searcher);
|
||||
}
|
||||
|
||||
|
@ -187,6 +187,97 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
|
|||
);
|
||||
}
|
||||
|
||||
public void testWithKeywordAndMissingBucket() throws Exception {
|
||||
final List<Map<String, List<Object>>> dataset = new ArrayList<>();
|
||||
dataset.addAll(
|
||||
Arrays.asList(
|
||||
createDocument("keyword", "a"),
|
||||
createDocument("long", 0L),
|
||||
createDocument("keyword", "c"),
|
||||
createDocument("keyword", "a"),
|
||||
createDocument("keyword", "d"),
|
||||
createDocument("keyword", "c"),
|
||||
createDocument("long", 5L)
|
||||
)
|
||||
);
|
||||
|
||||
// sort ascending, null bucket is first
|
||||
testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset,
|
||||
() -> {
|
||||
TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword")
|
||||
.field("keyword")
|
||||
.missingBucket(true);
|
||||
return new CompositeAggregationBuilder("name", Collections.singletonList(terms));
|
||||
}, (result) -> {
|
||||
assertEquals(4, result.getBuckets().size());
|
||||
assertEquals("{keyword=d}", result.afterKey().toString());
|
||||
assertEquals("{keyword=null}", result.getBuckets().get(0).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(0).getDocCount());
|
||||
assertEquals("{keyword=a}", result.getBuckets().get(1).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(1).getDocCount());
|
||||
assertEquals("{keyword=c}", result.getBuckets().get(2).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(2).getDocCount());
|
||||
assertEquals("{keyword=d}", result.getBuckets().get(3).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(3).getDocCount());
|
||||
}
|
||||
);
|
||||
|
||||
// sort descending, null bucket is last
|
||||
testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset,
|
||||
() -> {
|
||||
TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword")
|
||||
.field("keyword")
|
||||
.missingBucket(true)
|
||||
.order(SortOrder.DESC);
|
||||
return new CompositeAggregationBuilder("name", Collections.singletonList(terms));
|
||||
}, (result) -> {
|
||||
assertEquals(4, result.getBuckets().size());
|
||||
assertEquals("{keyword=null}", result.afterKey().toString());
|
||||
assertEquals("{keyword=null}", result.getBuckets().get(3).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(3).getDocCount());
|
||||
assertEquals("{keyword=a}", result.getBuckets().get(2).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(2).getDocCount());
|
||||
assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(1).getDocCount());
|
||||
assertEquals("{keyword=d}", result.getBuckets().get(0).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(0).getDocCount());
|
||||
}
|
||||
);
|
||||
|
||||
testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset,
|
||||
() -> {
|
||||
TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword")
|
||||
.field("keyword")
|
||||
.missingBucket(true);
|
||||
return new CompositeAggregationBuilder("name", Collections.singletonList(terms))
|
||||
.aggregateAfter(Collections.singletonMap("keyword", null));
|
||||
}, (result) -> {
|
||||
assertEquals(3, result.getBuckets().size());
|
||||
assertEquals("{keyword=d}", result.afterKey().toString());
|
||||
assertEquals("{keyword=a}", result.getBuckets().get(0).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(0).getDocCount());
|
||||
assertEquals("{keyword=c}", result.getBuckets().get(1).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(1).getDocCount());
|
||||
assertEquals("{keyword=d}", result.getBuckets().get(2).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(2).getDocCount());
|
||||
}
|
||||
);
|
||||
|
||||
testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset,
|
||||
() -> {
|
||||
TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("keyword")
|
||||
.field("keyword")
|
||||
.missingBucket(true)
|
||||
.order(SortOrder.DESC);
|
||||
return new CompositeAggregationBuilder("name", Collections.singletonList(terms))
|
||||
.aggregateAfter(Collections.singletonMap("keyword", null));
|
||||
}, (result) -> {
|
||||
assertEquals(0, result.getBuckets().size());
|
||||
assertNull(result.afterKey());
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testWithKeywordMissingAfter() throws Exception {
|
||||
final List<Map<String, List<Object>>> dataset = new ArrayList<>();
|
||||
dataset.addAll(
|
||||
|
@ -518,6 +609,67 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
|
|||
);
|
||||
}
|
||||
|
||||
public void testWithKeywordLongAndMissingBucket() throws Exception {
|
||||
final List<Map<String, List<Object>>> dataset = new ArrayList<>();
|
||||
dataset.addAll(
|
||||
Arrays.asList(
|
||||
createDocument("keyword", "a", "long", 100L),
|
||||
createDocument("double", 0d),
|
||||
createDocument("keyword", "c", "long", 100L),
|
||||
createDocument("keyword", "a", "long", 0L),
|
||||
createDocument("keyword", "d", "long", 10L),
|
||||
createDocument("keyword", "c"),
|
||||
createDocument("keyword", "c", "long", 100L),
|
||||
createDocument("long", 100L),
|
||||
createDocument("double", 0d)
|
||||
)
|
||||
);
|
||||
testSearchCase(Arrays.asList(new MatchAllDocsQuery()), dataset,
|
||||
() -> new CompositeAggregationBuilder("name",
|
||||
Arrays.asList(
|
||||
new TermsValuesSourceBuilder("keyword").field("keyword").missingBucket(true),
|
||||
new TermsValuesSourceBuilder("long").field("long").missingBucket(true)
|
||||
)
|
||||
),
|
||||
(result) -> {
|
||||
assertEquals(7, result.getBuckets().size());
|
||||
assertEquals("{keyword=d, long=10}", result.afterKey().toString());
|
||||
assertEquals("{keyword=null, long=null}", result.getBuckets().get(0).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(0).getDocCount());
|
||||
assertEquals("{keyword=null, long=100}", result.getBuckets().get(1).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(1).getDocCount());
|
||||
assertEquals("{keyword=a, long=0}", result.getBuckets().get(2).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(2).getDocCount());
|
||||
assertEquals("{keyword=a, long=100}", result.getBuckets().get(3).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(3).getDocCount());
|
||||
assertEquals("{keyword=c, long=null}", result.getBuckets().get(4).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(4).getDocCount());
|
||||
assertEquals("{keyword=c, long=100}", result.getBuckets().get(5).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(5).getDocCount());
|
||||
assertEquals("{keyword=d, long=10}", result.getBuckets().get(6).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(6).getDocCount());
|
||||
}
|
||||
);
|
||||
|
||||
testSearchCase(Arrays.asList(new MatchAllDocsQuery(), new DocValuesFieldExistsQuery("keyword")), dataset,
|
||||
() -> new CompositeAggregationBuilder("name",
|
||||
Arrays.asList(
|
||||
new TermsValuesSourceBuilder("keyword").field("keyword").missingBucket(true),
|
||||
new TermsValuesSourceBuilder("long").field("long").missingBucket(true)
|
||||
)
|
||||
).aggregateAfter(createAfterKey("keyword", "c", "long", null)
|
||||
),
|
||||
(result) -> {
|
||||
assertEquals(2, result.getBuckets().size());
|
||||
assertEquals("{keyword=d, long=10}", result.afterKey().toString());
|
||||
assertEquals("{keyword=c, long=100}", result.getBuckets().get(0).getKeyAsString());
|
||||
assertEquals(2L, result.getBuckets().get(0).getDocCount());
|
||||
assertEquals("{keyword=d, long=10}", result.getBuckets().get(1).getKeyAsString());
|
||||
assertEquals(1L, result.getBuckets().get(1).getDocCount());
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
public void testMultiValuedWithKeywordAndLong() throws Exception {
|
||||
final List<Map<String, List<Object>>> dataset = new ArrayList<>();
|
||||
dataset.addAll(
|
||||
|
|
|
@ -129,21 +129,24 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
|
|||
assert(false);
|
||||
}
|
||||
}
|
||||
testRandomCase(true, types);
|
||||
testRandomCase(types);
|
||||
}
|
||||
|
||||
private void testRandomCase(ClassAndName... types) throws IOException {
|
||||
testRandomCase(true, types);
|
||||
testRandomCase(false, types);
|
||||
testRandomCase(true, true, types);
|
||||
testRandomCase(true, false, types);
|
||||
testRandomCase(false, true, types);
|
||||
testRandomCase(false, false, types);
|
||||
}
|
||||
|
||||
private void testRandomCase(boolean forceMerge, ClassAndName... types) throws IOException {
|
||||
private void testRandomCase(boolean forceMerge, boolean missingBucket, ClassAndName... types) throws IOException {
|
||||
final BigArrays bigArrays = BigArrays.NON_RECYCLING_INSTANCE;
|
||||
int numDocs = randomIntBetween(50, 100);
|
||||
List<Comparable<?>[]> possibleValues = new ArrayList<>();
|
||||
for (ClassAndName type : types) {
|
||||
int numValues = randomIntBetween(1, numDocs*2);
|
||||
Comparable<?>[] values = new Comparable[numValues];
|
||||
final Comparable<?>[] values;
|
||||
int numValues = randomIntBetween(1, numDocs * 2);
|
||||
values = new Comparable[numValues];
|
||||
if (type.clazz == Long.class) {
|
||||
for (int i = 0; i < numValues; i++) {
|
||||
values[i] = randomLong();
|
||||
|
@ -157,7 +160,7 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
|
|||
values[i] = new BytesRef(randomAlphaOfLengthBetween(5, 50));
|
||||
}
|
||||
} else {
|
||||
assert(false);
|
||||
assert (false);
|
||||
}
|
||||
possibleValues.add(values);
|
||||
}
|
||||
|
@ -171,30 +174,34 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
|
|||
boolean hasAllField = true;
|
||||
for (int j = 0; j < types.length; j++) {
|
||||
int numValues = randomIntBetween(0, 5);
|
||||
List<Comparable<?>> values = new ArrayList<>();
|
||||
if (numValues == 0) {
|
||||
hasAllField = false;
|
||||
}
|
||||
List<Comparable<?>> values = new ArrayList<>();
|
||||
for (int k = 0; k < numValues; k++) {
|
||||
values.add(possibleValues.get(j)[randomIntBetween(0, possibleValues.get(j).length-1)]);
|
||||
if (types[j].clazz == Long.class) {
|
||||
long value = (Long) values.get(k);
|
||||
document.add(new SortedNumericDocValuesField(types[j].fieldType.name(), value));
|
||||
document.add(new LongPoint(types[j].fieldType.name(), value));
|
||||
} else if (types[j].clazz == Double.class) {
|
||||
document.add(new SortedNumericDocValuesField(types[j].fieldType.name(),
|
||||
NumericUtils.doubleToSortableLong((Double) values.get(k))));
|
||||
} else if (types[j].clazz == BytesRef.class) {
|
||||
BytesRef value = (BytesRef) values.get(k);
|
||||
document.add(new SortedSetDocValuesField(types[j].fieldType.name(), (BytesRef) values.get(k)));
|
||||
document.add(new TextField(types[j].fieldType.name(), value.utf8ToString(), Field.Store.NO));
|
||||
} else {
|
||||
assert(false);
|
||||
if (missingBucket) {
|
||||
values.add(null);
|
||||
}
|
||||
} else {
|
||||
for (int k = 0; k < numValues; k++) {
|
||||
values.add(possibleValues.get(j)[randomIntBetween(0, possibleValues.get(j).length - 1)]);
|
||||
if (types[j].clazz == Long.class) {
|
||||
long value = (Long) values.get(k);
|
||||
document.add(new SortedNumericDocValuesField(types[j].fieldType.name(), value));
|
||||
document.add(new LongPoint(types[j].fieldType.name(), value));
|
||||
} else if (types[j].clazz == Double.class) {
|
||||
document.add(new SortedNumericDocValuesField(types[j].fieldType.name(),
|
||||
NumericUtils.doubleToSortableLong((Double) values.get(k))));
|
||||
} else if (types[j].clazz == BytesRef.class) {
|
||||
BytesRef value = (BytesRef) values.get(k);
|
||||
document.add(new SortedSetDocValuesField(types[j].fieldType.name(), (BytesRef) values.get(k)));
document.add(new TextField(types[j].fieldType.name(), value.utf8ToString(), Field.Store.NO));
} else {
assert (false);
}
}
}
docValues.add(values);
}
if (hasAllField) {
if (hasAllField || missingBucket) {
List<CompositeKey> comb = createListCombinations(docValues);
keys.addAll(comb);
}

@@ -210,29 +217,49 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
for (int i = 0; i < types.length; i++) {
final MappedFieldType fieldType = types[i].fieldType;
if (types[i].clazz == Long.class) {
sources[i] = new LongValuesSource(bigArrays, fieldType,
context -> DocValues.getSortedNumeric(context.reader(), fieldType.name()), value -> value,
DocValueFormat.RAW, null, size, 1);
sources[i] = new LongValuesSource(
bigArrays,
fieldType,
context -> DocValues.getSortedNumeric(context.reader(), fieldType.name()),
value -> value,
DocValueFormat.RAW,
missingBucket,
size,
1
);
} else if (types[i].clazz == Double.class) {
sources[i] = new DoubleValuesSource(
bigArrays, fieldType,
bigArrays,
fieldType,
context -> FieldData.sortableLongBitsToDoubles(DocValues.getSortedNumeric(context.reader(), fieldType.name())),
DocValueFormat.RAW, null, size, 1
DocValueFormat.RAW,
missingBucket,
size,
1
);
} else if (types[i].clazz == BytesRef.class) {
if (forceMerge) {
// we don't create global ordinals but we test this mode when the reader has a single segment
// since ordinals are global in this case.
sources[i] = new GlobalOrdinalValuesSource(
bigArrays, fieldType,
bigArrays,
fieldType,
context -> DocValues.getSortedSet(context.reader(), fieldType.name()),
DocValueFormat.RAW, null, size, 1
DocValueFormat.RAW,
missingBucket,
size,
1
);
} else {
sources[i] = new BinaryValuesSource(
bigArrays,
(b) -> {},
fieldType,
context -> FieldData.toString(DocValues.getSortedSet(context.reader(), fieldType.name())),
DocValueFormat.RAW, null, size, 1
DocValueFormat.RAW,
missingBucket,
size,
1
);
}
} else {

@@ -241,20 +268,13 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
}
CompositeKey[] expected = keys.toArray(new CompositeKey[0]);
Arrays.sort(expected, (a, b) -> compareKey(a, b));
CompositeValuesCollectorQueue queue = new CompositeValuesCollectorQueue(sources, size);
final SortedDocsProducer docsProducer = sources[0].createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery());
for (boolean withProducer : new boolean[] {true, false}) {
if (withProducer && docsProducer == null) {
continue;
}
int pos = 0;
CompositeKey last = null;
while (pos < size) {
queue.clear();
if (last != null) {
queue.setAfter(last.values());
}

final CompositeValuesCollectorQueue queue =
new CompositeValuesCollectorQueue(BigArrays.NON_RECYCLING_INSTANCE, sources, size, last);
final SortedDocsProducer docsProducer = sources[0].createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery());
for (LeafReaderContext leafReaderContext : reader.leaves()) {
final LeafBucketCollector leafCollector = new LeafBucketCollector() {
@Override

@@ -262,7 +282,7 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
queue.addIfCompetitive();
}
};
if (withProducer) {
if (docsProducer != null && withProducer) {
assertEquals(DocIdSet.EMPTY,
docsProducer.processLeaf(new MatchAllDocsQuery(), queue, leafReaderContext, false));
} else {

@@ -310,6 +330,14 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
private static int compareKey(CompositeKey key1, CompositeKey key2) {
assert key1.size() == key2.size();
for (int i = 0; i < key1.size(); i++) {
if (key1.get(i) == null) {
if (key2.get(i) == null) {
continue;
}
return -1;
} else if (key2.get(i) == null) {
return 1;
}
Comparable<Object> cmp1 = (Comparable<Object>) key1.get(i);
int cmp = cmp1.compareTo(key2.get(i));
if (cmp != 0) {

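The updated `compareKey` helper above treats a `null` slot (the missing bucket) as sorting before any concrete value and only falls back to `Comparable` once both sides are present. A minimal, self-contained sketch of that null-first ordering, using a plain `List<Object>` instead of the test's `CompositeKey` (the class and method names below are illustrative only, not part of the change):

[source,java]
----------------------------------------------------------
import java.util.Arrays;
import java.util.List;

public class NullFirstKeyComparison {

    // Keys must have the same length; null (the missing bucket) sorts before any value.
    @SuppressWarnings("unchecked")
    static int compareKeys(List<Object> key1, List<Object> key2) {
        assert key1.size() == key2.size();
        for (int i = 0; i < key1.size(); i++) {
            if (key1.get(i) == null) {
                if (key2.get(i) == null) {
                    continue; // both missing, move on to the next dimension
                }
                return -1;    // missing sorts first
            } else if (key2.get(i) == null) {
                return 1;
            }
            int cmp = ((Comparable<Object>) key1.get(i)).compareTo(key2.get(i));
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        // (null, "b") sorts before ("a", "a"): the missing first dimension decides.
        System.out.println(compareKeys(Arrays.asList(null, "b"), Arrays.asList("a", "a"))); // -1
        System.out.println(compareKeys(Arrays.asList("a", "a"), Arrays.asList("a", "a"))); // 0
    }
}
----------------------------------------------------------
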
@@ -40,10 +40,12 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
MappedFieldType keyword = new KeywordFieldMapper.KeywordFieldType();
keyword.setName("keyword");
BinaryValuesSource source = new BinaryValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
(b) -> {},
keyword,
context -> null,
DocValueFormat.RAW,
null,
false,
1,
1
);

@@ -55,10 +57,12 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
new TermQuery(new Term("keyword", "toto)"))));

source = new BinaryValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
(b) -> {},
keyword,
context -> null,
DocValueFormat.RAW,
"missing_value",
true,
1,
1
);

@@ -66,10 +70,12 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
assertNull(source.createSortedDocsProducerOrNull(reader, null));

source = new BinaryValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
(b) -> {},
keyword,
context -> null,
DocValueFormat.RAW,
null,
false,
0,
-1
);

@@ -77,7 +83,15 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {

MappedFieldType ip = new IpFieldMapper.IpFieldType();
ip.setName("ip");
source = new BinaryValuesSource(ip, context -> null, DocValueFormat.RAW,null, 1, 1);
source = new BinaryValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
(b) -> {},
ip,
context -> null,
DocValueFormat.RAW,
false,
1,
1);
assertNull(source.createSortedDocsProducerOrNull(reader, null));
}

@@ -88,7 +102,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
BigArrays.NON_RECYCLING_INSTANCE,
keyword, context -> null,
DocValueFormat.RAW,
null,
false,
1,
1
);

@@ -104,7 +118,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
keyword,
context -> null,
DocValueFormat.RAW,
"missing_value",
true,
1,
1
);

@@ -116,7 +130,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
keyword,
context -> null,
DocValueFormat.RAW,
null,
false,
1,
-1
);

@@ -129,7 +143,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
ip,
context -> null,
DocValueFormat.RAW,
null,
false,
1,
1
);

@@ -152,7 +166,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
value -> value,
DocValueFormat.RAW,
null,
false,
1,
1
);

@@ -169,7 +183,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
value -> value,
DocValueFormat.RAW,
0d,
true,
1,
1);
assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery()));

@@ -182,7 +196,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
value -> value,
DocValueFormat.RAW,
null,
false,
1,
-1
);

@@ -195,7 +209,7 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
number,
context -> null,
DocValueFormat.RAW,
null,
false,
1,
1
);

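Across these hunks the nullable `missing` argument is replaced by a boolean `missingBucket` flag, and the assertions check that no `SortedDocsProducer` is created once a missing bucket or a descending sort (`reverseMul == -1`) is in play. A hedged sketch of that guard, with illustrative names that are not the real `SingleDimensionValuesSource` internals:

[source,java]
----------------------------------------------------------
// Illustrative only: mirrors the conditions the assertions above exercise; it is not
// the actual SingleDimensionValuesSource implementation.
final class SortedDocsProducerGuard {

    static boolean canUseSortedDocsProducer(boolean missingBucket, int reverseMul, boolean fieldIndexed) {
        // No producer when documents without a value must become a bucket, when the
        // source sorts descending, or when the field cannot drive an index-ordered scan.
        return missingBucket == false && reverseMul == 1 && fieldIndexed;
    }

    public static void main(String[] args) {
        System.out.println(canUseSortedDocsProducer(false, 1, true));  // true: producer possible
        System.out.println(canUseSortedDocsProducer(true, 1, true));   // false: missing bucket requested
        System.out.println(canUseSortedDocsProducer(false, -1, true)); // false: descending order
    }
}
----------------------------------------------------------
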
@@ -18,7 +18,6 @@
*/
package org.elasticsearch.transport;

import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;

@@ -30,7 +29,9 @@ import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;

@@ -40,6 +41,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;

@@ -50,6 +52,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Predicate;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.instanceOf;

@@ -279,6 +282,75 @@ public class RemoteClusterServiceTests extends ESTestCase {
}
}

public void testRemoteNodeRoles() throws IOException, InterruptedException {
final Settings settings = Settings.EMPTY;
final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
final Settings data = Settings.builder().put("node.master", false).build();
final Settings dedicatedMaster = Settings.builder().put("node.data", false).put("node.ingest", "false").build();
try (MockTransportService c1N1 =
startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c1N2 =
startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data);
MockTransportService c2N1 =
startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c2N2 =
startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, data)) {
final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode();
final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode();
final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode();
final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode();
knownNodes.add(c1N1Node);
knownNodes.add(c1N2Node);
knownNodes.add(c2N1Node);
knownNodes.add(c2N2Node);
Collections.shuffle(knownNodes, random());

try (MockTransportService transportService = MockTransportService.createNewService(
settings,
Version.CURRENT,
threadPool,
null)) {
transportService.start();
transportService.acceptIncomingRequests();
final Settings.Builder builder = Settings.builder();
builder.putList("search.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
builder.putList("search.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) {
assertFalse(service.isCrossClusterSearchEnabled());
service.initializeRemoteClusters();
assertFalse(service.isCrossClusterSearchEnabled());

final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();

final CountDownLatch firstLatch = new CountDownLatch(1);
service.updateRemoteCluster(
"cluster_1",
Arrays.asList(c1N1Address, c1N2Address),
connectionListener(firstLatch));
firstLatch.await();

final CountDownLatch secondLatch = new CountDownLatch(1);
service.updateRemoteCluster(
"cluster_2",
Arrays.asList(c2N1Address, c2N2Address),
connectionListener(secondLatch));
secondLatch.await();

assertTrue(service.isCrossClusterSearchEnabled());
assertTrue(service.isRemoteClusterRegistered("cluster_1"));
assertFalse(service.isRemoteNodeConnected("cluster_1", c1N1Node));
assertTrue(service.isRemoteNodeConnected("cluster_1", c1N2Node));
assertTrue(service.isRemoteClusterRegistered("cluster_2"));
assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node));
assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node));
}
}
}
}

private ActionListener<Void> connectionListener(final CountDownLatch latch) {
return ActionListener.wrap(x -> latch.countDown(), x -> fail());
}

@@ -630,4 +702,115 @@ public class RemoteClusterServiceTests extends ESTestCase {
}
}
}

public void testGetNodePredicateNodeRoles() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(Settings.EMPTY);
{
DiscoveryNode all = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class)), Version.CURRENT);
assertTrue(nodePredicate.test(all));
}
{
DiscoveryNode dataMaster = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.DATA, DiscoveryNode.Role.MASTER)), Version.CURRENT);
assertTrue(nodePredicate.test(dataMaster));
}
{
DiscoveryNode dedicatedMaster = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.MASTER)), Version.CURRENT);
assertFalse(nodePredicate.test(dedicatedMaster));
}
{
DiscoveryNode dedicatedIngest = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.INGEST)), Version.CURRENT);
assertTrue(nodePredicate.test(dedicatedIngest));
}
{
DiscoveryNode masterIngest = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.INGEST, DiscoveryNode.Role.MASTER)), Version.CURRENT);
assertTrue(nodePredicate.test(masterIngest));
}
{
DiscoveryNode dedicatedData = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.DATA)), Version.CURRENT);
assertTrue(nodePredicate.test(dedicatedData));
}
{
DiscoveryNode ingestData = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST)), Version.CURRENT);
assertTrue(nodePredicate.test(ingestData));
}
{
DiscoveryNode coordOnly = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.noneOf(DiscoveryNode.Role.class)), Version.CURRENT);
assertTrue(nodePredicate.test(coordOnly));
}
}

public void testGetNodePredicateNodeVersion() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Set<DiscoveryNode.Role> roles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(Settings.EMPTY);
Version version = VersionUtils.randomVersion(random());
DiscoveryNode node = new DiscoveryNode("id", address, Collections.emptyMap(), roles, version);
assertThat(nodePredicate.test(node), equalTo(Version.CURRENT.isCompatible(version)));
}

public void testGetNodePredicateNodeAttrs() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Set<DiscoveryNode.Role> roles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build();
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(settings);
{
DiscoveryNode nonGatewayNode = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),
roles, Version.CURRENT);
assertFalse(nodePredicate.test(nonGatewayNode));
assertTrue(RemoteClusterService.getNodePredicate(Settings.EMPTY).test(nonGatewayNode));
}
{
DiscoveryNode gatewayNode = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
roles, Version.CURRENT);
assertTrue(nodePredicate.test(gatewayNode));
assertTrue(RemoteClusterService.getNodePredicate(Settings.EMPTY).test(gatewayNode));
}
{
DiscoveryNode noAttrNode = new DiscoveryNode("id", address, Collections.emptyMap(), roles, Version.CURRENT);
assertFalse(nodePredicate.test(noAttrNode));
assertTrue(RemoteClusterService.getNodePredicate(Settings.EMPTY).test(noAttrNode));
}
}

public void testGetNodePredicatesCombination() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build();
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(settings);
Set<DiscoveryNode.Role> allRoles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
Set<DiscoveryNode.Role> dedicatedMasterRoles = new HashSet<>(EnumSet.of(DiscoveryNode.Role.MASTER));
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
dedicatedMasterRoles, Version.CURRENT);
assertFalse(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),
dedicatedMasterRoles, Version.CURRENT);
assertFalse(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),
dedicatedMasterRoles, Version.CURRENT);
assertFalse(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
allRoles, Version.CURRENT);
assertTrue(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
allRoles, Version.V_5_3_0);
assertFalse(nodePredicate.test(node));
}
}
}

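Taken together, `testGetNodePredicateNodeRoles`, `testGetNodePredicateNodeVersion`, `testGetNodePredicateNodeAttrs`, and `testGetNodePredicatesCombination` pin down what `RemoteClusterService.getNodePredicate` accepts: dedicated master nodes are rejected, the node version must be compatible with the local node, and when `search.remote.node.attr` is set the matching node attribute must be `"true"`. A simplified, self-contained sketch of that combined check, using plain Java types instead of `DiscoveryNode` (all names here are illustrative, not the real implementation):

[source,java]
----------------------------------------------------------
import java.util.Map;
import java.util.Set;

// Illustrative model of the predicate described by the tests above; it is not the
// actual RemoteClusterService implementation.
final class GatewayNodeCheck {

    enum Role { MASTER, DATA, INGEST }

    static boolean isEligibleGateway(Set<Role> roles, Map<String, String> attributes,
                                     boolean versionCompatible, String requiredAttr) {
        // Dedicated (master-only) nodes and version-incompatible nodes never qualify.
        boolean dedicatedMaster = roles.equals(Set.of(Role.MASTER));
        if (dedicatedMaster || versionCompatible == false) {
            return false;
        }
        // With search.remote.node.attr configured, the node must advertise attr=true.
        if (requiredAttr != null) {
            return "true".equals(attributes.get(requiredAttr));
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(isEligibleGateway(Set.of(Role.MASTER), Map.of("gateway", "true"), true, "gateway"));            // false
        System.out.println(isEligibleGateway(Set.of(Role.DATA, Role.INGEST), Map.of("gateway", "true"), true, "gateway")); // true
        System.out.println(isEligibleGateway(Set.of(), Map.of(), true, null));                                             // true: coordinating-only node
    }
}
----------------------------------------------------------
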
@@ -1,24 +1,99 @@
[role="xpack"]
[float]
[[forwarding-audit-logfiles]]
==== Forwarding audit logs to a remote cluster
=== Forwarding audit logs to a remote cluster

To index audit events to a remote Elasticsearch cluster, you configure
the following `xpack.security.audit.index.client` settings:
When you are auditing security events, you can optionally store the logs in an
{es} index on a remote cluster. The logs are sent to the remote cluster by
using the {javaclient}/transport-client.html[transport client].

* `xpack.security.audit.index.client.hosts`
* `xpack.security.audit.index.client.cluster.name`
* `xpack.security.audit.index.client.xpack.security.user`
. Configure auditing such that the logs are stored in {es} rolling indices.
See <<audit-index>>.

. Establish a connection to the remote cluster by configuring the following
`xpack.security.audit.index.client` settings:
+
--
[source, yaml]
--------------------------------------------------
xpack.security.audit.index.client.hosts: 192.168.0.1, 192.168.0.2 <1>
xpack.security.audit.index.client.cluster.name: logging-prod <2>
xpack.security.audit.index.client.xpack.security.user: myuser:mypassword <3>
--------------------------------------------------
<1> A list of hosts in the remote cluster. If you are not using the default
value for the `transport.tcp.port` setting on the remote cluster, you must
specify the appropriate port number (prefixed by a colon) after each host.
<2> The remote cluster name.
<3> A valid user and password, which must have authority to create the
`.security-audit` index on the remote cluster.

For more information about these settings, see
{ref}/auditing-settings.html#remote-audit-settings[Remote Audit Log Indexing Configuration Settings].
{ref}/auditing-settings.html#remote-audit-settings[Remote audit log indexing configuration settings].

You can pass additional settings to the remote client by specifying them in the
`xpack.security.audit.index.client` namespace. For example, to allow the remote
client to discover all of the nodes in the remote cluster you can specify the
`client.transport.sniff` setting:
--

. If the remote cluster has Transport Layer Security (TLS/SSL) enabled, you
must specify extra security settings:

.. {ref}/configuring-tls.html#node-certificates[Generate a node certificate on
the remote cluster], then copy that certificate to the client.

.. Enable TLS and specify the information required to access the node certificate.

*** If the signed certificate is in PKCS#12 format, add the following information
to the `elasticsearch.yml` file:
+
--
[source,yaml]
----------------------------
xpack.security.audit.index.client.transport.sniff: true
----------------------------
-----------------------------------------------------------
xpack.security.audit.index.client.xpack.security.transport.ssl.enabled: true
xpack.security.audit.index.client.xpack.ssl.keystore.path: certs/remote-elastic-certificates.p12
xpack.security.audit.index.client.xpack.ssl.truststore.path: certs/remote-elastic-certificates.p12
-----------------------------------------------------------

For more information about these settings, see
{ref}/security-settings.html#auditing-tls-ssl-settings[Auditing TLS settings].
--

*** If the certificate is in PEM format, add the following information to the
`elasticsearch.yml` file:
+
--
[source, yaml]
--------------------------------------------------
xpack.security.audit.index.client.xpack.security.transport.ssl.enabled: true
xpack.security.audit.index.client.xpack.ssl.key: /home/es/config/audit-client.key
xpack.security.audit.index.client.xpack.ssl.certificate: /home/es/config/audit-client.crt
xpack.security.audit.index.client.xpack.ssl.certificate_authorities: [ "/home/es/config/remote-ca.crt" ]
--------------------------------------------------

For more information about these settings, see
{ref}/security-settings.html#auditing-tls-ssl-settings[Auditing TLS settings].
--

.. If you secured the certificate with a password, add the password to
your {es} keystore:

*** If the signed certificate is in PKCS#12 format, use the following commands:
+
--
[source,shell]
-----------------------------------------------------------
bin/elasticsearch-keystore add xpack.security.audit.index.client.xpack.ssl.keystore.secure_password

bin/elasticsearch-keystore add xpack.security.audit.index.client.xpack.ssl.truststore.secure_password
-----------------------------------------------------------
--

*** If the certificate is in PEM format, use the following commands:
+
--
[source,shell]
-----------------------------------------------------------
bin/elasticsearch-keystore add xpack.security.audit.index.client.xpack.ssl.secure_key_passphrase
-----------------------------------------------------------
--

. Restart {es}.

When these steps are complete, your audit logs are stored in {es} rolling
indices on the remote cluster.

@@ -155,5 +155,5 @@ GET two:logs-2017.04/_search <1>
// TEST[skip:todo]
//TBD: Is there a missing description of the <1> callout above?

:edit_url: https://github.com/elastic/kibana/edit/{branch}/x-pack/docs/en/security/cross-cluster-kibana.asciidoc
include::{xkb-repo-dir}/security/cross-cluster-kibana.asciidoc[]
:edit_url: https://github.com/elastic/kibana/edit/{branch}/docs/security/cross-cluster-kibana.asciidoc
include::{kib-repo-dir}/security/cross-cluster-kibana.asciidoc[]

@@ -130,10 +130,23 @@ Specifies the name of the remote cluster.

`xpack.security.audit.index.client.xpack.security.user`::
Specifies the `username:password` pair that is used to authenticate with the
remote cluster.
remote cluster. This user must have authority to create the `.security-audit`
index on the remote cluster.

If the remote {es} cluster has Transport Layer Security (TLS/SSL) enabled, you
must set the following setting to `true`:

`xpack.security.audit.index.client.xpack.security.transport.ssl.enabled`::
Used to enable or disable TLS/SSL for the transport client that forwards audit
logs to the remote cluster. The default is `false`.

You must also specify the information necessary to access certificates. See
<<auditing-tls-ssl-settings>>.

You can pass additional settings to the remote client by specifying them in the
`xpack.security.audit.index.client` namespace. For example, to allow the remote
`xpack.security.audit.index.client` namespace. For example, you can add
<<modules-transport,transport settings>> and
<<tcp-settings,advanced TCP settings>> in that namespace. To allow the remote
client to discover all of the nodes in the remote cluster you can specify the
`client.transport.sniff` setting:

@@ -1257,6 +1257,16 @@ transport profile, use the prefix `transport.profiles.$PROFILE.xpack.security.`
append the portion of the setting after `xpack.security.transport.`. For the key
setting, this would be `transport.profiles.$PROFILE.xpack.security.ssl.key`.

[[auditing-tls-ssl-settings]]
:ssl-prefix: xpack.security.audit.index.client.xpack
:component: Auditing
:client-auth-default!:
:server!:

include::ssl-settings.asciidoc[]

See also <<remote-audit-settings>>.

[float]
[[ip-filtering-settings]]
==== IP filtering settings

@@ -46,7 +46,6 @@ Java Cryptography Architecture documentation]. Defaults to the value of

The following settings are used to specify a private key, certificate, and the
trusted certificates that should be used when communicating over an SSL/TLS connection.
If none of the settings below are specified, the {ref}/security-settings.html#ssl-tls-settings[Default TLS/SSL Settings] are used.
ifdef::server[]
A private key and certificate must be configured.
endif::server[]

@@ -40,7 +40,6 @@ Docker images can be retrieved with the following commands:
["source","sh",subs="attributes"]
--------------------------------------------
docker pull {docker-repo}:{version}
docker pull {docker-repo}-platinum:{version}
docker pull {docker-repo}-oss:{version}
--------------------------------------------

@@ -154,9 +154,9 @@ killed by firewalls or load balancers inbetween.
You can use the `reporting` attachment type in an `email` action to automatically
generate a Kibana report and distribute it via email.

include::{xkb-repo-dir}/reporting/watch-example.asciidoc[]
include::{kib-repo-dir}/reporting/watch-example.asciidoc[]

include::{xkb-repo-dir}/reporting/report-intervals.asciidoc[]
include::{kib-repo-dir}/reporting/report-intervals.asciidoc[]

//TODO: RE-ADD LINK:
//For more information, see

@@ -6,6 +6,8 @@
package org.elasticsearch.xpack.core.ml.integration;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -35,10 +37,12 @@ public class MlRestTestStateCleaner {

@SuppressWarnings("unchecked")
private void deleteAllDatafeeds() throws IOException {
Map<String, Object> clusterStateAsMap = testCase.entityAsMap(adminClient.performRequest("GET", "/_cluster/state",
Collections.singletonMap("filter_path", "metadata.ml.datafeeds")));
List<Map<String, Object>> datafeeds =
(List<Map<String, Object>>) XContentMapValues.extractValue("metadata.ml.datafeeds", clusterStateAsMap);
final Request datafeedsRequest = new Request("GET", "/_xpack/ml/datafeeds");
datafeedsRequest.addParameter("filter_path", "datafeeds");
final Response datafeedsResponse = adminClient.performRequest(datafeedsRequest);
@SuppressWarnings("unchecked")
final List<Map<String, Object>> datafeeds =
(List<Map<String, Object>>) XContentMapValues.extractValue("datafeeds", testCase.entityAsMap(datafeedsResponse));
if (datafeeds == null) {
return;
}

@@ -75,11 +79,12 @@ public class MlRestTestStateCleaner {
}

private void deleteAllJobs() throws IOException {
Map<String, Object> clusterStateAsMap = testCase.entityAsMap(adminClient.performRequest("GET", "/_cluster/state",
Collections.singletonMap("filter_path", "metadata.ml.jobs")));
final Request jobsRequest = new Request("GET", "/_xpack/ml/anomaly_detectors");
jobsRequest.addParameter("filter_path", "jobs");
final Response response = adminClient.performRequest(jobsRequest);
@SuppressWarnings("unchecked")
List<Map<String, Object>> jobConfigs =
(List<Map<String, Object>>) XContentMapValues.extractValue("metadata.ml.jobs", clusterStateAsMap);
final List<Map<String, Object>> jobConfigs =
(List<Map<String, Object>>) XContentMapValues.extractValue("jobs", testCase.entityAsMap(response));
if (jobConfigs == null) {
return;
}

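Both cleanup methods now move off the cluster-state API and list the configurations through the ML REST endpoints, filtered down to just the config arrays. A condensed sketch of that request-and-extract pattern, reusing only the client calls that already appear in the hunks above; the response-to-map step is left to the caller because the test harness supplies its own `entityAsMap` helper:

[source,java]
----------------------------------------------------------
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

// Sketch of the REST-based listing pattern used above; toMap stands in for the
// test harness's entityAsMap helper.
final class MlConfigLister {

    @SuppressWarnings("unchecked")
    static List<Map<String, Object>> listDatafeeds(RestClient adminClient,
                                                   Function<Response, Map<String, Object>> toMap) throws IOException {
        final Request request = new Request("GET", "/_xpack/ml/datafeeds");
        request.addParameter("filter_path", "datafeeds");   // only return the datafeed configs
        final Response response = adminClient.performRequest(request);
        return (List<Map<String, Object>>) XContentMapValues.extractValue("datafeeds", toMap.apply(response));
    }
}
----------------------------------------------------------
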
@@ -315,7 +315,7 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
PersistentTasksCustomMetaData.PersistentTask<?> jobTask = MlMetadata.getJobTask(jobId, tasks);
if (jobTask != null) {
auditor.info(jobId, Messages.JOB_AUDIT_FORCE_CLOSING);
persistentTasksService.cancelPersistentTask(jobTask.getId(),
persistentTasksService.sendRemoveRequest(jobTask.getId(),
new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
@Override
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> task) {

@@ -400,7 +400,7 @@ public class TransportCloseJobAction extends TransportTasksAction<TransportOpenJ
// so wait for that to happen here.
void waitForJobClosed(CloseJobAction.Request request, WaitForCloseRequest waitForCloseRequest, CloseJobAction.Response response,
ActionListener<CloseJobAction.Response> listener) {
persistentTasksService.waitForPersistentTasksStatus(persistentTasksCustomMetaData -> {
persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetaData -> {
for (String persistentTaskId : waitForCloseRequest.persistentTaskIds) {
if (persistentTasksCustomMetaData.getTask(persistentTaskId) != null) {
return false;

@@ -90,7 +90,7 @@ public class TransportDeleteDatafeedAction extends TransportMasterNodeAction<Del
if (datafeedTask == null) {
listener.onResponse(true);
} else {
persistentTasksService.cancelPersistentTask(datafeedTask.getId(),
persistentTasksService.sendRemoveRequest(datafeedTask.getId(),
new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
@Override
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> persistentTask) {

@@ -182,7 +182,7 @@ public class TransportDeleteJobAction extends TransportMasterNodeAction<DeleteJo
if (jobTask == null) {
listener.onResponse(null);
} else {
persistentTasksService.cancelPersistentTask(jobTask.getId(),
persistentTasksService.sendRemoveRequest(jobTask.getId(),
new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
@Override
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> task) {

@@ -465,7 +465,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct

// Step 4. Start job task
ActionListener<PutJobAction.Response> establishedMemoryUpdateListener = ActionListener.wrap(
response -> persistentTasksService.startPersistentTask(MlMetadata.jobTaskId(jobParams.getJobId()),
response -> persistentTasksService.sendStartRequest(MlMetadata.jobTaskId(jobParams.getJobId()),
OpenJobAction.TASK_NAME, jobParams, finalListener),
listener::onFailure
);

@@ -518,8 +518,8 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct

private void waitForJobStarted(String taskId, OpenJobAction.JobParams jobParams, ActionListener<OpenJobAction.Response> listener) {
JobPredicate predicate = new JobPredicate();
persistentTasksService.waitForPersistentTaskStatus(taskId, predicate, jobParams.getTimeout(),
new PersistentTasksService.WaitForPersistentTaskStatusListener<OpenJobAction.JobParams>() {
persistentTasksService.waitForPersistentTaskCondition(taskId, predicate, jobParams.getTimeout(),
new PersistentTasksService.WaitForPersistentTaskListener<OpenJobAction.JobParams>() {
@Override
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<OpenJobAction.JobParams> persistentTask) {
if (predicate.exception != null) {

@@ -550,7 +550,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct

private void cancelJobStart(PersistentTasksCustomMetaData.PersistentTask<OpenJobAction.JobParams> persistentTask, Exception exception,
ActionListener<OpenJobAction.Response> listener) {
persistentTasksService.cancelPersistentTask(persistentTask.getId(),
persistentTasksService.sendRemoveRequest(persistentTask.getId(),
new ActionListener<PersistentTasksCustomMetaData.PersistentTask<?>>() {
@Override
public void onResponse(PersistentTasksCustomMetaData.PersistentTask<?> task) {

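The ML transport-action hunks above track a straight rename of the `PersistentTasksService` entry points; argument lists and listener types are unchanged at every call site. A summary of the mapping, drawn only from the calls shown in this diff:

[source,java]
----------------------------------------------------------
/*
 * PersistentTasksService renames applied across the ML transport actions above
 * (same arguments, same ActionListener types at each call site):
 *
 *   startPersistentTask                  -> sendStartRequest
 *   cancelPersistentTask                 -> sendRemoveRequest
 *   waitForPersistentTaskStatus          -> waitForPersistentTaskCondition
 *   waitForPersistentTasksStatus         -> waitForPersistentTasksCondition
 *   WaitForPersistentTaskStatusListener  -> WaitForPersistentTaskListener
 */
----------------------------------------------------------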