Merge branch 'master' into ccr

* master:
  Add Verify Repository High Level REST API (#30934)
  [CI] Mute SamlAuthenticatorTests testIncorrectSigningKeyIsRejected
  [DOCS] Fixes kibana security file location
  SQL: Remove log4j and joda from JDBC dependencies (#30938)
  Revert accidentally pushed changes in NoriAnalysisTests
  Fix composite agg serialization error
  Change ScriptException status to 400 (bad request) (#30861)
  Fix synced flush docs
  REST high-level client: add synced flush API (2) (#30650)
  Fix missing option serialization after backport
  Cross Cluster Search: do not use dedicated masters as gateways (#30926)
  Fix AliasMetaData parsing (#30866)
  Fsync state file before exposing it (#30929)
Nhat Nguyen 2018-05-30 12:11:52 -04:00
commit 5e81a20171
64 changed files with 1551 additions and 951 deletions

View File

@@ -34,6 +34,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
@@ -269,6 +270,28 @@ public final class IndicesClient {
listener, emptySet(), headers);
}
/**
 * Initiate a synced flush manually using the synced flush API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html">
* Synced flush API on elastic.co</a>
*/
public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, Header... headers) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced,
SyncedFlushResponse::fromXContent, emptySet(), headers);
}
/**
* Asynchronously initiate a synced flush manually using the synced flush API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-synced-flush.html">
* Synced flush API on elastic.co</a>
*/
public void flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, ActionListener<SyncedFlushResponse> listener, Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, RequestConverters::flushSynced,
SyncedFlushResponse::fromXContent, listener, emptySet(), headers);
}
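A minimal usage sketch for the two methods above, assuming an already-built `RestHighLevelClient` named `client` and the imports shown at the top of this file (the index names are illustrative; it mirrors the documentation tests further down):

SyncedFlushRequest request = new SyncedFlushRequest("index1", "index2");

// blocking call; throws ElasticsearchException if the request fails
SyncedFlushResponse response = client.indices().flushSynced(request);
int failedCopies = response.failedShards();

// non-blocking call; the listener is invoked when the flush completes or fails
client.indices().flushSyncedAsync(request, new ActionListener<SyncedFlushResponse>() {
    @Override
    public void onResponse(SyncedFlushResponse syncedFlushResponse) { /* inspect results */ }
    @Override
    public void onFailure(Exception e) { /* handle error */ }
});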
/**
* Retrieve the settings of one or more indices
* <p>

View File

@@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
@@ -41,6 +42,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
@@ -211,6 +213,14 @@ final class RequestConverters {
return request;
}
static Request flushSynced(SyncedFlushRequest syncedFlushRequest) {
String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices();
Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_flush/synced"));
Params parameters = new Params(request);
parameters.withIndicesOptions(syncedFlushRequest.indicesOptions());
return request;
}
static Request forceMerge(ForceMergeRequest forceMergeRequest) {
String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
Request request = new Request(HttpPost.METHOD_NAME, endpoint(indices, "_forcemerge"));
@@ -738,6 +748,19 @@
return request;
}
static Request verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest) {
String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot")
.addPathPart(verifyRepositoryRequest.name())
.addPathPartAsIs("_verify")
.build();
Request request = new Request(HttpPost.METHOD_NAME, endpoint);
Params parameters = new Params(request);
parameters.withMasterTimeout(verifyRepositoryRequest.masterNodeTimeout());
parameters.withTimeout(verifyRepositoryRequest.timeout());
return request;
}
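A quick sketch of the REST requests the two new converters build (the endpoint shapes match the unit tests further down; the index and repository names are made up):

Request flush = RequestConverters.flushSynced(new SyncedFlushRequest("index1", "index2"));
// flush.getMethod()   -> "POST"
// flush.getEndpoint() -> "/index1,index2/_flush/synced"

Request verify = RequestConverters.verifyRepository(new VerifyRepositoryRequest("my_repo"));
// verify.getMethod()   -> "POST"
// verify.getEndpoint() -> "/_snapshot/my_repo/_verify"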
static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
Request request = new Request(HttpPut.METHOD_NAME, endpoint);

View File

@@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import java.io.IOException;
@@ -116,4 +118,28 @@ public final class SnapshotClient {
restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository,
DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers);
}
/**
* Verifies a snapshot repository.
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
* API on elastic.co</a>
*/
public VerifyRepositoryResponse verifyRepository(VerifyRepositoryRequest verifyRepositoryRequest, Header... headers)
throws IOException {
return restHighLevelClient.performRequestAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository,
VerifyRepositoryResponse::fromXContent, emptySet(), headers);
}
/**
* Asynchronously verifies a snapshot repository.
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
* API on elastic.co</a>
*/
public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryRequest,
ActionListener<VerifyRepositoryResponse> listener, Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(verifyRepositoryRequest, RequestConverters::verifyRepository,
VerifyRepositoryResponse::fromXContent, listener, emptySet(), headers);
}
}
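A minimal usage sketch, assuming a `RestHighLevelClient` named `client` and a repository that was registered beforehand (the repository name is illustrative; it mirrors the documentation tests further down):

VerifyRepositoryRequest request = new VerifyRepositoryRequest("my_repository");
VerifyRepositoryResponse response = client.snapshot().verifyRepository(request);
// one entry per node on which the repository was verified
List<VerifyRepositoryResponse.NodeView> nodes = response.getNodes();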

View File

@@ -0,0 +1,344 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import java.io.IOException;
import java.util.Map;
import java.util.HashMap;
import java.util.Collections;
import java.util.List;
import java.util.ArrayList;
public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment {
public static final String SHARDS_FIELD = "_shards";
private ShardCounts totalCounts;
private Map<String, IndexResult> indexResults;
SyncedFlushResponse(ShardCounts totalCounts, Map<String, IndexResult> indexResults) {
this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed);
this.indexResults = Collections.unmodifiableMap(indexResults);
}
/**
* @return The total number of shard copies that were processed across all indexes
*/
public int totalShards() {
return totalCounts.total;
}
/**
* @return The number of successful shard copies that were processed across all indexes
*/
public int successfulShards() {
return totalCounts.successful;
}
/**
* @return The number of failed shard copies that were processed across all indexes
*/
public int failedShards() {
return totalCounts.failed;
}
/**
* @return A map of results for each index where the keys of the map are the index names
* and the values are the results encapsulated in {@link IndexResult}.
*/
public Map<String, IndexResult> getIndexResults() {
return indexResults;
}
ShardCounts getShardCounts() {
return totalCounts;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(SHARDS_FIELD);
totalCounts.toXContent(builder, params);
builder.endObject();
for (Map.Entry<String, IndexResult> entry: indexResults.entrySet()) {
String indexName = entry.getKey();
IndexResult indexResult = entry.getValue();
builder.startObject(indexName);
indexResult.toXContent(builder, params);
builder.endObject();
}
return builder;
}
public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
ShardCounts totalCounts = null;
Map<String, IndexResult> indexResults = new HashMap<>();
XContentLocation startLoc = parser.getTokenLocation();
while (parser.nextToken().equals(Token.FIELD_NAME)) {
if (parser.currentName().equals(SHARDS_FIELD)) {
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
totalCounts = ShardCounts.fromXContent(parser);
} else {
String indexName = parser.currentName();
IndexResult indexResult = IndexResult.fromXContent(parser);
indexResults.put(indexName, indexResult);
}
}
if (totalCounts != null) {
return new SyncedFlushResponse(totalCounts, indexResults);
} else {
throw new ParsingException(
startLoc,
"Unable to reconstruct object. Total counts for shards couldn't be parsed."
);
}
}
/**
* Encapsulates the number of total successful and failed shard copies
*/
public static final class ShardCounts implements ToXContentFragment {
public static final String TOTAL_FIELD = "total";
public static final String SUCCESSFUL_FIELD = "successful";
public static final String FAILED_FIELD = "failed";
private static final ConstructingObjectParser<ShardCounts, Void> PARSER =
new ConstructingObjectParser<>(
"shardcounts",
a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
}
private int total;
private int successful;
private int failed;
ShardCounts(int total, int successful, int failed) {
this.total = total;
this.successful = successful;
this.failed = failed;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(TOTAL_FIELD, total);
builder.field(SUCCESSFUL_FIELD, successful);
builder.field(FAILED_FIELD, failed);
return builder;
}
public static ShardCounts fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
public boolean equals(ShardCounts other) {
if (other != null) {
return
other.total == this.total &&
other.successful == this.successful &&
other.failed == this.failed;
} else {
return false;
}
}
}
/**
* Description for the flush/synced results for a particular index.
* This includes total, successful and failed copies along with failure description for each failed copy.
*/
public static final class IndexResult implements ToXContentFragment {
public static final String TOTAL_FIELD = "total";
public static final String SUCCESSFUL_FIELD = "successful";
public static final String FAILED_FIELD = "failed";
public static final String FAILURES_FIELD = "failures";
@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<IndexResult, Void> PARSER =
new ConstructingObjectParser<>(
"indexresult",
a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List<ShardFailure>)a[3])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD));
}
private ShardCounts counts;
private List<ShardFailure> failures;
IndexResult(int total, int successful, int failed, List<ShardFailure> failures) {
counts = new ShardCounts(total, successful, failed);
if (failures != null) {
this.failures = Collections.unmodifiableList(failures);
} else {
this.failures = Collections.unmodifiableList(new ArrayList<>());
}
}
/**
* @return The total number of shard copies that were processed for this index.
*/
public int totalShards() {
return counts.total;
}
/**
* @return The number of successful shard copies that were processed for this index.
*/
public int successfulShards() {
return counts.successful;
}
/**
* @return The number of failed shard copies that were processed for this index.
*/
public int failedShards() {
return counts.failed;
}
/**
* @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index.
*/
public List<ShardFailure> failures() {
return failures;
}
ShardCounts getShardCounts() {
return counts;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
counts.toXContent(builder, params);
if (failures.size() > 0) {
builder.startArray(FAILURES_FIELD);
for (ShardFailure failure : failures) {
failure.toXContent(builder, params);
}
builder.endArray();
}
return builder;
}
public static IndexResult fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}
/**
* Description of a failed shard copy for an index.
*/
public static final class ShardFailure implements ToXContentFragment {
public static String SHARD_ID_FIELD = "shard";
public static String FAILURE_REASON_FIELD = "reason";
public static String ROUTING_FIELD = "routing";
private int shardId;
private String failureReason;
private Map<String, Object> routing;
@SuppressWarnings("unchecked")
static ConstructingObjectParser<ShardFailure, Void> PARSER = new ConstructingObjectParser<>(
"shardfailure",
a -> new ShardFailure((Integer)a[0], (String)a[1], (Map<String, Object>)a[2])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD));
PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD));
PARSER.declareObject(
optionalConstructorArg(),
(parser, c) -> parser.map(),
new ParseField(ROUTING_FIELD)
);
}
ShardFailure(int shardId, String failureReason, Map<String, Object> routing) {
this.shardId = shardId;
this.failureReason = failureReason;
if (routing != null) {
this.routing = Collections.unmodifiableMap(routing);
} else {
this.routing = Collections.unmodifiableMap(new HashMap<>());
}
}
/**
* @return Id of the shard whose copy failed
*/
public int getShardId() {
return shardId;
}
/**
* @return Reason for failure of the shard copy
*/
public String getFailureReason() {
return failureReason;
}
/**
* @return Additional information about the failure.
*/
public Map<String, Object> getRouting() {
return routing;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(SHARD_ID_FIELD, shardId);
builder.field(FAILURE_REASON_FIELD, failureReason);
if (routing.size() > 0) {
builder.field(ROUTING_FIELD, routing);
}
builder.endObject();
return builder;
}
public static ShardFailure fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}
}
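For reference, a sketch of the response body that `fromXContent` consumes, using the field names defined above (the counts and the failure entry are illustrative):

// {
//   "_shards": { "total": 4, "successful": 3, "failed": 1 },
//   "index1":  { "total": 4, "successful": 3, "failed": 1,
//                "failures": [ { "shard": 0, "reason": "...", "routing": { ... } } ] }
// }
// Given an XContentParser positioned on such a body:
SyncedFlushResponse response = SyncedFlushResponse.fromXContent(parser);
SyncedFlushResponse.IndexResult index1 = response.getIndexResults().get("index1");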

View File

@@ -38,6 +38,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
@@ -563,6 +564,39 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
}
}
public void testSyncedFlush() throws IOException {
{
String index = "index";
Settings settings = Settings.builder()
.put("number_of_shards", 1)
.put("number_of_replicas", 0)
.build();
createIndex(index, settings);
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index);
SyncedFlushResponse flushResponse =
execute(syncedFlushRequest, highLevelClient().indices()::flushSynced, highLevelClient().indices()::flushSyncedAsync);
assertThat(flushResponse.totalShards(), equalTo(1));
assertThat(flushResponse.successfulShards(), equalTo(1));
assertThat(flushResponse.failedShards(), equalTo(0));
}
{
String nonExistentIndex = "non_existent_index";
assertFalse(indexExists(nonExistentIndex));
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex);
ElasticsearchException exception = expectThrows(
ElasticsearchException.class,
() ->
execute(
syncedFlushRequest,
highLevelClient().indices()::flushSynced,
highLevelClient().indices()::flushSyncedAsync
)
);
assertEquals(RestStatus.NOT_FOUND, exception.status());
}
}
public void testClearCache() throws IOException {
{
String index = "index";

View File

@@ -33,6 +33,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
@@ -43,6 +44,7 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
@@ -645,6 +647,29 @@ public class RequestConvertersTests extends ESTestCase {
assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}
public void testSyncedFlush() {
String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5);
SyncedFlushRequest syncedFlushRequest;
if (randomBoolean()) {
syncedFlushRequest = new SyncedFlushRequest(indices);
} else {
syncedFlushRequest = new SyncedFlushRequest();
syncedFlushRequest.indices(indices);
}
Map<String, String> expectedParams = new HashMap<>();
setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions, expectedParams);
Request request = RequestConverters.flushSynced(syncedFlushRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
if (indices != null && indices.length > 0) {
endpoint.add(String.join(",", indices));
}
endpoint.add("_flush/synced");
assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
assertThat(request.getParameters(), equalTo(expectedParams));
assertThat(request.getEntity(), nullValue());
assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}
public void testForceMerge() {
String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5);
ForceMergeRequest forceMergeRequest;
@@ -1608,6 +1633,21 @@
assertNull(request.getEntity());
}
public void testVerifyRepository() {
Map<String, String> expectedParams = new HashMap<>();
String repository = randomIndicesNames(1, 1)[0];
String endpoint = "/_snapshot/" + repository + "/_verify";
VerifyRepositoryRequest verifyRepositoryRequest = new VerifyRepositoryRequest(repository);
setRandomMasterTimeout(verifyRepositoryRequest, expectedParams);
setRandomTimeout(verifyRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
Request request = RequestConverters.verifyRepository(verifyRepositoryRequest);
assertThat(endpoint, equalTo(request.getEndpoint()));
assertThat(HttpPost.METHOD_NAME, equalTo(request.getMethod()));
assertThat(expectedParams, equalTo(request.getParameters()));
}
public void testPutTemplateRequest() throws Exception {
Map<String, String> names = new HashMap<>();
names.put("log", "log");

View File

@@ -28,6 +28,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;
@@ -86,10 +88,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
public void testSnapshotDeleteRepository() throws IOException {
String repository = "test";
String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository,
Collections.emptyMap(), new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));
assertTrue(createTestRepository(repository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
GetRepositoriesRequest request = new GetRepositoriesRequest();
GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,
@@ -102,4 +101,14 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
assertTrue(deleteResponse.isAcknowledged());
}
public void testVerifyRepository() throws IOException {
PutRepositoryResponse putRepositoryResponse = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}");
assertTrue(putRepositoryResponse.isAcknowledged());
VerifyRepositoryRequest request = new VerifyRepositoryRequest("test");
VerifyRepositoryResponse response = execute(request, highLevelClient().snapshot()::verifyRepository,
highLevelClient().snapshot()::verifyRepositoryAsync);
assertThat(response.getNodes().size(), equalTo(1));
}
}

View File

@@ -0,0 +1,269 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import java.io.IOException;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.Set;
import java.util.HashSet;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntMap;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.test.ESTestCase;
public class SyncedFlushResponseTests extends ESTestCase {
public void testXContentSerialization() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
TestPlan plan = createTestPlan();
XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent());
assertNotNull(plan.result);
serverResponsebuilder.startObject();
plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS);
serverResponsebuilder.endObject();
XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent());
assertNotNull(plan.result);
clientResponsebuilder.startObject();
plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS);
clientResponsebuilder.endObject();
Map<String, Object> serverContentMap = convertFailureListToSet(
serverResponsebuilder
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
LoggingDeprecationHandler.INSTANCE,
BytesReference.bytes(serverResponsebuilder).streamInput()
).map()
);
Map<String, Object> clientContentMap = convertFailureListToSet(
clientResponsebuilder
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
LoggingDeprecationHandler.INSTANCE,
BytesReference.bytes(clientResponsebuilder).streamInput()
)
.map()
);
assertEquals(serverContentMap, clientContentMap);
}
public void testXContentDeserialization() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
TestPlan plan = createTestPlan();
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
builder.startObject();
plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
XContentParser parser = builder
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()
);
SyncedFlushResponse originalResponse = plan.clientResult;
SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser);
assertNotNull(parsedResponse);
assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts());
for (Map.Entry<String, SyncedFlushResponse.IndexResult> entry: originalResponse.getIndexResults().entrySet()) {
String index = entry.getKey();
SyncedFlushResponse.IndexResult responseResult = entry.getValue();
SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index);
assertNotNull(responseResult);
assertNotNull(parsedResult);
assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts());
assertEquals(responseResult.failures().size(), parsedResult.failures().size());
for (SyncedFlushResponse.ShardFailure responseShardFailure: responseResult.failures()) {
assertTrue(containsFailure(parsedResult.failures(), responseShardFailure));
}
}
}
static class TestPlan {
SyncedFlushResponse.ShardCounts totalCounts;
Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse result;
SyncedFlushResponse clientResult;
}
TestPlan createTestPlan() throws IOException {
final TestPlan testPlan = new TestPlan();
final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
Map<String, SyncedFlushResponse.IndexResult> indexResults = new HashMap<>();
final XContentType xContentType = randomFrom(XContentType.values());
final int indexCount = randomIntBetween(1, 10);
int totalShards = 0;
int totalSuccessful = 0;
int totalFailed = 0;
for (int i = 0; i < indexCount; i++) {
final String index = "index_" + i;
int shards = randomIntBetween(1, 4);
int replicas = randomIntBetween(0, 2);
int successful = 0;
int failed = 0;
int failures = 0;
List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
List<SyncedFlushResponse.ShardFailure> shardFailures = new ArrayList<>();
for (int shard = 0; shard < shards; shard++) {
final ShardId shardId = new ShardId(index, "_na_", shard);
if (randomInt(5) < 2) {
// total shard failure
failed += replicas + 1;
failures++;
shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
shardFailures.add(
new SyncedFlushResponse.ShardFailure(
shardId.id(),
"simulated total failure",
new HashMap<>()
)
);
} else {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
for (int copy = 0; copy < replicas + 1; copy++) {
final ShardRouting shardRouting =
TestShardRouting.newShardRouting(
index, shard, "node_" + shardId + "_" + copy, null,
copy == 0, ShardRoutingState.STARTED
);
if (randomInt(5) < 2) {
// shard copy failure
failed++;
failures++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
// Building the shardRouting map here.
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
Map<String, Object> routing =
shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS)
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(), LoggingDeprecationHandler.INSTANCE,
BytesReference.bytes(builder).streamInput()
)
.map();
shardFailures.add(
new SyncedFlushResponse.ShardFailure(
shardId.id(),
"copy failure " + shardId,
routing
)
);
} else {
successful++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse());
}
}
shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
}
}
indicesResults.put(index, shardsResults);
indexResults.put(
index,
new SyncedFlushResponse.IndexResult(
shards * (replicas + 1),
successful,
failed,
shardFailures
)
);
testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
testPlan.expectedFailuresPerIndex.put(index, failures);
totalFailed += failed;
totalShards += shards * (replicas + 1);
totalSuccessful += successful;
}
testPlan.result = new org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults);
testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed);
testPlan.clientResult = new SyncedFlushResponse(
new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed),
indexResults
);
return testPlan;
}
public boolean containsFailure(List<SyncedFlushResponse.ShardFailure> failures, SyncedFlushResponse.ShardFailure origFailure) {
for (SyncedFlushResponse.ShardFailure failure: failures) {
if (failure.getShardId() == origFailure.getShardId() &&
failure.getFailureReason().equals(origFailure.getFailureReason()) &&
failure.getRouting().equals(origFailure.getRouting())) {
return true;
}
}
return false;
}
public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) {
if (first == null) {
assertNull(second);
} else {
assertTrue(first.equals(second));
}
}
public Map<String, Object> convertFailureListToSet(Map<String, Object> input) {
Map<String, Object> retMap = new HashMap<>();
for (Map.Entry<String, Object> entry: input.entrySet()) {
if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) {
retMap.put(entry.getKey(), entry.getValue());
} else {
// This was an index entry.
@SuppressWarnings("unchecked")
Map<String, Object> indexResult = (Map<String, Object>)entry.getValue();
Map<String, Object> retResult = new HashMap<>();
for (Map.Entry<String, Object> entry2: indexResult.entrySet()) {
if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) {
@SuppressWarnings("unchecked")
List<Object> failures = (List<Object>)entry2.getValue();
Set<Object> retSet = new HashSet<>(failures);
retResult.put(entry2.getKey(), retSet); // store the converted set under the "failures" key
} else {
retResult.put(entry2.getKey(), entry2.getValue());
}
}
retMap.put(entry.getKey(), retResult);
}
}
return retMap;
}
}

View File

@@ -37,6 +37,7 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
@@ -55,8 +56,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;
import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
import org.elasticsearch.action.admin.indices.shrink.ResizeType;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
import org.elasticsearch.action.support.ActiveShardCount;
@@ -64,6 +63,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.SyncedFlushResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -81,8 +81,6 @@ import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.Matchers.equalTo;
/**
* This class is used to generate the Java Indices API documentation.
* You need to wrap your code between two tags like:
@@ -784,6 +782,89 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
}
}
public void testSyncedFlushIndex() throws Exception {
RestHighLevelClient client = highLevelClient();
{
createIndex("index1", Settings.EMPTY);
}
{
// tag::flush-synced-request
SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1>
SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2>
SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3>
// end::flush-synced-request
// tag::flush-synced-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::flush-synced-request-indicesOptions
// tag::flush-synced-execute
SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request);
// end::flush-synced-execute
// tag::flush-synced-response
int totalShards = flushSyncedResponse.totalShards(); // <1>
int successfulShards = flushSyncedResponse.successfulShards(); // <2>
int failedShards = flushSyncedResponse.failedShards(); // <3>
for (Map.Entry<String, SyncedFlushResponse.IndexResult> responsePerIndexEntry:
flushSyncedResponse.getIndexResults().entrySet()) {
String indexName = responsePerIndexEntry.getKey(); // <4>
SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue();
int totalShardsForIndex = indexResult.totalShards(); // <5>
int successfulShardsForIndex = indexResult.successfulShards(); // <6>
int failedShardsForIndex = indexResult.failedShards(); // <7>
if (failedShardsForIndex > 0) {
for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) {
int shardId = failureEntry.getShardId(); // <8>
String failureReason = failureEntry.getFailureReason(); // <9>
Map<String, Object> routing = failureEntry.getRouting(); // <10>
}
}
}
// end::flush-synced-response
// tag::flush-synced-execute-listener
ActionListener<SyncedFlushResponse> listener = new ActionListener<SyncedFlushResponse>() {
@Override
public void onResponse(SyncedFlushResponse refreshResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::flush-synced-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::flush-synced-execute-async
client.indices().flushSyncedAsync(request, listener); // <1>
// end::flush-synced-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
{
// tag::flush-synced-notfound
try {
SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist");
client.indices().flushSynced(request);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.NOT_FOUND) {
// <1>
}
}
// end::flush-synced-notfound
}
}
public void testGetSettings() throws Exception {
RestHighLevelClient client = highLevelClient();

View File

@@ -27,6 +27,8 @@ import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRe
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
@@ -297,6 +299,66 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
}
}
public void testSnapshotVerifyRepository() throws IOException {
RestHighLevelClient client = highLevelClient();
createTestRepositories();
// tag::verify-repository-request
VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName);
// end::verify-repository-request
// tag::verify-repository-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::verify-repository-request-masterTimeout
// tag::verify-repository-request-timeout
request.timeout(TimeValue.timeValueMinutes(1)); // <1>
request.timeout("1m"); // <2>
// end::verify-repository-request-timeout
// tag::verify-repository-execute
VerifyRepositoryResponse response = client.snapshot().verifyRepository(request);
// end::verify-repository-execute
// tag::verify-repository-response
List<VerifyRepositoryResponse.NodeView> repositoryMetaDataResponse = response.getNodes();
// end::verify-repository-response
assertThat(1, equalTo(repositoryMetaDataResponse.size()));
assertThat("node-0", equalTo(repositoryMetaDataResponse.get(0).getName()));
}
public void testSnapshotVerifyRepositoryAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName);
// tag::verify-repository-execute-listener
ActionListener<VerifyRepositoryResponse> listener =
new ActionListener<VerifyRepositoryResponse>() {
@Override
public void onResponse(VerifyRepositoryResponse verifyRepositoryRestResponse) {
// <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::verify-repository-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::verify-repository-execute-async
client.snapshot().verifyRepositoryAsync(request, listener); // <1>
// end::verify-repository-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
private void createTestRepositories() throws IOException {
PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
request.type(FsRepository.TYPE);

View File

@@ -0,0 +1,91 @@
[[java-rest-high-flush-synced]]
=== Flush Synced API
[[java-rest-high-flush-synced-request]]
==== Flush Synced Request
A `SyncedFlushRequest` can be applied to one or more indices, or even to `_all` indices:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-request]
--------------------------------------------------
<1> Flush synced one index
<2> Flush synced multiple indices
<3> Flush synced all the indices
==== Optional arguments
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded
[[java-rest-high-flush-synced-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute]
--------------------------------------------------
[[java-rest-high-flush-synced-async]]
==== Asynchronous Execution
The asynchronous execution of a synced flush request requires both the `SyncedFlushRequest`
instance and an `ActionListener` instance to be passed to the asynchronous
method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute-async]
--------------------------------------------------
<1> The `SyncedFlushRequest` to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `SyncedFlushResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of failure. The raised exception is provided as an argument
[[java-rest-high-flush-synced-response]]
==== Flush Synced Response
The returned `SyncedFlushResponse` allows you to retrieve information about the
executed operation as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-response]
--------------------------------------------------
<1> Total number of shards hit by the flush request
<2> Number of shards where the flush has succeeded
<3> Number of shards where the flush has failed
<4> Name of the index whose results are being examined.
<5> Total number of shards for the index mentioned in <4>.
<6> Successful shards for the index mentioned in <4>.
<7> Failed shards for the index mentioned in <4>.
<8> Id of one of the failed shard copies of the index mentioned in <4>.
<9> Reason for the failure of the shard copy mentioned in <8>.
<10> Shard-related information (id, state, version, etc.) for the failed shard copy,
represented as a `Map<String, Object>`. If the entire shard failed, this map is empty.
By default, if the indices were not found, an `ElasticsearchException` will be thrown:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[flush-synced-notfound]
--------------------------------------------------
<1> Do something if the indices to be flushed were not found

View File

@@ -0,0 +1,81 @@
[[java-rest-high-snapshot-verify-repository]]
=== Snapshot Verify Repository API
The Snapshot Verify Repository API allows you to verify a registered repository.
[[java-rest-high-snapshot-verify-repository-request]]
==== Snapshot Verify Repository Request
A `VerifyRepositoryRequest`:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request]
--------------------------------------------------
==== Optional Arguments
The following arguments can optionally be provided:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the repository verification
as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the repository verification
as a `String`
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`
[[java-rest-high-snapshot-verify-repository-sync]]
==== Synchronous Execution
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute]
--------------------------------------------------
[[java-rest-high-snapshot-verify-repository-async]]
==== Asynchronous Execution
The asynchronous execution of a verify repository request requires both the
`VerifyRepositoryRequest` instance and an `ActionListener` instance to be
passed to the asynchronous method:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute-async]
--------------------------------------------------
<1> The `VerifyRepositoryRequest` to execute and the `ActionListener`
to use when the execution completes
The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed.
A typical listener for `VerifyRepositoryResponse` looks like:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument
<2> Called in case of a failure. The raised exception is provided as an argument
[[java-rest-high-snapshot-verify-repository-response]]
==== Snapshot Verify Repository Response
The returned `VerifyRepositoryResponse` allows you to retrieve information about the
executed operation as follows:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[verify-repository-response]
--------------------------------------------------

View File

@@ -67,6 +67,7 @@ Index Management::
* <<java-rest-high-split-index>>
* <<java-rest-high-refresh>>
* <<java-rest-high-flush>>
* <<java-rest-high-flush-synced>>
* <<java-rest-high-clear-cache>>
* <<java-rest-high-force-merge>>
* <<java-rest-high-rollover-index>>
@@ -89,6 +90,7 @@ include::indices/shrink_index.asciidoc[]
include::indices/split_index.asciidoc[]
include::indices/refresh.asciidoc[]
include::indices/flush.asciidoc[]
include::indices/flush_synced.asciidoc[]
include::indices/clear_cache.asciidoc[]
include::indices/force_merge.asciidoc[]
include::indices/rollover.asciidoc[]
@@ -116,10 +118,12 @@ The Java High Level REST Client supports the following Snapshot APIs:
* <<java-rest-high-snapshot-get-repository>>
* <<java-rest-high-snapshot-create-repository>>
* <<java-rest-high-snapshot-delete-repository>>
* <<java-rest-high-snapshot-verify-repository>>
include::snapshot/get_repository.asciidoc[]
include::snapshot/create_repository.asciidoc[]
include::snapshot/delete_repository.asciidoc[]
include::snapshot/verify_repository.asciidoc[]
== Tasks APIs

View File

@@ -48,7 +48,7 @@ Which shows that the class of `doc.first` is
"java_class": "org.elasticsearch.index.fielddata.ScriptDocValues$Longs",
...
},
"status": 500
"status": 400
}
---------------------------------------------------------
// TESTRESPONSE[s/\.\.\./"script_stack": $body.error.script_stack, "script": $body.error.script, "lang": $body.error.lang, "caused_by": $body.error.caused_by, "root_cause": $body.error.root_cause, "reason": $body.error.reason/]

View File

@@ -9,4 +9,9 @@ These `execution_hint` are removed and should be replaced by `global_ordinals`.
The dynamic cluster setting named `search.max_buckets` now defaults
to 10,000 (instead of unlimited in the previous version).
Requests that try to return more than the limit will fail with an exception.
==== `missing` option of the `composite` aggregation has been removed
The `missing` option of the `composite` aggregation, deprecated in 6.x,
has been removed. `missing_bucket` should be used instead.
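For example, a `terms` source that previously relied on `missing` can request an
explicit bucket for documents without a value instead (a sketch against the Java
API; the builders live in `org.elasticsearch.search.aggregations.bucket.composite`
and the names used here are illustrative):

["source","java"]
--------------------------------------------------
TermsValuesSourceBuilder product = new TermsValuesSourceBuilder("product")
    .field("product")
    .missingBucket(true); // replaces the removed `missing` option
CompositeAggregationBuilder composite =
    new CompositeAggregationBuilder("my_buckets", Collections.singletonList(product));
--------------------------------------------------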

View File

@@ -11,3 +11,9 @@ the getter methods for date objects were deprecated. These methods have
now been removed. Instead, use `.value` on `date` fields, or explicitly
parse `long` fields into a date object using
`Instant.ofEpochMilli(doc["myfield"].value)`.
==== Script errors will return as `400` error codes
Malformed scripts, whether in search templates, ingest pipelines, or search
requests, now return `400 - Bad request` where they previously returned
`500 - Internal Server Error`. This also applies to stored scripts.
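Callers that matched on the old status code need to be adjusted accordingly; a
sketch with the high-level REST client (the search request here is hypothetical):

["source","java"]
--------------------------------------------------
try {
    client.search(requestWithMalformedScript);
} catch (ElasticsearchException e) {
    // previously RestStatus.INTERNAL_SERVER_ERROR (500)
    assert e.status() == RestStatus.BAD_REQUEST; // now 400
}
--------------------------------------------------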

View File

@@ -43,7 +43,7 @@ The Search API returns `400 - Bad request` while it would previously return
* the number of slices is too large
* keep alive for scroll is too large
* number of filters in the adjacency matrix aggregation is too large
* script compilation errors
==== Scroll queries cannot use the `request_cache` anymore

View File

@@ -19,9 +19,9 @@
package org.elasticsearch.script.mustache;
import com.github.mustachejava.Mustache;
import com.github.mustachejava.MustacheException;
import com.github.mustachejava.MustacheFactory;
import java.io.StringReader;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
@@ -31,12 +31,15 @@ import org.elasticsearch.script.GeneralScriptException;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptException;
import org.elasticsearch.script.TemplateScript;
import java.io.Reader;
import java.io.StringReader;
import java.io.StringWriter;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Collections;
import java.util.Map;
/**
@@ -66,9 +69,14 @@ public final class MustacheScriptEngine implements ScriptEngine {
}
final MustacheFactory factory = createMustacheFactory(options);
Reader reader = new StringReader(templateSource);
Mustache template = factory.compile(reader, "query-template");
TemplateScript.Factory compiled = params -> new MustacheExecutableScript(template, params);
return context.factoryClazz.cast(compiled);
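// Mustache signals malformed templates with a MustacheException; convert it to a
// ScriptException so the failure is reported as a script compilation error
// (which, per this change set, maps to a 400 response rather than a 500).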
try {
Mustache template = factory.compile(reader, "query-template");
TemplateScript.Factory compiled = params -> new MustacheExecutableScript(template, params);
return context.factoryClazz.cast(compiled);
} catch (MustacheException ex) {
throw new ScriptException(ex.getMessage(), ex, Collections.emptyList(), templateSource, NAME);
}
}
private CustomMustacheFactory createMustacheFactory(Map<String, String> options) {

View File

@@ -18,6 +18,15 @@
*/
package org.elasticsearch.script.mustache;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.ScriptException;
import org.elasticsearch.script.TemplateScript;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matcher;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
@@ -29,15 +38,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
import com.github.mustachejava.MustacheException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.script.ScriptEngine;
import org.elasticsearch.script.TemplateScript;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matcher;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@@ -225,11 +225,17 @@ public class MustacheTests extends ESTestCase {
}
public void testsUnsupportedTagsToJson() {
MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{foo}}{{bar}}{{/toJson}}"));
final String script = "{{#toJson}}{{foo}}{{bar}}{{/toJson}}";
ScriptException e = expectThrows(ScriptException.class, () -> compile(script));
assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier"));
assertEquals(MustacheScriptEngine.NAME, e.getLang());
assertEquals(script, e.getScript());
e = expectThrows(MustacheException.class, () -> compile("{{#toJson}}{{/toJson}}"));
final String script2 = "{{#toJson}}{{/toJson}}";
e = expectThrows(ScriptException.class, () -> compile(script2));
assertThat(e.getMessage(), containsString("Mustache function [toJson] must contain one and only one identifier"));
assertEquals(MustacheScriptEngine.NAME, e.getLang());
assertEquals(script2, e.getScript());
}
public void testEmbeddedToJSON() throws Exception {
@ -312,11 +318,17 @@ public class MustacheTests extends ESTestCase {
}
public void testsUnsupportedTagsJoin() {
MustacheException e = expectThrows(MustacheException.class, () -> compile("{{#join}}{{/join}}"));
final String script = "{{#join}}{{/join}}";
ScriptException e = expectThrows(ScriptException.class, () -> compile(script));
assertThat(e.getMessage(), containsString("Mustache function [join] must contain one and only one identifier"));
assertEquals(MustacheScriptEngine.NAME, e.getLang());
assertEquals(script, e.getScript());
e = expectThrows(MustacheException.class, () -> compile("{{#join delimiter='a'}}{{/join delimiter='b'}}"));
final String script2 = "{{#join delimiter='a'}}{{/join delimiter='b'}}";
e = expectThrows(ScriptException.class, () -> compile(script2));
assertThat(e.getMessage(), containsString("Mismatched start/end tags"));
assertEquals(MustacheScriptEngine.NAME, e.getLang());
assertEquals(script2, e.getScript());
}
public void testJoinWithCustomDelimiter() {

View File

@ -35,7 +35,7 @@
id: "non_existing"
- do:
catch: request
catch: bad_request
put_script:
id: "1"
context: "search"

View File

@ -133,7 +133,7 @@ setup:
---
"Scripted Field with script error":
- do:
catch: request
catch: bad_request
search:
body:
script_fields:

View File

@ -17,7 +17,7 @@
indices.refresh: {}
- do:
catch: request
catch: bad_request
reindex:
body:
source:

View File

@ -446,7 +446,7 @@
indices.refresh: {}
- do:
catch: request
catch: bad_request
reindex:
refresh: true
body:

View File

@ -17,7 +17,7 @@
indices.refresh: {}
- do:
catch: request
catch: bad_request
update_by_query:
index: source
body:

View File

@ -434,7 +434,7 @@
indices.refresh: {}
- do:
catch: request
catch: bad_request
update_by_query:
index: twitter
refresh: true

View File

@ -332,7 +332,7 @@
wait_for_status: green
- do:
catch: request
catch: bad_request
ingest.put_pipeline:
id: "my_pipeline_1"
body: >
@ -348,5 +348,5 @@
]
}
- match: { error.header.processor_type: "set" }
- match: { error.type: "general_script_exception" }
- match: { error.reason: "Failed to compile inline script [{{#join}}{{/join}}] using lang [mustache]" }
- match: { error.type: "script_exception" }
- match: { error.reason: "Mustache function [join] must contain one and only one identifier" }

View File

@ -89,7 +89,7 @@
---
"Test script processor with syntax error in inline script":
- do:
catch: request
catch: bad_request
ingest.put_pipeline:
id: "my_pipeline"
body: >

View File

@ -327,8 +327,8 @@ setup:
---
"Composite aggregation and array size":
- skip:
version: " - 6.99.99"
reason: starting in 7.0 the composite sources do not allocate arrays eagerly.
version: " - 6.3.99"
reason: starting in 6.4 the composite sources do not allocate arrays eagerly.
- do:
search:

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Arrays;
@ -129,6 +130,12 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
private ClusterName clusterName;
private static final ObjectParser<VerifyRepositoryResponse, Void> PARSER =
new ObjectParser<>(VerifyRepositoryResponse.class.getName(), VerifyRepositoryResponse::new);
static {
PARSER.declareNamedObjects(VerifyRepositoryResponse::setNodes, NodeView.PARSER, new ParseField("nodes"));
}
VerifyRepositoryResponse() {
}
@ -167,6 +174,10 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
return clusterName;
}
protected void setNodes(List<NodeView> nodes) {
this.nodes = nodes.stream().map(n -> n.convertToDiscoveryNode()).collect(Collectors.toList());
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@ -187,8 +198,29 @@ public class VerifyRepositoryResponse extends ActionResponse implements ToXConte
return builder;
}
public static VerifyRepositoryResponse fromXContent(XContentParser parser) {
return PARSER.apply(parser, null);
}
@Override
public String toString() {
return Strings.toString(this);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
VerifyRepositoryResponse other = (VerifyRepositoryResponse) obj;
return nodes.equals(other.nodes);
}
@Override
public int hashCode() {
return nodes.hashCode();
}
}
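A minimal sketch of exercising the new fromXContent hook, assuming a JSON body shaped like the one the nodes parser above declares:

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

String json = "{\"nodes\":{\"node-id\":{\"name\":\"node-name\"}}}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    VerifyRepositoryResponse response = VerifyRepositoryResponse.fromXContent(parser);
    response.toString(); // renders via the new Strings.toString(this) override
}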

View File

@ -30,10 +30,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Collections;
@ -42,7 +44,7 @@ import java.util.Set;
import static java.util.Collections.emptySet;
public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
public class AliasMetaData extends AbstractDiffable<AliasMetaData> implements ToXContentFragment {
private final String alias;
@ -199,6 +201,17 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
return readDiffFrom(AliasMetaData::new, in);
}
@Override
public String toString() {
return Strings.toString(this, true, true);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
AliasMetaData.Builder.toXContent(this, builder, params);
return builder;
}
public static class Builder {
private final String alias;
@ -314,6 +327,8 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
if ("filter".equals(currentFieldName)) {
Map<String, Object> filter = parser.mapOrdered();
builder.filter(filter);
} else {
parser.skipChildren();
}
} else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) {
if ("filter".equals(currentFieldName)) {
@ -327,6 +342,8 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
} else if ("search_routing".equals(currentFieldName) || "searchRouting".equals(currentFieldName)) {
builder.searchRouting(parser.text());
}
} else if (token == XContentParser.Token.START_ARRAY) {
parser.skipChildren();
}
}
return builder.build();
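A minimal sketch of the leniency the two skipChildren() branches above add, assuming the parser sits on the alias-name field as in the round-trip tests later in this diff:

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

String json = "{\"my-alias\":{\"filter\":{\"term\":{\"year\":2016}},"
        + "\"unknown_object\":{\"a\":1},\"unknown_array\":[1,2]}}";
try (XContentParser parser = XContentType.JSON.xContent().createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    parser.nextToken(); // START_OBJECT
    parser.nextToken(); // FIELD_NAME: the alias name
    AliasMetaData aliasMd = AliasMetaData.Builder.fromXContent(parser);
    // unknown objects and arrays are now skipped instead of derailing the parse
}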

View File

@ -141,9 +141,10 @@ public abstract class MetaDataStateFormat<T> {
Path finalPath = stateLocation.resolve(fileName);
try {
Files.copy(finalStatePath, tmpPath);
IOUtils.fsync(tmpPath, false); // fsync the state file
// we are on the same FileSystem / Partition here we can do an atomic move
Files.move(tmpPath, finalPath, StandardCopyOption.ATOMIC_MOVE);
IOUtils.fsync(stateLocation, true); // we just fsync the dir here..
IOUtils.fsync(stateLocation, true);
} finally {
Files.deleteIfExists(tmpPath);
}
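A minimal, self-contained sketch of the write-fsync-rename-fsync pattern this hunk completes, using plain NIO instead of IOUtils and assuming a POSIX filesystem where a directory can be opened and forced:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

static void writeDurably(Path dir, String fileName, byte[] data) throws IOException {
    Path tmp = dir.resolve(fileName + ".tmp");
    Files.write(tmp, data);
    try (FileChannel ch = FileChannel.open(tmp, StandardOpenOption.WRITE)) {
        ch.force(true); // fsync the state file before exposing it
    }
    Files.move(tmp, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
    try (FileChannel ch = FileChannel.open(dir, StandardOpenOption.READ)) {
        ch.force(true); // fsync the directory so the rename is durable too
    }
}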

View File

@ -1,5 +1,14 @@
package org.elasticsearch.script;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.rest.RestStatus;
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@ -25,14 +34,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
/**
* Exception from a scripting engine.
* <p>
@ -132,4 +133,9 @@ public class ScriptException extends ElasticsearchException {
throw new RuntimeException(e);
}
}
@Override
public RestStatus status() {
return RestStatus.BAD_REQUEST;
}
}
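A minimal sketch of why the YAML tests earlier in this diff can switch to catch: bad_request, assuming ExceptionsHelper.status(Throwable) delegates to ElasticsearchException#status():

import java.util.Collections;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.ScriptException;

ScriptException e = new ScriptException("compile failed", new RuntimeException("boom"),
        Collections.emptyList(), "{{#join}}{{/join}}", "mustache");
RestStatus status = ExceptionsHelper.status(e);
assert status == RestStatus.BAD_REQUEST; // previously resolved to INTERNAL_SERVER_ERROR (500)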

View File

@ -50,8 +50,8 @@ class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
BinaryValuesSource(BigArrays bigArrays, LongConsumer breakerConsumer,
MappedFieldType fieldType, CheckedFunction<LeafReaderContext, SortedBinaryDocValues, IOException> docValuesFunc,
DocValueFormat format, boolean missingBucket, Object missing, int size, int reverseMul) {
super(bigArrays, format, fieldType, missingBucket, missing, size, reverseMul);
DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
super(bigArrays, format, fieldType, missingBucket, size, reverseMul);
this.breakerConsumer = breakerConsumer;
this.docValuesFunc = docValuesFunc;
this.values = bigArrays.newObjectArray(Math.min(size, 100));

View File

@ -271,7 +271,6 @@ final class CompositeAggregator extends BucketsAggregator {
vs::globalOrdinalsValues,
config.format(),
config.missingBucket(),
config.missing(),
size,
reverseMul
);
@ -288,7 +287,6 @@ final class CompositeAggregator extends BucketsAggregator {
vs::bytesValues,
config.format(),
config.missingBucket(),
config.missing(),
size,
reverseMul
);
@ -304,7 +302,6 @@ final class CompositeAggregator extends BucketsAggregator {
vs::bytesValues,
config.format(),
config.missingBucket(),
config.missing(),
size,
reverseMul
);
@ -318,7 +315,6 @@ final class CompositeAggregator extends BucketsAggregator {
vs::doubleValues,
config.format(),
config.missingBucket(),
config.missing(),
size,
reverseMul
);
@ -337,7 +333,6 @@ final class CompositeAggregator extends BucketsAggregator {
rounding,
config.format(),
config.missingBucket(),
config.missing(),
size,
reverseMul
);

View File

@ -23,8 +23,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryShardException;
@ -42,15 +40,12 @@ import java.util.Objects;
* A {@link ValuesSource} builder for {@link CompositeAggregationBuilder}
*/
public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSourceBuilder<AB>> implements Writeable, ToXContentFragment {
private static final DeprecationLogger DEPRECATION_LOGGER =
new DeprecationLogger(Loggers.getLogger(CompositeValuesSourceBuilder.class));
protected final String name;
private String field = null;
private Script script = null;
private ValueType valueType = null;
private boolean missingBucket = false;
private Object missing = null;
private SortOrder order = SortOrder.ASC;
private String format = null;
@ -72,12 +67,15 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
if (in.readBoolean()) {
this.valueType = ValueType.readFromStream(in);
}
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
this.missingBucket = in.readBoolean();
} else {
this.missingBucket = false;
}
this.missing = in.readGenericValue();
if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
// skip missing value for BWC
in.readGenericValue();
}
this.order = SortOrder.readFromStream(in);
if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
this.format = in.readOptionalString();
@ -100,10 +98,13 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
if (hasValueType) {
valueType.writeTo(out);
}
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeBoolean(missingBucket);
}
out.writeGenericValue(missing);
if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
// write missing value for BWC
out.writeGenericValue(null);
}
order.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_3_0)) {
out.writeOptionalString(format);
@ -125,9 +126,6 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
builder.field("script", script);
}
builder.field("missing_bucket", missingBucket);
if (missing != null) {
builder.field("missing", missing);
}
if (valueType != null) {
builder.field("value_type", valueType.getPreferredName());
}
@ -142,7 +140,7 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
@Override
public final int hashCode() {
return Objects.hash(field, missingBucket, missing, script, valueType, order, format, innerHashCode());
return Objects.hash(field, missingBucket, script, valueType, order, format, innerHashCode());
}
protected abstract int innerHashCode();
@ -158,7 +156,6 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
Objects.equals(script, that.script()) &&
Objects.equals(valueType, that.valueType()) &&
Objects.equals(missingBucket, that.missingBucket()) &&
Objects.equals(missing, that.missing()) &&
Objects.equals(order, that.order()) &&
Objects.equals(format, that.format()) &&
innerEquals(that);
@ -229,28 +226,6 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
return valueType;
}
/**
* Sets the value to use when the source finds a missing value in a
* document.
*
* @deprecated Use {@link #missingBucket(boolean)} instead.
*/
@SuppressWarnings("unchecked")
@Deprecated
public AB missing(Object missing) {
if (missing == null) {
throw new IllegalArgumentException("[missing] must not be null");
}
DEPRECATION_LOGGER.deprecated("[missing] is deprecated. Please use [missing_bucket] instead.");
this.missing = missing;
return (AB) this;
}
@Deprecated
public Object missing() {
return missing;
}
/**
* If true, an explicit `null` bucket will represent documents with missing values.
*/
@ -328,18 +303,14 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
public final CompositeValuesSourceConfig build(SearchContext context) throws IOException {
ValuesSourceConfig<?> config = ValuesSourceConfig.resolve(context.getQueryShardContext(),
valueType, field, script, missing, null, format);
valueType, field, script, null, null, format);
if (config.unmapped() && field != null && missing == null && missingBucket == false) {
if (config.unmapped() && field != null && missingBucket == false) {
// this source cannot produce any values so we refuse to build
// since composite buckets are not created on null values by default.
throw new QueryShardException(context.getQueryShardContext(),
"failed to find field [" + field + "] and [missing_bucket] is not set");
}
if (missingBucket && missing != null) {
throw new QueryShardException(context.getQueryShardContext(),
"cannot use [missing] option in conjunction with [missing_bucket]");
}
return innerBuild(context, config);
}
}
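With the deprecated missing option gone, a minimal sketch of opting into the null bucket instead, assuming the standard composite builder API:

import java.util.Collections;
import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;

TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder("by_keyword")
        .field("keyword")
        .missingBucket(true); // replaces the removed missing(Object) setter
CompositeAggregationBuilder composite =
        new CompositeAggregationBuilder("composite", Collections.singletonList(terms));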

View File

@ -32,7 +32,6 @@ class CompositeValuesSourceConfig {
private final ValuesSource vs;
private final DocValueFormat format;
private final int reverseMul;
private final Object missing;
private final boolean missingBucket;
/**
@ -42,18 +41,15 @@ class CompositeValuesSourceConfig {
* @param vs The underlying {@link ValuesSource}.
* @param format The {@link DocValueFormat} of this source.
* @param order The sort order associated with this source.
* @param missing The missing value or null if documents with missing value should be ignored.
*/
CompositeValuesSourceConfig(String name, @Nullable MappedFieldType fieldType, ValuesSource vs, DocValueFormat format,
SortOrder order, boolean missingBucket, @Nullable Object missing) {
SortOrder order, boolean missingBucket) {
this.name = name;
this.fieldType = fieldType;
this.vs = vs;
this.format = format;
this.reverseMul = order == SortOrder.ASC ? 1 : -1;
this.missingBucket = missingBucket;
assert missingBucket == false || missing == null;
this.missing = missing;
}
/**
@ -85,13 +81,6 @@ class CompositeValuesSourceConfig {
return format;
}
/**
* The missing value for this configuration or null if documents with missing value should be ignored.
*/
Object missing() {
return missing;
}
/**
* If true, an explicit `null` bucket represents documents with missing values.
*/

View File

@ -38,8 +38,6 @@ class CompositeValuesSourceParserHelper {
ValueType targetValueType) {
objectParser.declareField(VB::field, XContentParser::text,
new ParseField("field"), ObjectParser.ValueType.STRING);
objectParser.declareField(VB::missing, XContentParser::objectText,
new ParseField("missing"), ObjectParser.ValueType.VALUE);
objectParser.declareBoolean(VB::missingBucket, new ParseField("missing_bucket"));
objectParser.declareField(VB::valueType, p -> {

View File

@ -226,7 +226,7 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild
// is specified in the builder.
final DocValueFormat docValueFormat = format() == null ? DocValueFormat.RAW : config.format();
final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order(), missingBucket(), missing());
return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order(), missingBucket());
} else {
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
}

View File

@ -45,8 +45,8 @@ class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
DoubleValuesSource(BigArrays bigArrays, MappedFieldType fieldType,
CheckedFunction<LeafReaderContext, SortedNumericDoubleValues, IOException> docValuesFunc,
DocValueFormat format, boolean missingBucket, Object missing, int size, int reverseMul) {
super(bigArrays, format, fieldType, missingBucket, missing, size, reverseMul);
DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
super(bigArrays, format, fieldType, missingBucket, size, reverseMul);
this.docValuesFunc = docValuesFunc;
this.bits = missingBucket ? new BitArray(bigArrays, 100) : null;
this.values = bigArrays.newDoubleArray(Math.min(size, 100), false);

View File

@ -54,8 +54,8 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
GlobalOrdinalValuesSource(BigArrays bigArrays, MappedFieldType type,
CheckedFunction<LeafReaderContext, SortedSetDocValues, IOException> docValuesFunc,
DocValueFormat format, boolean missingBucket, Object missing, int size, int reverseMul) {
super(bigArrays, format, type, missingBucket, missing, size, reverseMul);
DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
super(bigArrays, format, type, missingBucket, size, reverseMul);
this.docValuesFunc = docValuesFunc;
this.values = bigArrays.newLongArray(Math.min(size, 100), false);
}

View File

@ -115,7 +115,7 @@ public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<H
ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig;
final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval);
final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order(), missingBucket(), missing());
return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order(), missingBucket());
} else {
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
}

View File

@ -56,8 +56,8 @@ class LongValuesSource extends SingleDimensionValuesSource<Long> {
LongValuesSource(BigArrays bigArrays,
MappedFieldType fieldType, CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc,
LongUnaryOperator rounding, DocValueFormat format, boolean missingBucket, Object missing, int size, int reverseMul) {
super(bigArrays, format, fieldType, missingBucket, missing, size, reverseMul);
LongUnaryOperator rounding, DocValueFormat format, boolean missingBucket, int size, int reverseMul) {
super(bigArrays, format, fieldType, missingBucket, size, reverseMul);
this.bigArrays = bigArrays;
this.docValuesFunc = docValuesFunc;
this.rounding = rounding;

View File

@ -41,8 +41,6 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R
protected final DocValueFormat format;
@Nullable
protected final MappedFieldType fieldType;
@Nullable
protected final Object missing;
protected final boolean missingBucket;
protected final int size;
@ -57,18 +55,15 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R
* @param format The format of the source.
* @param fieldType The field type or null if the source is a script.
* @param missingBucket If true, an explicit `null` bucket represents documents with missing values.
* @param missing The missing value or null if documents with missing value should be ignored.
* @param size The number of values to record.
* @param reverseMul -1 if the natural order ({@link SortOrder#ASC}) should be reversed.
*/
SingleDimensionValuesSource(BigArrays bigArrays, DocValueFormat format,
@Nullable MappedFieldType fieldType, boolean missingBucket, @Nullable Object missing,
@Nullable MappedFieldType fieldType, boolean missingBucket,
int size, int reverseMul) {
assert missing == null || missingBucket == false;
this.bigArrays = bigArrays;
this.format = format;
this.fieldType = fieldType;
this.missing = missing;
this.missingBucket = missingBucket;
this.size = size;
this.reverseMul = reverseMul;
@ -147,7 +142,6 @@ abstract class SingleDimensionValuesSource<T extends Comparable<T>> implements R
*/
protected boolean checkIfSortedDocsIsApplicable(IndexReader reader, MappedFieldType fieldType) {
if (fieldType == null ||
missing != null ||
(missingBucket && afterValue == null) ||
fieldType.indexOptions() == IndexOptions.NONE ||
// inverse of the natural order

View File

@ -93,6 +93,6 @@ public class TermsValuesSourceBuilder extends CompositeValuesSourceBuilder<Terms
} else {
format = config.format();
}
return new CompositeValuesSourceConfig(name, fieldType, vs, format, order(), missingBucket(), missing());
return new CompositeValuesSourceConfig(name, fieldType, vs, format, order(), missingBucket());
}
}

View File

@ -18,8 +18,6 @@
*/
package org.elasticsearch.transport;
import org.elasticsearch.client.Client;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
@ -27,6 +25,7 @@ import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
@ -36,6 +35,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
@ -97,6 +97,9 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
Setting.affixKeySetting("search.remote.", "skip_unavailable",
key -> boolSetting(key, false, Setting.Property.NodeScope, Setting.Property.Dynamic), REMOTE_CLUSTERS_SEEDS);
private static final Predicate<DiscoveryNode> DEFAULT_NODE_PREDICATE = (node) -> Version.CURRENT.isCompatible(node.getVersion())
&& (node.isMasterNode() == false || node.isDataNode() || node.isIngestNode());
private final TransportService transportService;
private final int numRemoteConnections;
private volatile Map<String, RemoteClusterConnection> remoteClusters = Collections.emptyMap();
@ -121,13 +124,6 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
connectionListener.onResponse(null);
} else {
CountDown countDown = new CountDown(seeds.size());
Predicate<DiscoveryNode> nodePredicate = (node) -> Version.CURRENT.isCompatible(node.getVersion());
if (REMOTE_NODE_ATTRIBUTE.exists(settings)) {
// nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for
// cross cluster search
String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
nodePredicate = nodePredicate.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
}
remoteClusters.putAll(this.remoteClusters);
for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {
RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey());
@ -143,7 +139,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
if (remote == null) { // this is a new cluster we have to add a new representation
remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections,
nodePredicate);
getNodePredicate(settings));
remoteClusters.put(entry.getKey(), remote);
}
@ -168,6 +164,15 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
this.remoteClusters = Collections.unmodifiableMap(remoteClusters);
}
static Predicate<DiscoveryNode> getNodePredicate(Settings settings) {
if (REMOTE_NODE_ATTRIBUTE.exists(settings)) {
// nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for cross cluster search
String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
return DEFAULT_NODE_PREDICATE.and((node) -> Booleans.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
}
return DEFAULT_NODE_PREDICATE;
}
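A minimal sketch of wiring the gateway attribute up on both sides, assuming the setting names referenced in the comment above:

import org.elasticsearch.common.settings.Settings;

// on the remote node: tag it as an eligible cross cluster search gateway
Settings remoteNode = Settings.builder()
        .put("node.attr.gateway", "true")
        .build();

// on the local cluster: make getNodePredicate() require that attribute
Settings local = Settings.builder()
        .put("search.remote.node.attr", "gateway")
        .build();
// the resulting predicate still applies DEFAULT_NODE_PREDICATE, so a tagged
// dedicated master is rejected regardless of the attribute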
/**
* Returns <code>true</code> if at least one remote cluster is configured
*/

View File

@ -0,0 +1,47 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.repositories.verify;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.util.ArrayList;
import java.util.List;
public class VerifyRepositoryResponseTests extends AbstractXContentTestCase<VerifyRepositoryResponse> {
@Override
protected VerifyRepositoryResponse doParseInstance(XContentParser parser) {
return VerifyRepositoryResponse.fromXContent(parser);
}
@Override
protected boolean supportsUnknownFields() {
return false;
}
@Override
protected VerifyRepositoryResponse createTestInstance() {
VerifyRepositoryResponse response = new VerifyRepositoryResponse();
List<VerifyRepositoryResponse.NodeView> nodes = new ArrayList<>();
nodes.add(new VerifyRepositoryResponse.NodeView("node-id", "node-name"));
response.setNodes(nodes);
return response;
}
}

View File

@ -19,18 +19,19 @@
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.cluster.metadata.AliasMetaData.Builder;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import java.io.IOException;
import java.util.function.Predicate;
import static org.hamcrest.Matchers.equalTo;
public class AliasMetaDataTests extends ESTestCase {
public class AliasMetaDataTests extends AbstractXContentTestCase<AliasMetaData> {
public void testSerialization() throws IOException {
final AliasMetaData before =
@ -52,4 +53,49 @@ public class AliasMetaDataTests extends ESTestCase {
assertThat(after, equalTo(before));
}
@Override
protected AliasMetaData createTestInstance() {
return createTestItem();
}
@Override
protected Predicate<String> getRandomFieldsExcludeFilter() {
return p -> p.equals("") // do not add elements at the top-level as any element at this level is parsed as a new alias
|| p.contains(".filter"); // do not insert random data into AliasMetaData#filter
}
@Override
protected AliasMetaData doParseInstance(XContentParser parser) throws IOException {
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
parser.nextToken();
}
assertEquals(XContentParser.Token.FIELD_NAME, parser.currentToken());
AliasMetaData aliasMetaData = AliasMetaData.Builder.fromXContent(parser);
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
return aliasMetaData;
}
@Override
protected boolean supportsUnknownFields() {
return true;
}
private static AliasMetaData createTestItem() {
Builder builder = AliasMetaData.builder(randomAlphaOfLengthBetween(3, 10));
if (randomBoolean()) {
builder.routing(randomAlphaOfLengthBetween(3, 10));
}
if (randomBoolean()) {
builder.searchRouting(randomAlphaOfLengthBetween(3, 10));
}
if (randomBoolean()) {
builder.indexRouting(randomAlphaOfLengthBetween(3, 10));
}
if (randomBoolean()) {
builder.filter("{\"term\":{\"year\":2016}}");
}
return builder.build();
}
}

View File

@ -142,19 +142,6 @@ public class CompositeAggregatorTests extends AggregatorTestCase {
createAggregatorFactory(builder, searcher);
}
public void testMissingBucket() throws Exception {
TermsValuesSourceBuilder terms = new TermsValuesSourceBuilder(randomAlphaOfLengthBetween(5, 10))
.field("unknown")
.missingBucket(true)
.missing("MISSING");
CompositeAggregationBuilder builder = new CompositeAggregationBuilder("test", Collections.singletonList(terms));
IndexSearcher searcher = new IndexSearcher(new MultiReader());
QueryShardException exc =
expectThrows(QueryShardException.class, () -> createAggregator(builder, searcher));
assertWarnings("[missing] is deprecated. Please use [missing_bucket] instead.");
assertThat(exc.getMessage(), containsString("cannot use [missing] option in conjunction with [missing_bucket]"));
}
public void testWithKeyword() throws Exception {
final List<Map<String, List<Object>>> dataset = new ArrayList<>();
dataset.addAll(

View File

@ -224,7 +224,6 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
value -> value,
DocValueFormat.RAW,
missingBucket,
null,
size,
1
);
@ -235,7 +234,6 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
context -> FieldData.sortableLongBitsToDoubles(DocValues.getSortedNumeric(context.reader(), fieldType.name())),
DocValueFormat.RAW,
missingBucket,
null,
size,
1
);
@ -249,7 +247,6 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
context -> DocValues.getSortedSet(context.reader(), fieldType.name()),
DocValueFormat.RAW,
missingBucket,
null,
size,
1
);
@ -261,7 +258,6 @@ public class CompositeValuesCollectorQueueTests extends AggregatorTestCase {
context -> FieldData.toString(DocValues.getSortedSet(context.reader(), fieldType.name())),
DocValueFormat.RAW,
missingBucket,
null,
size,
1
);

View File

@ -46,7 +46,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
false,
null,
1,
1
);
@ -57,20 +56,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
assertNull(source.createSortedDocsProducerOrNull(reader,
new TermQuery(new Term("keyword", "toto)"))));
source = new BinaryValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
(b) -> {},
keyword,
context -> null,
DocValueFormat.RAW,
false,
"missing_value",
1,
1
);
assertNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery()));
assertNull(source.createSortedDocsProducerOrNull(reader, null));
source = new BinaryValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
(b) -> {},
@ -78,7 +63,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
true,
null,
1,
1
);
@ -92,7 +76,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
false,
null,
0,
-1
);
@ -107,7 +90,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
false,
null,
1,
1);
assertNull(source.createSortedDocsProducerOrNull(reader, null));
@ -121,7 +103,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
keyword, context -> null,
DocValueFormat.RAW,
false,
null,
1,
1
);
@ -132,26 +113,12 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
assertNull(source.createSortedDocsProducerOrNull(reader,
new TermQuery(new Term("keyword", "toto)"))));
source = new GlobalOrdinalValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
keyword,
context -> null,
DocValueFormat.RAW,
false,
"missing_value",
1,
1
);
assertNull(source.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery()));
assertNull(source.createSortedDocsProducerOrNull(reader, null));
source = new GlobalOrdinalValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
keyword,
context -> null,
DocValueFormat.RAW,
true,
null,
1,
1
);
@ -164,7 +131,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
false,
null,
1,
-1
);
@ -178,7 +144,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
false,
null,
1,
1
);
@ -202,7 +167,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
value -> value,
DocValueFormat.RAW,
false,
null,
1,
1
);
@ -214,27 +178,12 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
assertNull(source.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)"))));
LongValuesSource sourceWithMissing = new LongValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
number,
context -> null,
value -> value,
DocValueFormat.RAW,
false,
0d,
1,
1);
assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery()));
assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, null));
assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, new TermQuery(new Term("keyword", "toto)"))));
sourceWithMissing = new LongValuesSource(
BigArrays.NON_RECYCLING_INSTANCE,
number,
context -> null,
value -> value,
DocValueFormat.RAW,
true,
null,
1,
1);
assertNull(sourceWithMissing.createSortedDocsProducerOrNull(reader, new MatchAllDocsQuery()));
@ -248,7 +197,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
value -> value,
DocValueFormat.RAW,
false,
null,
1,
-1
);
@ -262,7 +210,6 @@ public class SingleDimensionValuesSourceTests extends ESTestCase {
context -> null,
DocValueFormat.RAW,
false,
null,
1,
1
);

View File

@ -18,7 +18,6 @@
*/
package org.elasticsearch.transport;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
@ -30,7 +29,9 @@ import org.elasticsearch.common.settings.AbstractScopedSettings;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
@ -40,6 +41,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -50,6 +52,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Predicate;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.instanceOf;
@ -279,6 +282,75 @@ public class RemoteClusterServiceTests extends ESTestCase {
}
}
public void testRemoteNodeRoles() throws IOException, InterruptedException {
final Settings settings = Settings.EMPTY;
final List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
final Settings data = Settings.builder().put("node.master", false).build();
final Settings dedicatedMaster = Settings.builder().put("node.data", false).put("node.ingest", "false").build();
try (MockTransportService c1N1 =
startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c1N2 =
startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data);
MockTransportService c2N1 =
startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedMaster);
MockTransportService c2N2 =
startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, data)) {
final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode();
final DiscoveryNode c1N2Node = c1N2.getLocalDiscoNode();
final DiscoveryNode c2N1Node = c2N1.getLocalDiscoNode();
final DiscoveryNode c2N2Node = c2N2.getLocalDiscoNode();
knownNodes.add(c1N1Node);
knownNodes.add(c1N2Node);
knownNodes.add(c2N1Node);
knownNodes.add(c2N2Node);
Collections.shuffle(knownNodes, random());
try (MockTransportService transportService = MockTransportService.createNewService(
settings,
Version.CURRENT,
threadPool,
null)) {
transportService.start();
transportService.acceptIncomingRequests();
final Settings.Builder builder = Settings.builder();
builder.putList("search.remote.cluster_1.seeds", c1N1Node.getAddress().toString());
builder.putList("search.remote.cluster_2.seeds", c2N1Node.getAddress().toString());
try (RemoteClusterService service = new RemoteClusterService(settings, transportService)) {
assertFalse(service.isCrossClusterSearchEnabled());
service.initializeRemoteClusters();
assertFalse(service.isCrossClusterSearchEnabled());
final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
final CountDownLatch firstLatch = new CountDownLatch(1);
service.updateRemoteCluster(
"cluster_1",
Arrays.asList(c1N1Address, c1N2Address),
connectionListener(firstLatch));
firstLatch.await();
final CountDownLatch secondLatch = new CountDownLatch(1);
service.updateRemoteCluster(
"cluster_2",
Arrays.asList(c2N1Address, c2N2Address),
connectionListener(secondLatch));
secondLatch.await();
assertTrue(service.isCrossClusterSearchEnabled());
assertTrue(service.isRemoteClusterRegistered("cluster_1"));
assertFalse(service.isRemoteNodeConnected("cluster_1", c1N1Node));
assertTrue(service.isRemoteNodeConnected("cluster_1", c1N2Node));
assertTrue(service.isRemoteClusterRegistered("cluster_2"));
assertFalse(service.isRemoteNodeConnected("cluster_2", c2N1Node));
assertTrue(service.isRemoteNodeConnected("cluster_2", c2N2Node));
}
}
}
}
private ActionListener<Void> connectionListener(final CountDownLatch latch) {
return ActionListener.wrap(x -> latch.countDown(), x -> fail());
}
@ -630,4 +702,115 @@ public class RemoteClusterServiceTests extends ESTestCase {
}
}
}
public void testGetNodePredicateNodeRoles() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(Settings.EMPTY);
{
DiscoveryNode all = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class)), Version.CURRENT);
assertTrue(nodePredicate.test(all));
}
{
DiscoveryNode dataMaster = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.DATA, DiscoveryNode.Role.MASTER)), Version.CURRENT);
assertTrue(nodePredicate.test(dataMaster));
}
{
DiscoveryNode dedicatedMaster = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.MASTER)), Version.CURRENT);
assertFalse(nodePredicate.test(dedicatedMaster));
}
{
DiscoveryNode dedicatedIngest = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.INGEST)), Version.CURRENT);
assertTrue(nodePredicate.test(dedicatedIngest));
}
{
DiscoveryNode masterIngest = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.INGEST, DiscoveryNode.Role.MASTER)), Version.CURRENT);
assertTrue(nodePredicate.test(masterIngest));
}
{
DiscoveryNode dedicatedData = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.DATA)), Version.CURRENT);
assertTrue(nodePredicate.test(dedicatedData));
}
{
DiscoveryNode ingestData = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.of(DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST)), Version.CURRENT);
assertTrue(nodePredicate.test(ingestData));
}
{
DiscoveryNode coordOnly = new DiscoveryNode("id", address, Collections.emptyMap(),
new HashSet<>(EnumSet.noneOf(DiscoveryNode.Role.class)), Version.CURRENT);
assertTrue(nodePredicate.test(coordOnly));
}
}
public void testGetNodePredicateNodeVersion() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Set<DiscoveryNode.Role> roles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(Settings.EMPTY);
Version version = VersionUtils.randomVersion(random());
DiscoveryNode node = new DiscoveryNode("id", address, Collections.emptyMap(), roles, version);
assertThat(nodePredicate.test(node), equalTo(Version.CURRENT.isCompatible(version)));
}
public void testGetNodePredicateNodeAttrs() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Set<DiscoveryNode.Role> roles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build();
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(settings);
{
DiscoveryNode nonGatewayNode = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),
roles, Version.CURRENT);
assertFalse(nodePredicate.test(nonGatewayNode));
assertTrue(RemoteClusterService.getNodePredicate(Settings.EMPTY).test(nonGatewayNode));
}
{
DiscoveryNode gatewayNode = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
roles, Version.CURRENT);
assertTrue(nodePredicate.test(gatewayNode));
assertTrue(RemoteClusterService.getNodePredicate(Settings.EMPTY).test(gatewayNode));
}
{
DiscoveryNode noAttrNode = new DiscoveryNode("id", address, Collections.emptyMap(), roles, Version.CURRENT);
assertFalse(nodePredicate.test(noAttrNode));
assertTrue(RemoteClusterService.getNodePredicate(Settings.EMPTY).test(noAttrNode));
}
}
public void testGetNodePredicatesCombination() {
TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0);
Settings settings = Settings.builder().put("search.remote.node.attr", "gateway").build();
Predicate<DiscoveryNode> nodePredicate = RemoteClusterService.getNodePredicate(settings);
Set<DiscoveryNode.Role> allRoles = new HashSet<>(EnumSet.allOf(DiscoveryNode.Role.class));
Set<DiscoveryNode.Role> dedicatedMasterRoles = new HashSet<>(EnumSet.of(DiscoveryNode.Role.MASTER));
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
dedicatedMasterRoles, Version.CURRENT);
assertFalse(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),
dedicatedMasterRoles, Version.CURRENT);
assertFalse(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "false"),
dedicatedMasterRoles, Version.CURRENT);
assertFalse(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
allRoles, Version.CURRENT);
assertTrue(nodePredicate.test(node));
}
{
DiscoveryNode node = new DiscoveryNode("id", address, Collections.singletonMap("gateway", "true"),
allRoles, Version.V_5_3_0);
assertFalse(nodePredicate.test(node));
}
}
}

View File

@ -155,5 +155,5 @@ GET two:logs-2017.04/_search <1>
// TEST[skip:todo]
//TBD: Is there a missing description of the <1> callout above?
:edit_url: https://github.com/elastic/kibana/edit/{branch}/x-pack/docs/en/security/cross-cluster-kibana.asciidoc
include::{xkb-repo-dir}/security/cross-cluster-kibana.asciidoc[]
:edit_url: https://github.com/elastic/kibana/edit/{branch}/docs/security/cross-cluster-kibana.asciidoc
include::{kib-repo-dir}/security/cross-cluster-kibana.asciidoc[]

View File

@ -896,6 +896,7 @@ public class SamlAuthenticatorTests extends SamlTestCase {
assertThat(attributes.attributes(), iterableWithSize(1));
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30970")
public void testIncorrectSigningKeyIsRejected() throws Exception {
final CryptoTransform signer = randomBoolean() ? this::signDoc : this::signAssertions;
Instant now = clock.instant();

View File

@ -66,11 +66,8 @@ dependencies {
compile (project(':libs:x-content')) {
transitive = false
}
compile 'joda-time:joda-time:2.9.9'
compile project(':libs:elasticsearch-core')
runtime "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
runtime "org.apache.logging.log4j:log4j-api:${versions.log4j}"
runtime "org.apache.logging.log4j:log4j-core:${versions.log4j}"
testCompile "org.elasticsearch.test:framework:${version}"
}
@ -105,109 +102,3 @@ test {
classpath += jar.outputs.files
dependsOn jar
}
thirdPartyAudit.excludes = [
'com.fasterxml.jackson.dataformat.yaml.YAMLFactory',
'com.fasterxml.jackson.dataformat.yaml.YAMLMapper',
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
'org.fusesource.jansi.Ansi',
'org.fusesource.jansi.AnsiRenderer$Code',
// from log4j
'com.conversantmedia.util.concurrent.DisruptorBlockingQueue',
'com.conversantmedia.util.concurrent.SpinPolicy',
'com.fasterxml.jackson.annotation.JsonInclude$Include',
'com.fasterxml.jackson.databind.DeserializationContext',
'com.fasterxml.jackson.databind.DeserializationFeature',
'com.fasterxml.jackson.databind.JsonMappingException',
'com.fasterxml.jackson.databind.JsonNode',
'com.fasterxml.jackson.databind.Module$SetupContext',
'com.fasterxml.jackson.databind.ObjectReader',
'com.fasterxml.jackson.databind.ObjectWriter',
'com.fasterxml.jackson.databind.SerializerProvider',
'com.fasterxml.jackson.databind.deser.std.StdDeserializer',
'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer',
'com.fasterxml.jackson.databind.module.SimpleModule',
'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter',
'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider',
'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer',
'com.fasterxml.jackson.databind.ser.std.StdSerializer',
'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule',
'com.fasterxml.jackson.dataformat.xml.XmlMapper',
'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter',
'com.fasterxml.jackson.databind.node.JsonNodeFactory',
'com.fasterxml.jackson.databind.node.ObjectNode',
'com.lmax.disruptor.BlockingWaitStrategy',
'com.lmax.disruptor.BusySpinWaitStrategy',
'com.lmax.disruptor.EventFactory',
'com.lmax.disruptor.EventTranslator',
'com.lmax.disruptor.EventTranslatorTwoArg',
'com.lmax.disruptor.EventTranslatorVararg',
'com.lmax.disruptor.ExceptionHandler',
'com.lmax.disruptor.LifecycleAware',
'com.lmax.disruptor.RingBuffer',
'com.lmax.disruptor.Sequence',
'com.lmax.disruptor.SequenceReportingEventHandler',
'com.lmax.disruptor.SleepingWaitStrategy',
'com.lmax.disruptor.TimeoutBlockingWaitStrategy',
'com.lmax.disruptor.WaitStrategy',
'com.lmax.disruptor.YieldingWaitStrategy',
'com.lmax.disruptor.dsl.Disruptor',
'com.lmax.disruptor.dsl.ProducerType',
'javax.jms.Connection',
'javax.jms.ConnectionFactory',
'javax.jms.Destination',
'javax.jms.JMSException',
'javax.jms.MapMessage',
'javax.jms.Message',
'javax.jms.MessageConsumer',
'javax.jms.MessageProducer',
'javax.jms.Session',
'javax.mail.Authenticator',
'javax.mail.Message$RecipientType',
'javax.mail.PasswordAuthentication',
'javax.mail.Session',
'javax.mail.Transport',
'javax.mail.internet.InternetAddress',
'javax.mail.internet.InternetHeaders',
'javax.mail.internet.MimeBodyPart',
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
'javax.mail.util.ByteArrayDataSource',
'javax.persistence.AttributeConverter',
'javax.persistence.EntityManager',
'javax.persistence.EntityManagerFactory',
'javax.persistence.EntityTransaction',
'javax.persistence.Persistence',
'javax.persistence.PersistenceException',
'org.apache.commons.compress.compressors.CompressorStreamFactory',
'org.apache.commons.compress.utils.IOUtils',
'org.apache.commons.csv.CSVFormat',
'org.apache.commons.csv.QuoteMode',
'org.apache.kafka.clients.producer.Callback',
'org.apache.kafka.clients.producer.KafkaProducer',
'org.apache.kafka.clients.producer.Producer',
'org.apache.kafka.clients.producer.ProducerRecord',
'org.apache.kafka.clients.producer.RecordMetadata',
'org.codehaus.stax2.XMLStreamWriter2',
'org.jctools.queues.MessagePassingQueue$Consumer',
'org.jctools.queues.MpscArrayQueue',
'org.osgi.framework.AdaptPermission',
'org.osgi.framework.AdminPermission',
'org.osgi.framework.Bundle',
'org.osgi.framework.BundleActivator',
'org.osgi.framework.BundleContext',
'org.osgi.framework.BundleEvent',
'org.osgi.framework.BundleReference',
'org.osgi.framework.FrameworkUtil',
'org.osgi.framework.ServiceRegistration',
'org.osgi.framework.SynchronousBundleListener',
'org.osgi.framework.wiring.BundleWire',
'org.osgi.framework.wiring.BundleWiring',
'org.zeromq.ZMQ$Context',
'org.zeromq.ZMQ$Socket',
'org.zeromq.ZMQ'
]

View File

@ -1 +0,0 @@
f7b520c458572890807d143670c9b24f4de90897

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,5 +0,0 @@
=============================================================================
= NOTICE file corresponding to section 4d of the Apache License Version 2.0 =
=============================================================================
This product includes software developed by
Joda.org (http://www.joda.org/).

View File

@ -1 +0,0 @@
7a2999229464e7a324aa503c0a52ec0f05efe7bd

View File

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 1999-2005 The Apache Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,5 +0,0 @@
Apache log4j
Copyright 2007 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -1 +0,0 @@
c041978c686866ee8534f538c6220238db3bb6be

View File

@ -1,202 +0,0 @@
[Apache License, Version 2.0: full text identical to the deleted log4j license above; Copyright 1999-2005 The Apache Software Foundation]

View File

@ -1,5 +0,0 @@
[Apache log4j NOTICE: identical to the deleted NOTICE text above]

View File

@ -5,7 +5,7 @@
wait_for_status: green
- do:
-      catch: request
+      catch: bad_request
xpack.watcher.put_watch:
id: "my_exe_watch"
body: >
@ -33,7 +33,7 @@
}
- is_true: error.script_stack
-  - match: { status: 500 }
+  - match: { status: 400 }
---
"Test painless exceptions are returned when logging a broken response":