Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-09 14:34:43 +00:00)

Commit ec9f1a9f3b: Merge branch 'master' into index-lifecycle
@@ -24,6 +24,9 @@

=== Bug Fixes

+Fail snapshot operations early when creating or deleting a snapshot on a repository that has been
+written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140])
+
=== Regressions

=== Known Issues
@@ -284,9 +284,12 @@ executed again in order to conform to `requests_per_second`.

`failures`::

-Array of all indexing failures. If this is non-empty then the request aborted
-because of those failures. See `conflicts` for how to prevent version conflicts
-from aborting the operation.
+Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures.
+Delete-by-query is implemented using batches, and any failure causes the entire
+process to abort, but all failures in the current batch are collected into the
+array. You can use the `conflicts` option to prevent delete-by-query from
+aborting on version conflicts.

[float]
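For reference, a minimal sketch of how the `conflicts` option described above is passed (the index name `twitter` is illustrative, not part of this commit):

[source,js]
--------------------------------------------------
POST twitter/_delete_by_query?conflicts=proceed
{
  "query": {
    "match_all": {}
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]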
@@ -161,12 +161,12 @@ POST _reindex

`index` and `type` in `source` can both be lists, allowing you to copy from
lots of sources in one request. This will copy documents from the `_doc` and
-`post` types in the `twitter` and `blog` index. The copied documents would include the
-`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
+`post` types in the `twitter` and `blog` index. The copied documents would include the
+`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
specific parameters, you can use `query`.

-The Reindex API makes no effort to handle ID collisions. For such issues, the target index
-will remain valid, but it's not easy to predict which document will survive because
+The Reindex API makes no effort to handle ID collisions. For such issues, the target index
+will remain valid, but it's not easy to predict which document will survive because
the iteration order isn't well defined.

[source,js]
@@ -666,9 +666,11 @@ executed again in order to conform to `requests_per_second`.

`failures`::

-Array of all indexing failures. If this is non-empty then the request aborted
-because of those failures. See `conflicts` for how to prevent version conflicts
-from aborting the operation.
+Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures. Reindex
+is implemented using batches, and any failure causes the entire process to abort,
+but all failures in the current batch are collected into the array. You can use
+the `conflicts` option to prevent reindex from aborting on version conflicts.

[float]
[[docs-reindex-task-api]]
@@ -1004,7 +1006,7 @@ number for most indices. If slicing manually or otherwise tuning
automatic slicing, use these guidelines.

Query performance is most efficient when the number of `slices` is equal to the
-number of shards in the index. If that number is large (e.g. 500),
+number of shards in the index. If that number is large (e.g. 500),
choose a lower number as too many `slices` will hurt performance. Setting
`slices` higher than the number of shards generally does not improve efficiency
and adds overhead.
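As a concrete illustration of the guidance above, a sliced reindex request might look like this (the index names and the slice count of 5 are illustrative, not from this commit):

[source,js]
--------------------------------------------------
POST _reindex?slices=5&refresh
{
  "source": {
    "index": "twitter"
  },
  "dest": {
    "index": "new_twitter"
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]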
@@ -1018,7 +1020,7 @@ documents being reindexed and cluster resources.

[float]
=== Reindex daily indices

-You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
+You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
to reindex daily indices to apply a new template to the existing documents.

Assuming you have indices consisting of documents as follows:
@@ -338,9 +338,13 @@ executed again in order to conform to `requests_per_second`.

`failures`::

-Array of all indexing failures. If this is non-empty then the request aborted
-because of those failures. See `conflicts` for how to prevent version conflicts
-from aborting the operation.
+Array of failures if there were any unrecoverable errors during the process. If
+this is non-empty then the request aborted because of those failures.
+Update-by-query is implemented using batches, and any failure causes the entire
+process to abort, but all failures in the current batch are collected into the
+array. You can use the `conflicts` option to prevent update-by-query from
+aborting on version conflicts.

[float]
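The `conflicts` option works the same way here; a hedged sketch for update-by-query (the index name is illustrative):

[source,js]
--------------------------------------------------
POST twitter/_update_by_query?conflicts=proceed
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]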
@@ -44,12 +44,12 @@ If you register the same snapshot repository with multiple clusters, only
one cluster should have write access to the repository. All other clusters
connected to that repository should set the repository to `readonly` mode.

-NOTE: The snapshot format can change across major versions, so if you have
-clusters on different major versions trying to write the same repository,
-new snapshots written by one version will not be visible to the other. While
-setting the repository to `readonly` on all but one of the clusters should work
-with multiple clusters differing by one major version, it is not a supported
-configuration.
+IMPORTANT: The snapshot format can change across major versions, so if you have
+clusters on different versions trying to write the same repository, snapshots
+written by one version may not be visible to the other and the repository could
+be corrupted. While setting the repository to `readonly` on all but one of the
+clusters should work with multiple clusters differing by one major version, it
+is not a supported configuration.
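For the secondary clusters, registering the repository in `readonly` mode is a one-line setting; a minimal sketch, assuming a shared-filesystem repository (the repository name and location are illustrative):

[source,js]
--------------------------------------------------
PUT _snapshot/my_backup
{
  "type": "fs",
  "settings": {
    "location": "/mount/backups/my_backup",
    "readonly": true
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]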
[source,js]
-----------------------------------
@@ -8,4 +8,7 @@ The changes listed below have been released for the first time in Elasticsearch
=== Breaking changes

Core::
-* Tribe node has been removed in favor of Cross-Cluster-Search
+* Tribe node has been removed in favor of Cross-Cluster-Search
+
+Rest API::
+* The Clear Cache API only supports `POST` as HTTP method
@@ -1,7 +1,7 @@
{
  "indices.clear_cache": {
    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-clearcache.html",
-    "methods": ["POST", "GET"],
+    "methods": ["POST"],
    "url": {
      "path": "/_cache/clear",
      "paths": ["/_cache/clear", "/{index}/_cache/clear"],
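After this change, clearing a cache must go through `POST`; a minimal sketch of the call the spec now permits (the index name is illustrative):

[source,js]
--------------------------------------------------
POST /twitter/_cache/clear
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]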
@@ -230,9 +230,9 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
        SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
        List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
        if (snapshotInfo.state().completed()) {
-            Map<ShardId, IndexShardSnapshotStatus> shardStatues =
-                snapshotsService.snapshotShards(request.repository(), snapshotInfo);
-            for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) {
+            Map<ShardId, IndexShardSnapshotStatus> shardStatuses =
+                snapshotsService.snapshotShards(repositoryName, repositoryData, snapshotInfo);
+            for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatuses.entrySet()) {
                IndexShardSnapshotStatus.Copy lastSnapshotStatus = shardStatus.getValue().asCopy();
                shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), lastSnapshotStatus));
            }
@@ -162,17 +162,17 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
        if (o == null || getClass() != o.getClass()) return false;

        FieldCapabilitiesRequest that = (FieldCapabilitiesRequest) o;

-        if (!Arrays.equals(indices, that.indices)) return false;
-        if (!indicesOptions.equals(that.indicesOptions)) return false;
-        return Arrays.equals(fields, that.fields);
+        return Arrays.equals(indices, that.indices) &&
+            Objects.equals(indicesOptions, that.indicesOptions) &&
+            Arrays.equals(fields, that.fields) &&
+            Objects.equals(mergeResults, that.mergeResults);
    }

    @Override
    public int hashCode() {
-        int result = Arrays.hashCode(indices);
-        result = 31 * result + indicesOptions.hashCode();
-        result = 31 * result + Arrays.hashCode(fields);
-        return result;
+        return Objects.hash(Arrays.hashCode(indices),
+            indicesOptions,
+            Arrays.hashCode(fields),
+            mergeResults);
    }
}
@@ -230,13 +230,6 @@ public final class RepositoryData {
        return snapshotIds;
    }

-    /**
-     * Initializes the indices in the repository metadata; returns a new instance.
-     */
-    public RepositoryData initIndices(final Map<IndexId, Set<SnapshotId>> indexSnapshots) {
-        return new RepositoryData(genId, snapshotIds, snapshotStates, indexSnapshots, incompatibleSnapshotIds);
-    }
-
    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
@@ -352,9 +345,10 @@ public final class RepositoryData {
     * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata.
     */
    public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException {
-        Map<String, SnapshotId> snapshots = new HashMap<>();
-        Map<String, SnapshotState> snapshotStates = new HashMap<>();
-        Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
+        final Map<String, SnapshotId> snapshots = new HashMap<>();
+        final Map<String, SnapshotState> snapshotStates = new HashMap<>();
+        final Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
+
        if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
            while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
                String field = parser.currentName();
@@ -397,17 +391,18 @@ public final class RepositoryData {
                    throw new ElasticsearchParseException("start object expected [indices]");
                }
                while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-                    String indexName = parser.currentName();
-                    String indexId = null;
-                    Set<SnapshotId> snapshotIds = new LinkedHashSet<>();
+                    final String indexName = parser.currentName();
+                    final Set<SnapshotId> snapshotIds = new LinkedHashSet<>();
+
+                    IndexId indexId = null;
                    if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                        throw new ElasticsearchParseException("start object expected index[" + indexName + "]");
                    }
                    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-                        String indexMetaFieldName = parser.currentName();
+                        final String indexMetaFieldName = parser.currentName();
                        parser.nextToken();
                        if (INDEX_ID.equals(indexMetaFieldName)) {
-                            indexId = parser.text();
+                            indexId = new IndexId(indexName, parser.text());
                        } else if (SNAPSHOTS.equals(indexMetaFieldName)) {
                            if (parser.currentToken() != XContentParser.Token.START_ARRAY) {
                                throw new ElasticsearchParseException("start array expected [snapshots]");
@@ -428,12 +423,22 @@ public final class RepositoryData {
                                    // since we already have the name/uuid combo in the snapshots array
                                    uuid = parser.text();
                                }
-                                snapshotIds.add(snapshots.get(uuid));
+
+                                SnapshotId snapshotId = snapshots.get(uuid);
+                                if (snapshotId != null) {
+                                    snapshotIds.add(snapshotId);
+                                } else {
+                                    // A snapshotted index references a snapshot which does not exist in
+                                    // the list of snapshots. This can happen when multiple clusters in
+                                    // different versions create or delete snapshot in the same repository.
+                                    throw new ElasticsearchParseException("Detected a corrupted repository, index " + indexId
+                                        + " references an unknown snapshot uuid [" + uuid + "]");
+                                }
                            }
                        }
                    }
-                    assert indexId != null;
-                    indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds);
+                    indexSnapshots.put(indexId, snapshotIds);
                }
            } else {
                throw new ElasticsearchParseException("unknown field name [" + field + "]");
@@ -31,7 +31,6 @@ import org.elasticsearch.rest.action.RestToXContentListener;

import java.io.IOException;

-import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;

public class RestClearIndicesCacheAction extends BaseRestHandler {
@@ -40,9 +39,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
        super(settings);
        controller.registerHandler(POST, "/_cache/clear", this);
        controller.registerHandler(POST, "/{index}/_cache/clear", this);
-
-        controller.registerHandler(GET, "/_cache/clear", this);
-        controller.registerHandler(GET, "/{index}/_cache/clear", this);
    }

    @Override
@@ -20,10 +20,8 @@ package org.elasticsearch.search.aggregations.bucket.significant;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -103,11 +101,22 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri

        BucketSignificancePriorityQueue<SignificantStringTerms.Bucket> ordered = new BucketSignificancePriorityQueue<>(size);
        SignificantStringTerms.Bucket spare = null;
-        for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) {
-            if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) {
+        final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0;
+        final long maxId = needsFullScan ? valueCount : bucketOrds.size();
+        for (long ord = 0; ord < maxId; ord++) {
+            final long globalOrd;
+            final long bucketOrd;
+            if (needsFullScan) {
+                bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord);
+                globalOrd = ord;
+            } else {
+                assert bucketOrds != null;
+                bucketOrd = ord;
+                globalOrd = bucketOrds.get(ord);
+            }
+            if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) {
                continue;
            }
-            final long bucketOrd = getBucketOrd(globalTermOrd);
            final int bucketDocCount = bucketOrd < 0 ? 0 : bucketDocCount(bucketOrd);
            if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) {
                continue;
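The full-scan path above only matters when empty buckets must be emitted, which callers opt into via `min_doc_count`; a hedged sketch of such a request (the field name is illustrative):

[source,js]
--------------------------------------------------
GET /_search
{
  "aggs": {
    "tags": {
      "significant_terms": {
        "field": "tag",
        "min_doc_count": 0
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]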
@@ -120,7 +129,7 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStri
                spare = new SignificantStringTerms.Bucket(new BytesRef(), 0, 0, 0, 0, null, format);
            }
            spare.bucketOrd = bucketOrd;
-            copy(lookupGlobalOrd.apply(globalTermOrd), spare.termBytes);
+            copy(lookupGlobalOrd.apply(globalOrd), spare.termBytes);
            spare.subsetDf = bucketDocCount;
            spare.subsetSize = subsetSize;
            spare.supersetDf = termsAggFactory.getBackgroundFrequency(spare.termBytes);
@@ -71,7 +71,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
    protected final long valueCount;
    protected final GlobalOrdLookupFunction lookupGlobalOrd;

-    private final LongHash bucketOrds;
+    protected final LongHash bucketOrds;

    public interface GlobalOrdLookupFunction {
        BytesRef apply(long ord) throws IOException;
@@ -107,10 +107,6 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
        return bucketOrds != null;
    }

-    protected final long getBucketOrd(long globalOrd) {
-        return bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd);
-    }
-
    private void collectGlobalOrd(int doc, long globalOrd, LeafBucketCollector sub) throws IOException {
        if (bucketOrds == null) {
            collectExistingBucket(sub, doc, globalOrd);
@@ -188,17 +184,28 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
        long otherDocCount = 0;
        BucketPriorityQueue<OrdBucket> ordered = new BucketPriorityQueue<>(size, order.comparator(this));
        OrdBucket spare = new OrdBucket(-1, 0, null, showTermDocCountError, 0);
-        for (long globalTermOrd = 0; globalTermOrd < valueCount; ++globalTermOrd) {
-            if (includeExclude != null && !acceptedGlobalOrdinals.get(globalTermOrd)) {
+        final boolean needsFullScan = bucketOrds == null || bucketCountThresholds.getMinDocCount() == 0;
+        final long maxId = needsFullScan ? valueCount : bucketOrds.size();
+        for (long ord = 0; ord < maxId; ord++) {
+            final long globalOrd;
+            final long bucketOrd;
+            if (needsFullScan) {
+                bucketOrd = bucketOrds == null ? ord : bucketOrds.find(ord);
+                globalOrd = ord;
+            } else {
+                assert bucketOrds != null;
+                bucketOrd = ord;
+                globalOrd = bucketOrds.get(ord);
+            }
+            if (includeExclude != null && !acceptedGlobalOrdinals.get(globalOrd)) {
                continue;
            }
-            final long bucketOrd = getBucketOrd(globalTermOrd);
            final int bucketDocCount = bucketOrd < 0 ? 0 : bucketDocCount(bucketOrd);
            if (bucketCountThresholds.getMinDocCount() > 0 && bucketDocCount == 0) {
                continue;
            }
            otherDocCount += bucketDocCount;
-            spare.globalOrd = globalTermOrd;
+            spare.globalOrd = globalOrd;
            spare.bucketOrd = bucketOrd;
            spare.docCount = bucketDocCount;
            if (bucketCountThresholds.getShardMinDocCount() <= spare.docCount) {
@@ -378,7 +385,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
            }
            final long ord = i - 1; // remember we do +1 when counting
            final long globalOrd = mapping.applyAsLong(ord);
-            long bucketOrd = getBucketOrd(globalOrd);
+            long bucketOrd = bucketOrds == null ? globalOrd : bucketOrds.find(globalOrd);
            incrementBucketDocCount(bucketOrd, inc);
        }
    }
@@ -592,10 +592,9 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
     * @return map of shard id to snapshot status
     */
    public Map<ShardId, IndexShardSnapshotStatus> snapshotShards(final String repositoryName,
+                                                                 final RepositoryData repositoryData,
                                                                  final SnapshotInfo snapshotInfo) throws IOException {
        final Repository repository = repositoriesService.repository(repositoryName);
-        final RepositoryData repositoryData = repository.getRepositoryData();
-
        final Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>();
        for (String index : snapshotInfo.indices()) {
            IndexId indexId = repositoryData.resolveIndexId(index);
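The method above backs the snapshot status API; for reference, a hedged sketch of the REST call that reaches it (the repository and snapshot names are illustrative):

[source,js]
--------------------------------------------------
GET _snapshot/my_backup/snapshot_1/_status
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]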
@@ -21,15 +21,18 @@ package org.elasticsearch.action.fieldcaps;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.ValidationException;
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.common.util.ArrayUtils;
+import org.elasticsearch.test.AbstractStreamableTestCase;

import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;

-public class FieldCapabilitiesRequestTests extends ESTestCase {
-    private FieldCapabilitiesRequest randomRequest() {
+public class FieldCapabilitiesRequestTests extends AbstractStreamableTestCase<FieldCapabilitiesRequest> {
+
+    @Override
+    protected FieldCapabilitiesRequest createTestInstance() {
        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
        int size = randomIntBetween(1, 20);
        String[] randomFields = new String[size];
@@ -50,50 +53,33 @@ public class FieldCapabilitiesRequestTests extends ESTestCase {
        return request;
    }

-    public void testEqualsAndHashcode() {
-        FieldCapabilitiesRequest request = new FieldCapabilitiesRequest();
-        request.indices("foo");
-        request.indicesOptions(IndicesOptions.lenientExpandOpen());
-        request.fields("bar");
-
-        FieldCapabilitiesRequest other = new FieldCapabilitiesRequest();
-        other.indices("foo");
-        other.indicesOptions(IndicesOptions.lenientExpandOpen());
-        other.fields("bar");
-        assertEquals(request, request);
-        assertEquals(request, other);
-        assertEquals(request.hashCode(), other.hashCode());
-
-        // change indices
-        other.indices("foo", "bar");
-        assertNotEquals(request, other);
-        other.indices("foo");
-        assertEquals(request, other);
-
-        // change fields
-        other.fields("foo", "bar");
-        assertNotEquals(request, other);
-        other.fields("bar");
-        assertEquals(request, request);
-
-        // change indices options
-        other.indicesOptions(IndicesOptions.strictExpand());
-        assertNotEquals(request, other);
-
+    @Override
+    protected FieldCapabilitiesRequest createBlankInstance() {
+        return new FieldCapabilitiesRequest();
    }

-    public void testSerialization() throws IOException {
-        for (int i = 0; i < 20; i++) {
-            FieldCapabilitiesRequest request = randomRequest();
-            BytesStreamOutput output = new BytesStreamOutput();
-            request.writeTo(output);
-            output.flush();
-            StreamInput input = output.bytes().streamInput();
-            FieldCapabilitiesRequest deserialized = new FieldCapabilitiesRequest();
-            deserialized.readFrom(input);
-            assertEquals(deserialized, request);
-            assertEquals(deserialized.hashCode(), request.hashCode());
-        }
+    @Override
+    protected FieldCapabilitiesRequest mutateInstance(FieldCapabilitiesRequest instance) throws IOException {
+        List<Consumer<FieldCapabilitiesRequest>> mutators = new ArrayList<>();
+        mutators.add(request -> {
+            String[] fields = ArrayUtils.concat(request.fields(), new String[] {randomAlphaOfLength(10)});
+            request.fields(fields);
+        });
+        mutators.add(request -> {
+            String[] indices = ArrayUtils.concat(instance.indices(), generateRandomStringArray(5, 10, false, false));
+            request.indices(indices);
+        });
+        mutators.add(request -> {
+            IndicesOptions indicesOptions = randomValueOtherThan(request.indicesOptions(),
+                () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
+            request.indicesOptions(indicesOptions);
+        });
+        mutators.add(request -> request.setMergeResults(!request.isMergeResults()));
+
+        FieldCapabilitiesRequest mutatedInstance = copyInstance(instance);
+        Consumer<FieldCapabilitiesRequest> mutator = randomFrom(mutators);
+        mutator.accept(mutatedInstance);
+        return mutatedInstance;
    }

    public void testValidation() {
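The request class under test backs the field capabilities API; a hedged sketch of the corresponding REST call (the field name is illustrative):

[source,js]
--------------------------------------------------
GET _field_caps?fields=rating
--------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]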
@@ -19,11 +19,14 @@

package org.elasticsearch.repositories;

+import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotState;
@@ -39,7 +42,11 @@ import java.util.List;
import java.util.Map;
import java.util.Set;

import static java.util.Collections.emptySet;
+import static java.util.Collections.singleton;
+import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN;
+import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;

/**
@@ -101,15 +108,18 @@ public class RepositoryDataTests extends ESTestCase {
    public void testInitIndices() {
        final int numSnapshots = randomIntBetween(1, 30);
        final Map<String, SnapshotId> snapshotIds = new HashMap<>(numSnapshots);
+        final Map<String, SnapshotState> snapshotStates = new HashMap<>(numSnapshots);
        for (int i = 0; i < numSnapshots; i++) {
            final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
            snapshotIds.put(snapshotId.getUUID(), snapshotId);
+            snapshotStates.put(snapshotId.getUUID(), randomFrom(SnapshotState.values()));
        }
        RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds,
            Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList());
        // test that initializing indices works
        Map<IndexId, Set<SnapshotId>> indices = randomIndices(snapshotIds);
-        RepositoryData newRepoData = repositoryData.initIndices(indices);
+        RepositoryData newRepoData = new RepositoryData(repositoryData.getGenId(), snapshotIds, snapshotStates, indices,
+            new ArrayList<>(repositoryData.getIncompatibleSnapshotIds()));
        List<SnapshotId> expected = new ArrayList<>(repositoryData.getSnapshotIds());
        Collections.sort(expected);
        List<SnapshotId> actual = new ArrayList<>(newRepoData.getSnapshotIds());
@@ -153,6 +163,81 @@ public class RepositoryDataTests extends ESTestCase {
        assertNull(repositoryData.getSnapshotState(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())));
    }

+    public void testIndexThatReferencesAnUnknownSnapshot() throws IOException {
+        final XContent xContent = randomFrom(XContentType.values()).xContent();
+        final RepositoryData repositoryData = generateRandomRepoData();
+
+        XContentBuilder builder = XContentBuilder.builder(xContent);
+        repositoryData.snapshotsToXContent(builder, ToXContent.EMPTY_PARAMS);
+        RepositoryData parsedRepositoryData = RepositoryData.snapshotsFromXContent(createParser(builder), repositoryData.getGenId());
+        assertEquals(repositoryData, parsedRepositoryData);
+
+        Map<String, SnapshotId> snapshotIds = new HashMap<>();
+        Map<String, SnapshotState> snapshotStates = new HashMap<>();
+        for (SnapshotId snapshotId : parsedRepositoryData.getSnapshotIds()) {
+            snapshotIds.put(snapshotId.getUUID(), snapshotId);
+            snapshotStates.put(snapshotId.getUUID(), parsedRepositoryData.getSnapshotState(snapshotId));
+        }
+
+        final IndexId corruptedIndexId = randomFrom(parsedRepositoryData.getIndices().values());
+
+        Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
+        for (Map.Entry<String, IndexId> snapshottedIndex : parsedRepositoryData.getIndices().entrySet()) {
+            IndexId indexId = snapshottedIndex.getValue();
+            Set<SnapshotId> snapshotsIds = new LinkedHashSet<>(parsedRepositoryData.getSnapshots(indexId));
+            if (corruptedIndexId.equals(indexId)) {
+                snapshotsIds.add(new SnapshotId("_uuid", "_does_not_exist"));
+            }
+            indexSnapshots.put(indexId, snapshotsIds);
+        }
+        assertNotNull(corruptedIndexId);
+
+        RepositoryData corruptedRepositoryData = new RepositoryData(parsedRepositoryData.getGenId(), snapshotIds, snapshotStates,
+            indexSnapshots, new ArrayList<>(parsedRepositoryData.getIncompatibleSnapshotIds()));
+
+        final XContentBuilder corruptedBuilder = XContentBuilder.builder(xContent);
+        corruptedRepositoryData.snapshotsToXContent(corruptedBuilder, ToXContent.EMPTY_PARAMS);
+
+        ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () ->
+            RepositoryData.snapshotsFromXContent(createParser(corruptedBuilder), corruptedRepositoryData.getGenId()));
+        assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index " + corruptedIndexId + " references an unknown " +
+            "snapshot uuid [_does_not_exist]"));
+    }
+
+    public void testIndexThatReferenceANullSnapshot() throws IOException {
+        final XContentBuilder builder = XContentBuilder.builder(randomFrom(XContentType.JSON).xContent());
+        builder.startObject();
+        {
+            builder.startArray("snapshots");
+            builder.value(new SnapshotId("_name", "_uuid"));
+            builder.endArray();
+
+            builder.startObject("indices");
+            {
+                builder.startObject("docs");
+                {
+                    builder.field("id", "_id");
+                    builder.startArray("snapshots");
+                    {
+                        builder.startObject();
+                        if (randomBoolean()) {
+                            builder.field("name", "_name");
+                        }
+                        builder.endObject();
+                    }
+                    builder.endArray();
+                }
+                builder.endObject();
+            }
+            builder.endObject();
+        }
+        builder.endObject();
+
+        ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () ->
+            RepositoryData.snapshotsFromXContent(createParser(builder), randomNonNegativeLong()));
+        assertThat(e.getMessage(), equalTo("Detected a corrupted repository, index [docs/_id] references an unknown snapshot uuid [null]"));
+    }
+
    public static RepositoryData generateRandomRepoData() {
        final int numIndices = randomIntBetween(1, 30);
        final List<IndexId> indices = new ArrayList<>(numIndices);
@@ -61,7 +61,6 @@ buildRestTests.expectedUnconvertedCandidates = [
        'en/watcher/trigger/schedule/yearly.asciidoc',
        'en/watcher/troubleshooting.asciidoc',
        'en/rest-api/license/delete-license.asciidoc',
-        'en/rest-api/license/start-trial.asciidoc',
        'en/rest-api/license/update-license.asciidoc',
        'en/ml/api-quickref.asciidoc',
        'en/rest-api/ml/delete-calendar-event.asciidoc',
@@ -40,7 +40,7 @@ The following example checks whether you are eligible to start a trial:

[source,js]
------------------------------------------------------------
-POST _xpack/license/start_trial
+GET _xpack/license/start_trial
------------------------------------------------------------
// CONSOLE
// TEST[skip:license testing issues]
@@ -49,6 +49,27 @@ Example response:
[source,js]
------------------------------------------------------------
{
-  "trial_was_started": true
+  "eligible_to_start_trial": true
}
------------------------------------------------------------
// NOTCONSOLE

+The following example starts a 30-day trial license. The acknowledge
+parameter is required as you are initiating a license that will expire.
+
+[source,js]
+------------------------------------------------------------
+POST _xpack/license/start_trial?acknowledge=true
+------------------------------------------------------------
+// CONSOLE
+// TEST[skip:license testing issues]
+
+Example response:
+[source,js]
+------------------------------------------------------------
+{
+  "trial_was_started": true,
+  "acknowledged": true
+}
+------------------------------------------------------------
+// NOTCONSOLE
@@ -41,11 +41,11 @@ public class LicensingClient {
        client.execute(DeleteLicenseAction.INSTANCE, request, listener);
    }

-    public PostStartTrialRequestBuilder preparePostUpgradeToTrial() {
+    public PostStartTrialRequestBuilder preparePostStartTrial() {
        return new PostStartTrialRequestBuilder(client, PostStartTrialAction.INSTANCE);
    }

-    public GetTrialStatusRequestBuilder prepareGetUpgradeToTrial() {
+    public GetTrialStatusRequestBuilder prepareGetStartTrial() {
        return new GetTrialStatusRequestBuilder(client, GetTrialStatusAction.INSTANCE);
    }
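The renamed builders back the trial-status and start-trial endpoints; for orientation, a hedged sketch of the REST calls they serve (the same calls appear in this commit's documentation changes):

[source,js]
------------------------------------------------------------
GET _xpack/license/trial_status

POST _xpack/license/start_trial?acknowledge=true
------------------------------------------------------------
// CONSOLE
// TEST[skip:illustrative example]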
@@ -15,6 +15,7 @@ import java.io.IOException;

public class PostStartTrialRequest extends MasterNodeRequest<PostStartTrialRequest> {

+    private boolean acknowledge = false;
    private String type;

    @Override
@@ -31,25 +32,47 @@ public class PostStartTrialRequest extends MasterNodeRequest<PostStartTrialReque
        return type;
    }

+    public PostStartTrialRequest acknowledge(boolean acknowledge) {
+        this.acknowledge = acknowledge;
+        return this;
+    }
+
+    public boolean isAcknowledged() {
+        return acknowledge;
+    }
+
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        if (in.getVersion().onOrAfter(Version.V_6_3_0)) {
            type = in.readString();
+            acknowledge = in.readBoolean();
        } else {
            type = "trial";
+            acknowledge = true;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        Version version = Version.V_6_3_0;
+        // TODO: Change to 6.3 after backport
+        Version version = Version.V_7_0_0_alpha1;
        if (out.getVersion().onOrAfter(version)) {
+            super.writeTo(out);
            out.writeString(type);
+            out.writeBoolean(acknowledge);
        } else {
-            throw new IllegalArgumentException("All nodes in cluster must be version [" + version
-                + "] or newer to use `type` parameter. Attempting to write to node with version [" + out.getVersion() + "].");
+            if ("trial".equals(type) == false) {
+                throw new IllegalArgumentException("All nodes in cluster must be version [" + version
+                    + "] or newer to start trial with a different type than 'trial'. Attempting to write to " +
+                    "a node with version [" + out.getVersion() + "] with trial type [" + type + "].");
+            } else if (acknowledge == false) {
+                throw new IllegalArgumentException("Request must be acknowledged to send to a node with a version " +
+                    "prior to [" + version + "]. Attempting to send request to node with version [" + out.getVersion() + "] " +
+                    "without acknowledgement.");
+            } else {
+                super.writeTo(out);
+            }
        }
    }
}
@@ -14,4 +14,9 @@ class PostStartTrialRequestBuilder extends ActionRequestBuilder<PostStartTrialRe
    PostStartTrialRequestBuilder(ElasticsearchClient client, PostStartTrialAction action) {
        super(client, action, new PostStartTrialRequest());
    }
+
+    public PostStartTrialRequestBuilder setAcknowledge(boolean acknowledge) {
+        request.acknowledge(acknowledge);
+        return this;
+    }
}
@@ -5,23 +5,33 @@
 */
package org.elasticsearch.license;

+import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;

class PostStartTrialResponse extends ActionResponse {

+    // Nodes prior to 6.3 did not have NEED_ACKNOWLEDGEMENT as part of status
+    enum Pre63Status {
+        UPGRADED_TO_TRIAL,
+        TRIAL_ALREADY_ACTIVATED;
+    }
+
    enum Status {
        UPGRADED_TO_TRIAL(true, null, RestStatus.OK),
-        TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN);
+        TRIAL_ALREADY_ACTIVATED(false, "Operation failed: Trial was already activated.", RestStatus.FORBIDDEN),
+        NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK);

        private final boolean isTrialStarted;

        private final String errorMessage;
        private final RestStatus restStatus;

        Status(boolean isTrialStarted, String errorMessage, RestStatus restStatus) {
            this.isTrialStarted = isTrialStarted;
            this.errorMessage = errorMessage;
@@ -39,15 +49,24 @@ class PostStartTrialResponse extends ActionResponse {
        RestStatus getRestStatus() {
            return restStatus;
        }

    }

    private Status status;
+    private Map<String, String[]> acknowledgeMessages;
+    private String acknowledgeMessage;

    PostStartTrialResponse() {
    }

    PostStartTrialResponse(Status status) {
+        this(status, Collections.emptyMap(), null);
+    }
+
+    PostStartTrialResponse(Status status, Map<String, String[]> acknowledgeMessages, String acknowledgeMessage) {
        this.status = status;
+        this.acknowledgeMessages = acknowledgeMessages;
+        this.acknowledgeMessage = acknowledgeMessage;
    }

    public Status getStatus() {
@@ -57,10 +76,58 @@ class PostStartTrialResponse extends ActionResponse {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        status = in.readEnum(Status.class);
+        // TODO: Change to 6.3 after backport
+        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+            acknowledgeMessage = in.readOptionalString();
+            int size = in.readVInt();
+            Map<String, String[]> acknowledgeMessages = new HashMap<>(size);
+            for (int i = 0; i < size; i++) {
+                String feature = in.readString();
+                int nMessages = in.readVInt();
+                String[] messages = new String[nMessages];
+                for (int j = 0; j < nMessages; j++) {
+                    messages[j] = in.readString();
+                }
+                acknowledgeMessages.put(feature, messages);
+            }
+            this.acknowledgeMessages = acknowledgeMessages;
+        } else {
+            this.acknowledgeMessages = Collections.emptyMap();
+        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
-        out.writeEnum(status);
+        // TODO: Change to 6.3 after backport
+        Version version = Version.V_7_0_0_alpha1;
+        if (out.getVersion().onOrAfter(version)) {
+            out.writeEnum(status);
+            out.writeOptionalString(acknowledgeMessage);
+            out.writeVInt(acknowledgeMessages.size());
+            for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
+                out.writeString(entry.getKey());
+                out.writeVInt(entry.getValue().length);
+                for (String message : entry.getValue()) {
+                    out.writeString(message);
+                }
+            }
+        } else {
+            if (status == Status.UPGRADED_TO_TRIAL) {
+                out.writeEnum(Pre63Status.UPGRADED_TO_TRIAL);
+            } else if (status == Status.TRIAL_ALREADY_ACTIVATED) {
+                out.writeEnum(Pre63Status.TRIAL_ALREADY_ACTIVATED);
+            } else {
+                throw new IllegalArgumentException("Starting trial on node with version [" + Version.CURRENT + "] requires " +
+                    "acknowledgement parameter.");
+            }
+        }
    }

+    Map<String, String[]> getAcknowledgementMessages() {
+        return acknowledgeMessages;
+    }
+
+    String getAcknowledgementMessage() {
+        return acknowledgeMessage;
+    }
}
@@ -29,7 +29,7 @@ public class RestGetTrialStatus extends XPackRestHandler {

    @Override
    protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
-        return channel -> client.licensing().prepareGetUpgradeToTrial().execute(
+        return channel -> client.licensing().prepareGetStartTrial().execute(
            new RestBuilderListener<GetTrialStatusResponse>(channel) {
                @Override
                public RestResponse buildResponse(GetTrialStatusResponse response, XContentBuilder builder) throws Exception {
@@ -16,6 +16,7 @@ import org.elasticsearch.xpack.core.XPackClient;
import org.elasticsearch.xpack.core.rest.XPackRestHandler;

import java.io.IOException;
+import java.util.Map;

import static org.elasticsearch.rest.RestRequest.Method.POST;

@@ -30,23 +31,36 @@ public class RestPostStartTrialLicense extends XPackRestHandler {
    protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) throws IOException {
        PostStartTrialRequest startTrialRequest = new PostStartTrialRequest();
        startTrialRequest.setType(request.param("type", "trial"));
+        startTrialRequest.acknowledge(request.paramAsBoolean("acknowledge", false));
        return channel -> client.licensing().postStartTrial(startTrialRequest,
            new RestBuilderListener<PostStartTrialResponse>(channel) {
                @Override
                public RestResponse buildResponse(PostStartTrialResponse response, XContentBuilder builder) throws Exception {
                    PostStartTrialResponse.Status status = response.getStatus();
+                    builder.startObject();
+                    builder.field("acknowledged", startTrialRequest.isAcknowledged());
                    if (status.isTrialStarted()) {
-                        builder.startObject()
-                            .field("trial_was_started", true)
-                            .field("type", startTrialRequest.getType())
-                            .endObject();
+                        builder.field("trial_was_started", true);
+                        builder.field("type", startTrialRequest.getType());
                    } else {
-                        builder.startObject()
-                            .field("trial_was_started", false)
-                            .field("error_message", status.getErrorMessage())
-                            .endObject();
+                        builder.field("trial_was_started", false);
+                        builder.field("error_message", status.getErrorMessage());
                    }
+
+                    Map<String, String[]> acknowledgementMessages = response.getAcknowledgementMessages();
+                    if (acknowledgementMessages.isEmpty() == false) {
+                        builder.startObject("acknowledge");
+                        builder.field("message", response.getAcknowledgementMessage());
+                        for (Map.Entry<String, String[]> entry : acknowledgementMessages.entrySet()) {
+                            builder.startArray(entry.getKey());
+                            for (String message : entry.getValue()) {
+                                builder.value(message);
+                            }
+                            builder.endArray();
+                        }
+                        builder.endObject();
+                    }
+                    builder.endObject();
                    return new BytesRestResponse(status.getRestStatus(), builder);
                }
            });
@@ -15,10 +15,23 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;

import java.time.Clock;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

public class StartTrialClusterTask extends ClusterStateUpdateTask {

+    private static final String ACKNOWLEDGEMENT_HEADER = "This API initiates a free 30-day trial for all platinum features. " +
+        "By starting this trial, you agree that it is subject to the terms and conditions at" +
+        " https://www.elastic.co/legal/trial_license/. To begin your free trial, call /start_trial again and specify " +
+        "the \"acknowledge=true\" parameter.";
+
+    private static final Map<String, String[]> ACK_MESSAGES = Collections.singletonMap("security",
+        new String[] {"With a trial license, X-Pack security features are available, but are not enabled by default."});
+
    private final Logger logger;
    private final String clusterName;
    private final PostStartTrialRequest request;
@@ -39,7 +52,10 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask {
        LicensesMetaData oldLicensesMetaData = oldState.metaData().custom(LicensesMetaData.TYPE);
        logger.debug("started self generated trial license: {}", oldLicensesMetaData);

-        if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) {
+        if (request.isAcknowledged() == false) {
+            listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.NEED_ACKNOWLEDGEMENT,
+                ACK_MESSAGES, ACKNOWLEDGEMENT_HEADER));
+        } else if (oldLicensesMetaData == null || oldLicensesMetaData.isEligibleForTrial()) {
            listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.UPGRADED_TO_TRIAL));
        } else {
            listener.onResponse(new PostStartTrialResponse(PostStartTrialResponse.Status.TRIAL_ALREADY_ACTIVATED));
@@ -50,7 +66,9 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask {
    public ClusterState execute(ClusterState currentState) throws Exception {
        LicensesMetaData currentLicensesMetaData = currentState.metaData().custom(LicensesMetaData.TYPE);

-        if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) {
+        if (request.isAcknowledged() == false) {
+            return currentState;
+        } else if (currentLicensesMetaData == null || currentLicensesMetaData.isEligibleForTrial()) {
            long issueDate = clock.millis();
            MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
            long expiryDate = issueDate + LicenseService.NON_BASIC_SELF_GENERATED_LICENSE_DURATION.getMillis();
@@ -56,33 +56,47 @@ public class StartTrialLicenseTests extends AbstractLicensesIntegrationTestCase
        assertEquals(200, response.getStatusLine().getStatusCode());
        assertEquals("{\"eligible_to_start_trial\":true}", body);

-        String type = randomFrom(LicenseService.VALID_TRIAL_TYPES);
-
-        Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + type);
+        // Test that starting will fail without acknowledgement
+        Response response2 = restClient.performRequest("POST", "/_xpack/license/start_trial");
        String body2 = Streams.copyToString(new InputStreamReader(response2.getEntity().getContent(), StandardCharsets.UTF_8));
        assertEquals(200, response2.getStatusLine().getStatusCode());
-        assertTrue(body2.contains("\"trial_was_started\":true"));
-        assertTrue(body2.contains("\"type\":\"" + type + "\""));
+        assertTrue(body2.contains("\"trial_was_started\":false"));
+        assertTrue(body2.contains("\"error_message\":\"Operation failed: Needs acknowledgement.\""));
+        assertTrue(body2.contains("\"acknowledged\":false"));

        assertBusy(() -> {
            GetLicenseResponse getLicenseResponse = licensingClient.prepareGetLicense().get();
            assertEquals("basic", getLicenseResponse.license().type());
        });

+        String type = randomFrom(LicenseService.VALID_TRIAL_TYPES);
+
+        Response response3 = restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + type);
+        String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8));
+        assertEquals(200, response3.getStatusLine().getStatusCode());
+        assertTrue(body3.contains("\"trial_was_started\":true"));
+        assertTrue(body3.contains("\"type\":\"" + type + "\""));
+        assertTrue(body3.contains("\"acknowledged\":true"));
+
        assertBusy(() -> {
            GetLicenseResponse postTrialLicenseResponse = licensingClient.prepareGetLicense().get();
            assertEquals(type, postTrialLicenseResponse.license().type());
        });

-        Response response3 = restClient.performRequest("GET", "/_xpack/license/trial_status");
-        String body3 = Streams.copyToString(new InputStreamReader(response3.getEntity().getContent(), StandardCharsets.UTF_8));
-        assertEquals(200, response3.getStatusLine().getStatusCode());
-        assertEquals("{\"eligible_to_start_trial\":false}", body3);
+        Response response4 = restClient.performRequest("GET", "/_xpack/license/trial_status");
+        String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8));
+        assertEquals(200, response4.getStatusLine().getStatusCode());
+        assertEquals("{\"eligible_to_start_trial\":false}", body4);

        String secondAttemptType = randomFrom(LicenseService.VALID_TRIAL_TYPES);

        ResponseException ex = expectThrows(ResponseException.class,
-            () -> restClient.performRequest("POST", "/_xpack/license/start_trial?type=" + secondAttemptType));
-        Response response4 = ex.getResponse();
-        String body4 = Streams.copyToString(new InputStreamReader(response4.getEntity().getContent(), StandardCharsets.UTF_8));
-        assertEquals(403, response4.getStatusLine().getStatusCode());
-        assertTrue(body4.contains("\"trial_was_started\":false"));
-        assertTrue(body4.contains("\"error_message\":\"Operation failed: Trial was already activated.\""));
+            () -> restClient.performRequest("POST", "/_xpack/license/start_trial?acknowledge=true&type=" + secondAttemptType));
+        Response response5 = ex.getResponse();
+        String body5 = Streams.copyToString(new InputStreamReader(response5.getEntity().getContent(), StandardCharsets.UTF_8));
+        assertEquals(403, response5.getStatusLine().getStatusCode());
+        assertTrue(body5.contains("\"trial_was_started\":false"));
+        assertTrue(body5.contains("\"error_message\":\"Operation failed: Trial was already activated.\""));
    }

    public void testInvalidType() throws Exception {
@@ -10,6 +10,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.BinaryArithmeticProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.arithmetic.UnaryArithmeticProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.datetime.DateTimeProcessor;
+import org.elasticsearch.xpack.sql.expression.function.scalar.math.BinaryMathProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.math.MathProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.BucketExtractorProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ChainingProcessor;
@@ -40,6 +41,7 @@ public final class Processors {
        // arithmetic
        entries.add(new Entry(Processor.class, BinaryArithmeticProcessor.NAME, BinaryArithmeticProcessor::new));
        entries.add(new Entry(Processor.class, UnaryArithmeticProcessor.NAME, UnaryArithmeticProcessor::new));
+        entries.add(new Entry(Processor.class, BinaryMathProcessor.NAME, BinaryMathProcessor::new));
        // datetime
        entries.add(new Entry(Processor.class, DateTimeProcessor.NAME, DateTimeProcessor::new));
        // math
@@ -10,6 +10,7 @@ import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.BinaryExpression;
import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.ExpressionId;
+import org.elasticsearch.xpack.sql.expression.Expressions;
import org.elasticsearch.xpack.sql.expression.FieldAttribute;
import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.expression.NamedExpression;
@@ -159,7 +160,7 @@ abstract class QueryTranslator {
            }
        }

-        throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", e.nodeName(), e));
+        throw new SqlIllegalArgumentException("Don't know how to translate {} {}", e.nodeName(), e);
    }

    static LeafAgg toAgg(String id, Function f) {
@@ -171,7 +172,7 @@ abstract class QueryTranslator {
            }
        }

-        throw new UnsupportedOperationException(format(Locale.ROOT, "Don't know how to translate %s %s", f.nodeName(), f));
+        throw new SqlIllegalArgumentException("Don't know how to translate {} {}", f.nodeName(), f);
    }

    static class GroupingContext {
@@ -395,8 +396,8 @@ abstract class QueryTranslator {
        if (arg instanceof Literal) {
            return String.valueOf(((Literal) arg).value());
        }
-        throw new SqlIllegalArgumentException("Does not know how to convert argument " + arg.nodeString()
-            + " for function " + af.nodeString());
+        throw new SqlIllegalArgumentException("Does not know how to convert argument {} for function {}", arg.nodeString(),
+            af.nodeString());
    }

    // TODO: need to optimize on ngram
@@ -505,9 +506,9 @@ abstract class QueryTranslator {
        @Override
        protected QueryTranslation asQuery(BinaryComparison bc, boolean onAggs) {
            Check.isTrue(bc.right().foldable(),
-                "Line %d:%d - Comparisons against variables are not (currently) supported; offender %s in %s",
+                "Line {}:{}: Comparisons against variables are not (currently) supported; offender [{}] in [{}]",
                bc.right().location().getLineNumber(), bc.right().location().getColumnNumber(),
-                bc.right().nodeName(), bc.nodeName());
+                Expressions.name(bc.right()), bc.symbol());

            if (bc.left() instanceof NamedExpression) {
                NamedExpression ne = (NamedExpression) bc.left();
@@ -605,8 +606,8 @@ abstract class QueryTranslator {
                return new TermQuery(loc, name, value);
            }

-            Check.isTrue(false, "don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(), bc);
-            return null;
+            throw new SqlIllegalArgumentException("Don't know how to translate binary comparison [{}] in [{}]", bc.right().nodeString(),
+                bc);
        }
    }

@@ -700,9 +701,8 @@ abstract class QueryTranslator {
            return new QueryTranslation(query, aggFilter);
        }
        else {
-            throw new UnsupportedOperationException("No idea how to translate " + e);
+            throw new SqlIllegalArgumentException("No idea how to translate " + e);
        }

    }
}
@@ -153,7 +153,7 @@ public class FieldAttributeTests extends ESTestCase {
    public void testStarExpansionExcludesObjectAndUnsupportedTypes() {
        LogicalPlan plan = plan("SELECT * FROM test");
        List<? extends NamedExpression> list = ((Project) plan).projections();
-        assertThat(list, hasSize(7));
+        assertThat(list, hasSize(8));
        List<String> names = Expressions.names(list);
        assertThat(names, not(hasItem("some")));
        assertThat(names, not(hasItem("some.dotted")));
@ -0,0 +1,59 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.sql.expression.function.scalar.math;

import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import org.elasticsearch.xpack.sql.expression.Literal;
import org.elasticsearch.xpack.sql.expression.function.scalar.Processors;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.ConstantProcessor;
import org.elasticsearch.xpack.sql.expression.function.scalar.processor.runtime.Processor;

import static org.elasticsearch.xpack.sql.tree.Location.EMPTY;

public class BinaryMathProcessorTests extends AbstractWireSerializingTestCase<BinaryMathProcessor> {
public static BinaryMathProcessor randomProcessor() {
return new BinaryMathProcessor(
new ConstantProcessor(randomLong()),
new ConstantProcessor(randomLong()),
randomFrom(BinaryMathProcessor.BinaryMathOperation.values()));
}

@Override
protected BinaryMathProcessor createTestInstance() {
return randomProcessor();
}

@Override
protected Reader<BinaryMathProcessor> instanceReader() {
return BinaryMathProcessor::new;
}

@Override
protected NamedWriteableRegistry getNamedWriteableRegistry() {
return new NamedWriteableRegistry(Processors.getNamedWriteables());
}

public void testAtan2() {
Processor ba = new ATan2(EMPTY, l(1), l(1)).makeProcessorDefinition().asProcessor();
assertEquals(0.7853981633974483d, ba.process(null));
}

public void testPower() {
Processor ba = new Power(EMPTY, l(2), l(2)).makeProcessorDefinition().asProcessor();
assertEquals(4d, ba.process(null));
}

public void testHandleNull() {
assertNull(new ATan2(EMPTY, l(null), l(3)).makeProcessorDefinition().asProcessor().process(null));
assertNull(new Power(EMPTY, l(null), l(null)).makeProcessorDefinition().asProcessor().process(null));
}

private static Literal l(Object value) {
return Literal.of(EMPTY, value);
}
}
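The expected values in `testAtan2` and `testPower` are the standard library results: `Math.atan2(1, 1)` is pi/4, approximately 0.7853981633974483, and `Math.pow(2, 2)` is 4. A standalone sanity check, assuming the processors delegate to `java.lang.Math` (which these constants imply but the diff does not show):

[source,java]
----
// Sanity check of the constants asserted in the tests above.
public class BinaryMathExpectations {
    public static void main(String[] args) {
        System.out.println(Math.atan2(1, 1)); // 0.7853981633974483, i.e. pi/4
        System.out.println(Math.pow(2, 2));   // 4.0
    }
}
----
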
@ -17,7 +17,7 @@ public class SysColumnsTests extends ESTestCase {
public void testSysColumns() {
List<List<?>> rows = new ArrayList<>();
SysColumns.fillInRows("test", "index", TypesTests.loadMapping("mapping-multi-field-variation.json", true), null, rows, null);
assertEquals(15, rows.size());
assertEquals(16, rows.size());
assertEquals(24, rows.get(0).size());

List<?> row = rows.get(0);
@ -38,13 +38,13 @@ public class SysColumnsTests extends ESTestCase {
assertEquals(null, radix(row));
assertEquals(Integer.MAX_VALUE, bufferLength(row));

row = rows.get(6);
row = rows.get(7);
assertEquals("some.dotted", name(row));
assertEquals(Types.STRUCT, sqlType(row));
assertEquals(null, radix(row));
assertEquals(-1, bufferLength(row));

row = rows.get(14);
row = rows.get(15);
assertEquals("some.ambiguous.normalized", name(row));
assertEquals(Types.VARCHAR, sqlType(row));
assertEquals(null, radix(row));
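The shifted counts in these assertions (15 rows becoming 16, and the `some.dotted` and `some.ambiguous.normalized` rows each moving down one index) line up with the `mapping-multi-field-variation.json` hunk further down, which appears to add a `date` field to the test mapping; the same addition explains `hasSize(7)` becoming `hasSize(8)` in `FieldAttributeTests` above.
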
@ -6,6 +6,7 @@
package org.elasticsearch.xpack.sql.planner;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.sql.SqlIllegalArgumentException;
import org.elasticsearch.xpack.sql.analysis.analyzer.Analyzer;
import org.elasticsearch.xpack.sql.analysis.index.EsIndex;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolution;
@ -18,9 +19,11 @@ import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.sql.plan.logical.Project;
import org.elasticsearch.xpack.sql.planner.QueryTranslator.QueryTranslation;
import org.elasticsearch.xpack.sql.querydsl.query.Query;
import org.elasticsearch.xpack.sql.querydsl.query.RangeQuery;
import org.elasticsearch.xpack.sql.querydsl.query.TermQuery;
import org.elasticsearch.xpack.sql.type.EsField;
import org.elasticsearch.xpack.sql.type.TypesTests;
import org.joda.time.DateTime;

import java.util.Map;
import java.util.TimeZone;
@ -84,4 +87,56 @@ public class QueryTranslatorTests extends ESTestCase {
assertEquals("int", tq.term());
assertEquals(5, tq.value());
}

public void testComparisonAgainstColumns() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > int");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
SqlIllegalArgumentException ex = expectThrows(SqlIllegalArgumentException.class, () -> QueryTranslator.toQuery(condition, false));
assertEquals("Line 1:43: Comparisons against variables are not (currently) supported; offender [int] in [>]", ex.getMessage());
}

public void testDateRange() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > 1969-05-13");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
QueryTranslation translation = QueryTranslator.toQuery(condition, false);
Query query = translation.query;
assertTrue(query instanceof RangeQuery);
RangeQuery rq = (RangeQuery) query;
assertEquals("date", rq.field());
assertEquals(1951, rq.lower());
}

public void testDateRangeLiteral() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > '1969-05-13'");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
QueryTranslation translation = QueryTranslator.toQuery(condition, false);
Query query = translation.query;
assertTrue(query instanceof RangeQuery);
RangeQuery rq = (RangeQuery) query;
assertEquals("date", rq.field());
assertEquals("1969-05-13", rq.lower());
}

public void testDateRangeCast() {
LogicalPlan p = plan("SELECT some.string FROM test WHERE date > CAST('1969-05-13T12:34:56Z' AS DATE)");
assertTrue(p instanceof Project);
p = ((Project) p).child();
assertTrue(p instanceof Filter);
Expression condition = ((Filter) p).condition();
QueryTranslation translation = QueryTranslator.toQuery(condition, false);
Query query = translation.query;
assertTrue(query instanceof RangeQuery);
RangeQuery rq = (RangeQuery) query;
assertEquals("date", rq.field());
assertEquals(DateTime.parse("1969-05-13T12:34:56Z"), rq.lower());
}
}
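A detail worth calling out in `testDateRange`: the unquoted `1969-05-13` is not a date literal, so it folds as integer arithmetic, which is why the expected lower bound is `1951` rather than a date. Quoting the value (`testDateRangeLiteral`) keeps it as a string, and casting it (`testDateRangeCast`) produces a `DateTime` bound.

[source,java]
----
// Why testDateRange expects 1951: without quotes, 1969-05-13 is subtraction.
public class UnquotedDateArithmetic {
    public static void main(String[] args) {
        System.out.println(1969 - 5 - 13); // 1951
    }
}
----
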
@ -4,6 +4,7 @@
"int" : { "type" : "integer" },
"text" : { "type" : "text" },
"keyword" : { "type" : "keyword" },
"date" : { "type" : "date" },
"unsupported" : { "type" : "ip_range" },
"some" : {
"properties" : {

@ -11,6 +11,10 @@
"type": {
"type" : "string",
"description" : "The type of trial license to generate (default: \"trial\")"
},
"acknowledge": {
"type" : "boolean",
"description" : "whether the user has acknowledged acknowledge messages (default: false)"
}
}
},

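Read together with the YAML changes below, the new `acknowledge` parameter suggests that starting a trial license now requires explicit acknowledgement: the tests are updated to send `acknowledge: true` with `post_start_trial`, and still expect `forbidden` or `bad_request` where a trial cannot be started.
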
@ -133,7 +133,8 @@ teardown:

- do:
catch: forbidden
xpack.license.post_start_trial: {}
xpack.license.post_start_trial:
acknowledge: true

- match: { trial_was_started: false }
- match: { error_message: "Operation failed: Trial was already activated." }
@ -143,6 +144,7 @@ teardown:
catch: bad_request
xpack.license.post_start_trial:
type: "basic"
acknowledge: true
---
"Can start basic license if do not already have basic":
- do:

@ -63,7 +63,7 @@ public class ExecutableEmailAction extends ExecutableAction<EmailAction> {
}

Email.Builder email = action.getEmail().render(templateEngine, model, htmlSanitizer, attachments);
email.id(ctx.id().value());
email.id(actionId + "_" + ctx.id().value());

if (ctx.simulateAction(actionId)) {
return new EmailAction.Result.Simulated(email.build());

@ -354,7 +354,7 @@ public class Email implements ToXContentObject {
* after this is called is incorrect.
*/
public Email build() {
assert id != null : "email id should not be null (should be set to the watch id";
assert id != null : "email id should not be null";
Email email = new Email(id, from, replyTo, priority, sentDate, to, cc, bcc, subject, textBody, htmlBody,
unmodifiableMap(attachments));
attachments = null;

@ -171,7 +171,7 @@ public class EmailActionTests extends ESTestCase {
assertThat(result, instanceOf(EmailAction.Result.Success.class));
assertThat(((EmailAction.Result.Success) result).account(), equalTo(account));
Email actualEmail = ((EmailAction.Result.Success) result).email();
assertThat(actualEmail.id(), is(wid.value()));
assertThat(actualEmail.id(), is("_id_" + wid.value()));
assertThat(actualEmail, notNullValue());
assertThat(actualEmail.subject(), is(subject == null ? null : subject.getTemplate()));
assertThat(actualEmail.textBody(), is(textBody == null ? null : textBody.getTemplate()));

@ -0,0 +1,88 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.watcher.actions.email;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext;
import org.elasticsearch.xpack.core.watcher.watch.Payload;
import org.elasticsearch.xpack.watcher.common.text.TextTemplateEngine;
import org.elasticsearch.xpack.watcher.notification.email.EmailService;
import org.elasticsearch.xpack.watcher.notification.email.EmailTemplate;
import org.elasticsearch.xpack.watcher.notification.email.HtmlSanitizer;
import org.elasticsearch.xpack.watcher.notification.email.support.EmailServer;
import org.elasticsearch.xpack.watcher.test.MockTextTemplateEngine;
import org.elasticsearch.xpack.watcher.test.WatcherTestUtils;
import org.junit.After;
import org.junit.Before;

import javax.mail.internet.MimeMessage;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static org.hamcrest.Matchers.hasSize;

public class EmailMessageIdTests extends ESTestCase {

private EmailServer server;
private TextTemplateEngine textTemplateEngine = new MockTextTemplateEngine();
private HtmlSanitizer htmlSanitizer = new HtmlSanitizer(Settings.EMPTY);
private EmailService emailService;
private EmailAction emailAction;

@Before
public void startSmtpServer() {
server = EmailServer.localhost(logger);

Settings settings = Settings.builder()
.put("xpack.notification.email.account.test.smtp.auth", true)
.put("xpack.notification.email.account.test.smtp.user", EmailServer.USERNAME)
.put("xpack.notification.email.account.test.smtp.password", EmailServer.PASSWORD)
.put("xpack.notification.email.account.test.smtp.port", server.port())
.put("xpack.notification.email.account.test.smtp.host", "localhost")
.build();

Set<Setting<?>> registeredSettings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
registeredSettings.addAll(EmailService.getSettings());
ClusterSettings clusterSettings = new ClusterSettings(settings, registeredSettings);
emailService = new EmailService(settings, null, clusterSettings);
EmailTemplate emailTemplate = EmailTemplate.builder().from("from@example.org").to("to@example.org")
.subject("subject").textBody("body").build();
emailAction = new EmailAction(emailTemplate, null, null, null, null, null);
}

@After
public void stopSmtpServer() {
server.stop();
}

public void testThatMessageIdIsUnique() throws Exception {
List<MimeMessage> messages = new ArrayList<>();
server.addListener(messages::add);
ExecutableEmailAction firstEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine,
htmlSanitizer, Collections.emptyMap());
ExecutableEmailAction secondEmailAction = new ExecutableEmailAction(emailAction, logger, emailService, textTemplateEngine,
htmlSanitizer, Collections.emptyMap());

WatchExecutionContext ctx = WatcherTestUtils.createWatchExecutionContext(logger);
firstEmailAction.execute("my_first_action_id", ctx, Payload.EMPTY);
secondEmailAction.execute("my_second_action_id", ctx, Payload.EMPTY);

assertThat(messages, hasSize(2));
// check for unique message ids, should be two as well
Set<String> messageIds = new HashSet<>();
for (MimeMessage message : messages) {
messageIds.add(message.getMessageID());
}
assertThat(messageIds, hasSize(2));
}
}
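The thread running through the last four hunks: an email's id used to be just the watch execution id, so two email actions within one watch execution produced identical ids. Prefixing the id with the action id makes them distinct, which `EmailMessageIdTests` verifies end to end against a local SMTP server. A minimal sketch of the id scheme (hypothetical helper, not the Watcher API):

[source,java]
----
// Hypothetical helper illustrating the scheme from ExecutableEmailAction above:
// actionId + "_" + execution id, so ids differ per action within one execution.
public class EmailIdScheme {
    static String emailId(String actionId, String executionId) {
        return actionId + "_" + executionId;
    }

    public static void main(String[] args) {
        String executionId = "my_watch_123";
        System.out.println(emailId("my_first_action_id", executionId));  // my_first_action_id_my_watch_123
        System.out.println(emailId("my_second_action_id", executionId)); // my_second_action_id_my_watch_123
    }
}
----
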
@ -114,7 +114,7 @@ subprojects {
approvedLicenses << 'Apache'
}

String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"

// This is a top level task which we will add dependencies to below.
// It is a single task that can be used to backcompat tests against all versions.
@ -123,7 +123,7 @@ subprojects {
group = 'verification'
}

String output = "generated-resources/${project.name}"
String output = "${buildDir}/generated-resources/${project.name}"
task copyTestNodeKeystore(type: Copy) {
from project(xpackModule('core'))
.file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')

@ -71,7 +71,7 @@ task bwcTest {
group = 'verification'
}

String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"

for (Version version : bwcVersions.wireCompatible) {
String baseName = "v${version}"

@ -96,7 +96,7 @@ subprojects {
}
}

String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"

// This is a top level task which we will add dependencies to below.
// It is a single task that can be used to backcompat tests against all versions.
@ -105,7 +105,7 @@ subprojects {
group = 'verification'
}

String output = "generated-resources/${project.name}"
String output = "${buildDir}/generated-resources/${project.name}"
task copyTestNodeKeystore(type: Copy) {
from project(xpackModule('core'))
.file('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks')

@ -6,7 +6,7 @@ dependencies {
testCompile project(path: xpackProject('transport-client').path, configuration: 'runtime')
}

String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
task copyXPackPluginProps(type: Copy) {
from project(xpackModule('core')).file('src/main/plugin-metadata')
from project(xpackModule('core')).tasks.pluginProperties

@ -17,7 +17,7 @@ dependencies {
testCompile project(path: xpackModule('core'), configuration: 'runtime')
}

String outputDir = "generated-resources/${project.name}"
String outputDir = "${buildDir}/generated-resources/${project.name}"
task copyXPackPluginProps(type: Copy) {
from project(xpackModule('core')).file('src/main/plugin-metadata')
from project(xpackModule('core')).tasks.pluginProperties
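The repeated build-script change above moves `generated-resources` from the project directory into `${buildDir}`, so the generated test resources land under `build/`, where `gradle clean` removes them, instead of accumulating in the source tree.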