Merge branch 'master' into close-index-api-refactoring

commit fb24469fe7

@@ -56,10 +56,6 @@
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestIndicesAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardFailure.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotsService.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPool.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]aliases[/\\]IndexAliasesIT.java" checks="LineLength" />
|
||||
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]explain[/\\]ExplainActionIT.java" checks="LineLength" />
|
||||
|
|
|
@@ -58,12 +58,12 @@ PUT /common_grams_example
    "filter": {
      "common_grams": {
        "type": "common_grams",
        "common_words": ["a", "an", "the"]
        "common_words": ["the", "is", "a"]
      },
      "common_grams_query": {
        "type": "common_grams",
        "query_mode": true,
        "common_words": ["a", "an", "the"]
        "common_words": ["the", "is", "a"]
      }
    }
  }
@@ -71,3 +71,101 @@ PUT /common_grams_example
}
--------------------------------------------------
// CONSOLE

You can see the output by using e.g. the `_analyze` endpoint:

[source,js]
--------------------------------------------------
POST /common_grams_example/_analyze
{
  "analyzer" : "index_grams",
  "text" : "the quick brown is a fox"
}
--------------------------------------------------
// CONSOLE
// TEST[continued]

And the response will be:

[source,js]
--------------------------------------------------
{
  "tokens" : [
    {
      "token" : "the",
      "start_offset" : 0,
      "end_offset" : 3,
      "type" : "word",
      "position" : 0
    },
    {
      "token" : "the_quick",
      "start_offset" : 0,
      "end_offset" : 9,
      "type" : "gram",
      "position" : 0,
      "positionLength" : 2
    },
    {
      "token" : "quick",
      "start_offset" : 4,
      "end_offset" : 9,
      "type" : "word",
      "position" : 1
    },
    {
      "token" : "brown",
      "start_offset" : 10,
      "end_offset" : 15,
      "type" : "word",
      "position" : 2
    },
    {
      "token" : "brown_is",
      "start_offset" : 10,
      "end_offset" : 18,
      "type" : "gram",
      "position" : 2,
      "positionLength" : 2
    },
    {
      "token" : "is",
      "start_offset" : 16,
      "end_offset" : 18,
      "type" : "word",
      "position" : 3
    },
    {
      "token" : "is_a",
      "start_offset" : 16,
      "end_offset" : 20,
      "type" : "gram",
      "position" : 3,
      "positionLength" : 2
    },
    {
      "token" : "a",
      "start_offset" : 19,
      "end_offset" : 20,
      "type" : "word",
      "position" : 4
    },
    {
      "token" : "a_fox",
      "start_offset" : 19,
      "end_offset" : 24,
      "type" : "gram",
      "position" : 4,
      "positionLength" : 2
    },
    {
      "token" : "fox",
      "start_offset" : 21,
      "end_offset" : 24,
      "type" : "word",
      "position" : 5
    }
  ]
}
--------------------------------------------------
// TESTRESPONSE

@@ -154,6 +154,35 @@ Produces intervals that are not contained by an interval from the filter rule
`not_overlapping`::
Produces intervals that do not overlap with an interval from the filter rule

[[interval-script-filter]]
==== Script filters

You can also filter intervals based on their start position, end position and
internal gap count, using a script. The script has access to an `interval`
variable, with `start`, `end` and `gaps` methods:

[source,js]
--------------------------------------------------
POST _search
{
  "query": {
    "intervals" : {
      "my_text" : {
        "match" : {
          "query" : "hot porridge",
          "filter" : {
            "script" : {
              "source" : "interval.start > 10 && interval.end < 20 && interval.gaps == 0"
            }
          }
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE

[[interval-minimization]]
==== Minimization

@@ -234,6 +234,12 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc {
  float getFreq()
}

class org.elasticsearch.index.query.IntervalFilterScript$Interval {
  int getStart()
  int getEnd()
  int getGaps()
}

# for testing
class org.elasticsearch.painless.FeatureTest no_import {
  int z
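A minimal sketch, not part of this commit: Painless exposes whitelisted getters both as method calls and as property-style shorthand, which is why the documentation above writes `interval.start` while the whitelist declares `getStart()`. An equivalent filter-script body, assuming the `interval` variable supplied by this script context, could be written either way:

[source,painless]
--------------------------------------------------
// method-call form, matching the whitelist entries above
interval.getStart() > 3 && interval.getGaps() == 0
// property shorthand for the same getters, as used in the docs example
interval.start > 3 && interval.gaps == 0
--------------------------------------------------
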
@@ -0,0 +1,46 @@
setup:
  - skip:
      version: " - 6.99.99"
      reason: "Implemented in 7.0"

  - do:
      indices.create:
        index: test
        body:
          mappings:
            test:
              properties:
                text:
                  type: text
                  analyzer: standard
  - do:
      bulk:
        refresh: true
        body:
          - '{"index": {"_index": "test", "_type": "test", "_id": "1"}}'
          - '{"text" : "Some like it hot, some like it cold"}'
          - '{"index": {"_index": "test", "_type": "test", "_id": "2"}}'
          - '{"text" : "Its cold outside, theres no kind of atmosphere"}'
          - '{"index": {"_index": "test", "_type": "test", "_id": "3"}}'
          - '{"text" : "Baby its cold there outside"}'
          - '{"index": {"_index": "test", "_type": "test", "_id": "4"}}'
          - '{"text" : "Outside it is cold and wet"}'

---
"Test filtering by script":
  - do:
      search:
        index: test
        body:
          query:
            intervals:
              text:
                match:
                  query: "cold"
                  filter:
                    script:
                      source: "interval.start > 3"

  - match: { hits.total.value: 1 }

@@ -58,6 +58,21 @@ setup:
  - match: {hits.max_score: 2}
  - match: {hits.hits.0._score: 2}

  - do:
      search:
        rest_total_hits_as_int: true
        index: test
        body:
          query:
            query_string:
              default_field: text
              query: s*
              boost: 2

  - match: {hits.total: 1}
  - match: {hits.max_score: 2}
  - match: {hits.hits.0._score: 2}

  - do:
      search:
        rest_total_hits_as_int: true

@@ -52,7 +52,6 @@ import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude;
 * @see PointValues
 * @see LatLonDocValuesField
 *
 * @lucene.experimental
 */
public class XLatLonShape {
  public static final int BYTES = LatLonPoint.BYTES;

@@ -26,7 +26,6 @@ import org.apache.lucene.index.PointValues.Relation;
 * <p>The field must be indexed using
 * {@link XLatLonShape#createIndexableFields} added per document.
 *
 * @lucene.experimental
 **/
final class XLatLonShapeBoundingBoxQuery extends XLatLonShapeQuery {
  final XRectangle2D rectangle2D;

@@ -41,7 +41,6 @@ import java.util.Arrays;
 * <p>The field must be indexed using
 * {@link XLatLonShape#createIndexableFields} added per document.
 *
 * @lucene.experimental
 **/
final class XLatLonShapeLineQuery extends XLatLonShapeQuery {
  final Line[] lines;

@@ -31,7 +31,6 @@ import java.util.Arrays;
 * <p>The field must be indexed using
 * {@link XLatLonShape#createIndexableFields} added per document.
 *
 * @lucene.experimental
 **/
final class XLatLonShapePolygonQuery extends XLatLonShapeQuery {
  final Polygon[] polygons;

@@ -45,7 +45,6 @@ import java.util.Objects;
 *
 * Note: this class implements the majority of the INTERSECTS, WITHIN, DISJOINT relation logic
 *
 * @lucene.experimental
 **/
abstract class XLatLonShapeQuery extends Query {
  /** field name */

@@ -38,7 +38,6 @@ import static org.apache.lucene.geo.GeoUtils.orient;
/**
 * 2D rectangle implementation containing spatial logic.
 *
 * @lucene.internal
 */
public class XRectangle2D {
  final byte[] bbox;

@@ -64,7 +64,6 @@ import static org.apache.lucene.geo.GeoUtils.orient;
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
 * THIS SOFTWARE.
 *
 * @lucene.experimental
 */
public final class XTessellator {
  // this is a dumb heuristic to control whether we cut over to sorted morton values

@@ -118,6 +118,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
    public static final Version V_6_5_4 = new Version(V_6_5_4_ID, org.apache.lucene.util.Version.LUCENE_7_5_0);
    public static final int V_6_6_0_ID = 6060099;
    public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0);
    public static final int V_6_7_0_ID = 6070099;
    public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0);
    public static final int V_7_0_0_ID = 7000099;
    public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
    public static final Version CURRENT = V_7_0_0;

@@ -136,6 +138,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
        switch (id) {
            case V_7_0_0_ID:
                return V_7_0_0;
            case V_6_7_0_ID:
                return V_6_7_0;
            case V_6_6_0_ID:
                return V_6_6_0;
            case V_6_5_4_ID:

@@ -43,6 +43,8 @@ import java.net.URLEncoder;
import java.util.Locale;

import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;

/**
 * A base class for the response of a write operation that involves a single doc

@@ -266,8 +268,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
            seqNo = in.readZLong();
            primaryTerm = in.readVLong();
        } else {
            seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            primaryTerm = 0;
            seqNo = UNASSIGNED_SEQ_NO;
            primaryTerm = UNASSIGNED_PRIMARY_TERM;
        }
        forcedRefresh = in.readBoolean();
        result = Result.readFrom(in);

@@ -378,8 +380,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
        protected Result result = null;
        protected boolean forcedRefresh;
        protected ShardInfo shardInfo = null;
        protected Long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
        protected Long primaryTerm = 0L;
        protected Long seqNo = UNASSIGNED_SEQ_NO;
        protected Long primaryTerm = UNASSIGNED_PRIMARY_TERM;

        public ShardId getShardId() {
            return shardId;

@@ -56,6 +56,7 @@ import java.util.Objects;
import java.util.Set;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;

/**
 * A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s

@@ -351,7 +352,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
                long version = Versions.MATCH_ANY;
                VersionType versionType = VersionType.INTERNAL;
                long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
                long ifPrimaryTerm = 0;
                long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
                int retryOnConflict = 0;
                String pipeline = valueOrDefault(defaultPipeline, globalPipeline);

@@ -31,12 +31,13 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;

/**
 * A request to delete a document from an index based on its type and id. Best created using

@@ -58,8 +59,8 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
    private String routing;
    private long version = Versions.MATCH_ANY;
    private VersionType versionType = VersionType.INTERNAL;
    private long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
    private long ifPrimaryTerm = 0;
    private long ifSeqNo = UNASSIGNED_SEQ_NO;
    private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;

    public DeleteRequest() {
    }

@@ -116,16 +117,16 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
            validationException = addValidationError("version type [force] may no longer be used", validationException);
        }

        if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO && (
        if (ifSeqNo != UNASSIGNED_SEQ_NO && (
            versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY
        )) {
            validationException = addValidationError("compare and write operations can not use versioning", validationException);
        }

        if (ifPrimaryTerm == 0 && ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
        if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) {
            validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException);
        }
        if (ifPrimaryTerm != 0 && ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) {
        if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) {
            validationException =
                addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException);
        }

@@ -239,7 +240,7 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
     * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
     */
    public DeleteRequest setIfSeqNo(long seqNo) {
        if (seqNo < 0 && seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
        if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) {
            throw new IllegalArgumentException("sequence numbers must be non negative. got [" + seqNo + "].");
        }
        ifSeqNo = seqNo;

@@ -286,8 +287,8 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
            ifSeqNo = in.readZLong();
            ifPrimaryTerm = in.readVLong();
        } else {
            ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            ifPrimaryTerm = 0;
            ifSeqNo = UNASSIGNED_SEQ_NO;
            ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
        }
    }

@@ -305,7 +306,7 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest>
        if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
            out.writeZLong(ifSeqNo);
            out.writeVLong(ifPrimaryTerm);
        } else if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) {
        } else if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) {
            assert false : "setIfMatch [" + ifSeqNo + "], currentDocTem [" + ifPrimaryTerm + "]";
            throw new IllegalStateException(
                "sequence number based compare and write is not supported until all nodes are on version 7.0 or higher. " +

@@ -43,7 +43,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.ShardId;

@@ -54,6 +53,8 @@ import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;

/**
 * Index request to index a typed JSON document into a specific index and make it searchable. Best

@@ -106,8 +107,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
    private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;

    private boolean isRetry = false;
    private long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
    private long ifPrimaryTerm = 0;
    private long ifSeqNo = UNASSIGNED_SEQ_NO;
    private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;


    public IndexRequest() {

@@ -174,7 +175,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
            return validationException;
        }

        if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) {
        if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) {
            validationException = addValidationError("create operations do not support compare and set. use index instead",
                validationException);
            return validationException;

@@ -207,15 +208,15 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
            validationException = addValidationError("pipeline cannot be an empty string", validationException);
        }

        if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO && (
        if (ifSeqNo != UNASSIGNED_SEQ_NO && (
            versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY
        )) {
            validationException = addValidationError("compare and write operations can not use versioning", validationException);
        }
        if (ifPrimaryTerm == 0 && ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
        if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) {
            validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException);
        }
        if (ifPrimaryTerm != 0 && ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) {
        if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) {
            validationException =
                addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException);
        }

@@ -511,7 +512,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
     * {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
     */
    public IndexRequest setIfSeqNo(long seqNo) {
        if (seqNo < 0 && seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
        if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) {
            throw new IllegalArgumentException("sequence numbers must be non negative. got [" + seqNo + "].");
        }
        ifSeqNo = seqNo;

@@ -573,8 +574,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        // generate id if not already provided
        if (id == null) {
            assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!";
            assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO;
            assert ifPrimaryTerm == 0;
            assert ifSeqNo == UNASSIGNED_SEQ_NO;
            assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM;
            autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
            String uid;
            if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) {

@@ -620,8 +621,8 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
            ifSeqNo = in.readZLong();
            ifPrimaryTerm = in.readVLong();
        } else {
            ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            ifPrimaryTerm = 0;
            ifSeqNo = UNASSIGNED_SEQ_NO;
            ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
        }
    }

@@ -657,7 +658,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
        if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
            out.writeZLong(ifSeqNo);
            out.writeVLong(ifPrimaryTerm);
        } else if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) {
        } else if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) {
            assert false : "setIfMatch [" + ifSeqNo + "], currentDocTem [" + ifPrimaryTerm + "]";
            throw new IllegalStateException(
                "sequence number based compare and write is not supported until all nodes are on version 7.0 or higher. " +

@@ -91,7 +91,8 @@ final class ExpandSearchPhase extends SearchPhase {
                SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder)
                    .query(groupQuery)
                    .postFilter(searchRequest.source().postFilter());
                SearchRequest groupRequest = buildExpandSearchRequest(searchRequest, sourceBuilder);
                SearchRequest groupRequest = new SearchRequest(searchRequest);
                groupRequest.source(sourceBuilder);
                multiRequest.add(groupRequest);
            }
        }

@@ -120,22 +121,6 @@ final class ExpandSearchPhase extends SearchPhase {
        }
    }

    private SearchRequest buildExpandSearchRequest(SearchRequest orig, SearchSourceBuilder sourceBuilder) {
        SearchRequest groupRequest = new SearchRequest(orig.indices())
            .types(orig.types())
            .source(sourceBuilder)
            .indicesOptions(orig.indicesOptions())
            .requestCache(orig.requestCache())
            .preference(orig.preference())
            .routing(orig.routing())
            .searchType(orig.searchType());
        if (orig.allowPartialSearchResults() != null){
            groupRequest.allowPartialSearchResults(orig.allowPartialSearchResults());
        }
        groupRequest.setMaxConcurrentShardRequests(orig.getMaxConcurrentShardRequests());
        return groupRequest;
    }

    private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) {
        SearchSourceBuilder groupSource = new SearchSourceBuilder();
        groupSource.from(options.getFrom());

@@ -87,6 +87,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Supplier;

import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;

/**
 * Base class for requests that should be executed on a primary copy followed by replica copies.
 * Subclasses can resolve the target shard and provide implementation for primary and replica operations.

@@ -1248,7 +1250,7 @@ public abstract class TransportReplicationAction<
            request = requestSupplier.get();
            // null now, but will be populated by reading from the streams
            targetAllocationID = null;
            primaryTerm = 0L;
            primaryTerm = UNASSIGNED_PRIMARY_TERM;
        }

        public ConcreteShardRequest(R request, String targetAllocationID, long primaryTerm) {

@@ -345,17 +345,19 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode
        });

        try {
            nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS);
            if (nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) {
                logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status
            logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
        }

        try {
            indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS);
            if (indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) {
                logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status
            logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
        }
        ClusterInfo clusterInfo = getClusterInfo();
        try {

@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;

@@ -48,7 +49,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;

@@ -74,6 +74,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Predicate;

import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;

public class ShardStateAction {

    private static final Logger logger = LogManager.getLogger(ShardStateAction.class);

@@ -603,7 +605,7 @@ public class ShardStateAction {
            allocationId = in.readString();
            if (in.getVersion().before(Version.V_6_3_0)) {
                final long primaryTerm = in.readVLong();
                assert primaryTerm == 0L : "shard is only started by itself: primary term [" + primaryTerm + "]";
                assert primaryTerm == UNASSIGNED_PRIMARY_TERM : "shard is only started by itself: primary term [" + primaryTerm + "]";
            }
            this.message = in.readString();
            if (in.getVersion().before(Version.V_6_3_0)) {

@@ -55,6 +55,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

@@ -1079,6 +1080,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
                throw new IllegalStateException("you must set the number of shards before setting/reading primary terms");
            }
            primaryTerms = new long[numberOfShards()];
            Arrays.fill(primaryTerms, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
        }

@@ -33,10 +33,12 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqN
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbers;

import java.io.IOException;

import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;


/** Utility class to do efficient primary-key (only 1 doc contains the
 * given term) lookups by segment, re-using the enums. This class is

@@ -116,18 +118,18 @@ final class PerThreadIDVersionAndSeqNoLookup {
                if (seqNos != null && seqNos.advanceExact(docID)) {
                    seqNo = seqNos.longValue();
                } else {
                    seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
                    seqNo = UNASSIGNED_SEQ_NO;
                }
                NumericDocValues terms = context.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
                if (terms != null && terms.advanceExact(docID)) {
                    term = terms.longValue();
                } else {
                    term = 0;
                    term = UNASSIGNED_PRIMARY_TERM;
                }

            } else {
                seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
                term = 0;
                seqNo = UNASSIGNED_SEQ_NO;
                term = UNASSIGNED_PRIMARY_TERM;
            }
            return new DocIdAndVersion(docID, versions.longValue(), seqNo, term, context.reader(), context.docBase);
        } else {

@@ -175,7 +177,7 @@ final class PerThreadIDVersionAndSeqNoLookup {
            if (seqNoDV != null && seqNoDV.advanceExact(docID)) {
                seqNo = seqNoDV.longValue();
            } else {
                seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
                seqNo = UNASSIGNED_SEQ_NO;
            }
            final boolean isLive = (liveDocs == null || liveDocs.get(docID));
            if (isLive) {

@@ -132,7 +132,8 @@ public class DiscoveryModule {
            transportService, namedWriteableRegistry, allocationService, masterService,
            () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), hostsProvider, clusterApplier,
            Randomness.get()));
        discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier));
        discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier,
            gatewayMetaState));
        for (DiscoveryPlugin plugin : plugins) {
            plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, clusterSettings,
                hostsProvider, allocationService, gatewayMetaState).forEach((key, value) -> {

@@ -31,12 +31,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
import org.elasticsearch.cluster.service.ClusterApplierService;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.gateway.GatewayMetaState;
import org.elasticsearch.transport.TransportService;

import java.util.Objects;

@@ -55,12 +57,17 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
    private volatile ClusterState clusterState;

    public SingleNodeDiscovery(final Settings settings, final TransportService transportService,
                               final MasterService masterService, final ClusterApplier clusterApplier) {
                               final MasterService masterService, final ClusterApplier clusterApplier,
                               final GatewayMetaState gatewayMetaState) {
        super(Objects.requireNonNull(settings));
        this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
        this.transportService = Objects.requireNonNull(transportService);
        masterService.setClusterStateSupplier(() -> clusterState);
        this.clusterApplier = clusterApplier;

        if (clusterApplier instanceof ClusterApplierService) {
            ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState);
        }
    }

    @Override

@@ -72,7 +72,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;

@@ -106,6 +105,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiFunction;
import java.util.stream.Stream;

import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;

public abstract class Engine implements Closeable {

    public static final String SYNC_COMMIT_ID = "sync_id";

@@ -147,7 +149,7 @@ public abstract class Engine implements Closeable {
     * 1. A primary initializes this marker once using the max_seq_no from its history, then advances when processing an update or delete.
     * 2. A replica never advances this marker by itself but only inherits from its primary (via advanceMaxSeqNoOfUpdatesOrDeletes).
     */
    private final AtomicLong maxSeqNoOfUpdatesOrDeletes = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
    private final AtomicLong maxSeqNoOfUpdatesOrDeletes = new AtomicLong(UNASSIGNED_SEQ_NO);

    protected Engine(EngineConfig engineConfig) {
        Objects.requireNonNull(engineConfig.getStore(), "Store must be provided to the engine");

@@ -425,8 +427,8 @@ public abstract class Engine implements Closeable {
        protected Result(Operation.TYPE operationType, Mapping requiredMappingUpdate) {
            this.operationType = operationType;
            this.version = Versions.NOT_FOUND;
            this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            this.term = 0L;
            this.seqNo = UNASSIGNED_SEQ_NO;
            this.term = UNASSIGNED_PRIMARY_TERM;
            this.failure = null;
            this.requiredMappingUpdate = requiredMappingUpdate;
            this.resultType = Type.MAPPING_UPDATE_REQUIRED;

@@ -522,7 +524,7 @@ public abstract class Engine implements Closeable {
         * use in case of the index operation failed before getting to internal engine
         **/
        public IndexResult(Exception failure, long version, long term) {
            this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO);
            this(failure, version, term, UNASSIGNED_SEQ_NO);
        }

        public IndexResult(Exception failure, long version, long term, long seqNo) {

@@ -554,7 +556,7 @@ public abstract class Engine implements Closeable {
         * use in case of the delete operation failed before getting to internal engine
         **/
        public DeleteResult(Exception failure, long version, long term) {
            this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
            this(failure, version, term, UNASSIGNED_SEQ_NO, false);
        }

        public DeleteResult(Exception failure, long version, long term, long seqNo, boolean found) {

@@ -1353,9 +1355,9 @@ public abstract class Engine implements Closeable {
            super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
            assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
            assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
            assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
            assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
                "ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
            assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) :
            assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) :
                "cas operations are only allowed if origin is primary. get [" + origin + "]";
            this.doc = doc;
            this.isRetry = isRetry;

@@ -1369,8 +1371,8 @@ public abstract class Engine implements Closeable {
        } // TEST ONLY

        Index(Term uid, long primaryTerm, ParsedDocument doc, long version) {
            this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL,
                Origin.PRIMARY, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
            this(uid, doc, UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL,
                Origin.PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
        } // TEST ONLY

        public ParsedDocument parsedDoc() {

@@ -1447,9 +1449,9 @@ public abstract class Engine implements Closeable {
            super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
            assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
            assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
            assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
            assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
                "ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
            assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) :
            assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) :
                "cas operations are only allowed if origin is primary. get [" + origin + "]";
            this.type = Objects.requireNonNull(type);
            this.id = Objects.requireNonNull(id);

@@ -1458,13 +1460,13 @@ public abstract class Engine implements Closeable {
        }

        public Delete(String type, String id, Term uid, long primaryTerm) {
            this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL,
                Origin.PRIMARY, System.nanoTime(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
            this(type, id, uid, UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL,
                Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
        }

        public Delete(Delete template, VersionType versionType) {
            this(template.type(), template.id(), template.uid(), template.seqNo(), template.primaryTerm(), template.version(),
                versionType, template.origin(), template.startTime(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
                versionType, template.origin(), template.startTime(), UNASSIGNED_SEQ_NO, 0);
        }

        @Override

@@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.IgnoredFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.search.lookup.SourceLookup;

import java.io.IOException;

@@ -48,6 +47,8 @@ import java.util.Objects;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;

public class GetResult implements Streamable, Iterable<DocumentField>, ToXContentObject {

@@ -82,9 +83,9 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
        this.id = id;
        this.seqNo = seqNo;
        this.primaryTerm = primaryTerm;
        assert (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && primaryTerm == 0) || (seqNo >= 0 && primaryTerm >= 1) :
        assert (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) || (seqNo >= 0 && primaryTerm >= 1) :
            "seqNo: " + seqNo + " primaryTerm: " + primaryTerm;
        assert exists || (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && primaryTerm == 0) :
        assert exists || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) :
            "doc not found but seqNo/primaryTerm are set";
        this.version = version;
        this.exists = exists;

@@ -239,7 +240,7 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
    }

    public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException {
        if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { // seqNo may not be assigned if read from an old node
        if (seqNo != UNASSIGNED_SEQ_NO) { // seqNo may not be assigned if read from an old node
            builder.field(_SEQ_NO, seqNo);
            builder.field(_PRIMARY_TERM, primaryTerm);
        }

@@ -313,8 +314,8 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten

        String currentFieldName = parser.currentName();
        long version = -1;
        long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
        long primaryTerm = 0;
        long seqNo = UNASSIGNED_SEQ_NO;
        long primaryTerm = UNASSIGNED_PRIMARY_TERM;
        Boolean found = null;
        BytesReference source = null;
        Map<String, DocumentField> fields = new HashMap<>();

@@ -388,8 +389,8 @@ public class GetResult implements Streamable, Iterable<DocumentField>, ToXConten
            seqNo = in.readZLong();
            primaryTerm = in.readVLong();
        } else {
            seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
            primaryTerm = 0L;
            seqNo = UNASSIGNED_SEQ_NO;
            primaryTerm = UNASSIGNED_PRIMARY_TERM;
        }
        version = in.readLong();
        exists = in.readBoolean();

@@ -32,6 +32,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;

@@ -40,6 +41,9 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.intervals.IntervalsSource;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.Version;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.settings.Settings;

@@ -360,7 +364,7 @@ public class TextFieldMapper extends FieldMapper {
        }

        boolean accept(int length) {
            return length >= minChars && length <= maxChars;
            return length >= minChars - 1 && length <= maxChars;
        }

        void doXContent(XContentBuilder builder) throws IOException {

@@ -370,6 +374,22 @@ public class TextFieldMapper extends FieldMapper {
            builder.endObject();
        }

        @Override
        public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) {
            if (value.length() >= minChars) {
                return super.termQuery(value, context);
            }
            List<Automaton> automata = new ArrayList<>();
            automata.add(Automata.makeString(value));
            for (int i = value.length(); i < minChars; i++) {
                automata.add(Automata.makeAnyChar());
            }
            Automaton automaton = Operations.concatenate(automata);
            AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), automaton);
            query.setRewriteMethod(method);
            return query;
        }

        @Override
        public PrefixFieldType clone() {
            return new PrefixFieldType(name(), minChars, maxChars);

@@ -402,7 +422,6 @@ public class TextFieldMapper extends FieldMapper {

        @Override
        public int hashCode() {

            return Objects.hash(super.hashCode(), minChars, maxChars);
        }
    }

@@ -564,7 +583,7 @@ public class TextFieldMapper extends FieldMapper {
        if (prefixFieldType == null || prefixFieldType.accept(value.length()) == false) {
            return super.prefixQuery(value, method, context);
        }
        Query tq = prefixFieldType.termQuery(value, context);
        Query tq = prefixFieldType.prefixQuery(value, method, context);
        if (method == null || method == MultiTermQuery.CONSTANT_SCORE_REWRITE
            || method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) {
            return new ConstantScoreQuery(tq);

@@ -0,0 +1,60 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.apache.lucene.search.intervals.IntervalIterator;
import org.elasticsearch.script.ScriptContext;

/**
 * Base class for scripts used as interval filters, see {@link IntervalsSourceProvider.IntervalFilter}
 */
public abstract class IntervalFilterScript {

    public static class Interval {

        private IntervalIterator iterator;

        void setIterator(IntervalIterator iterator) {
            this.iterator = iterator;
        }

        public int getStart() {
            return iterator.start();
        }

        public int getEnd() {
            return iterator.end();
        }

        public int getGaps() {
            return iterator.gaps();
        }
    }

    public abstract boolean execute(Interval interval);

    public interface Factory {
        IntervalFilterScript newInstance();
    }

    public static final String[] PARAMETERS = new String[]{ "interval" };
    public static final ScriptContext<Factory> CONTEXT = new ScriptContext<>("interval", Factory.class);

}

@@ -19,6 +19,8 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.intervals.FilteredIntervalsSource;
import org.apache.lucene.search.intervals.IntervalIterator;
import org.apache.lucene.search.intervals.Intervals;
import org.apache.lucene.search.intervals.IntervalsSource;
import org.elasticsearch.common.ParseField;

@@ -34,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.script.Script;

import java.io.IOException;
import java.util.ArrayList;

@@ -387,24 +390,59 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
        }
    }

    static class ScriptFilterSource extends FilteredIntervalsSource {

        final IntervalFilterScript script;
        IntervalFilterScript.Interval interval = new IntervalFilterScript.Interval();

        ScriptFilterSource(IntervalsSource in, String name, IntervalFilterScript script) {
            super("FILTER(" + name + ")", in);
            this.script = script;
        }

        @Override
        protected boolean accept(IntervalIterator it) {
            interval.setIterator(it);
            return script.execute(interval);
        }
    }

    public static class IntervalFilter implements ToXContent, Writeable {

        public static final String NAME = "filter";

        private final String type;
        private final IntervalsSourceProvider filter;
        private final Script script;

        public IntervalFilter(IntervalsSourceProvider filter, String type) {
            this.filter = filter;
            this.type = type.toLowerCase(Locale.ROOT);
            this.script = null;
        }

        IntervalFilter(Script script) {
            this.script = script;
            this.type = "script";
            this.filter = null;
        }

        public IntervalFilter(StreamInput in) throws IOException {
            this.type = in.readString();
            this.filter = in.readNamedWriteable(IntervalsSourceProvider.class);
            this.filter = in.readOptionalNamedWriteable(IntervalsSourceProvider.class);
            if (in.readBoolean()) {
                this.script = new Script(in);
            }
            else {
                this.script = null;
            }
        }

        public IntervalsSource filter(IntervalsSource input, QueryShardContext context, MappedFieldType fieldType) throws IOException {
            if (script != null) {
                IntervalFilterScript ifs = context.getScriptService().compile(script, IntervalFilterScript.CONTEXT).newInstance();
                return new ScriptFilterSource(input, script.getIdOrCode(), ifs);
            }
            IntervalsSource filterSource = filter.getSource(context, fieldType);
            switch (type) {
                case "containing":

@@ -439,7 +477,14 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(type);
            out.writeNamedWriteable(filter);
            out.writeOptionalNamedWriteable(filter);
            if (script == null) {
                out.writeBoolean(false);
            }
            else {
                out.writeBoolean(true);
                script.writeTo(out);
            }
        }

        @Override

@@ -458,6 +503,13 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
                throw new ParsingException(parser.getTokenLocation(), "Expected [FIELD_NAME] but got [" + parser.currentToken() + "]");
            }
            String type = parser.currentName();
            if (Script.SCRIPT_PARSE_FIELD.match(type, parser.getDeprecationHandler())) {
                Script script = Script.parse(parser);
                if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                    throw new ParsingException(parser.getTokenLocation(), "Expected [END_OBJECT] but got [" + parser.currentToken() + "]");
                }
                return new IntervalFilter(script);
            }
            if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
                throw new ParsingException(parser.getTokenLocation(), "Expected [START_OBJECT] but got [" + parser.currentToken() + "]");
            }

@@ -475,4 +527,6 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
        }
    }



}

@@ -41,6 +41,11 @@ public class SequenceNumbers {
     */
    public static final long NO_OPS_PERFORMED = -1L;

    /**
     * Represents an unassigned primary term (e.g., when a primary shard was not yet allocated)
     */
    public static final long UNASSIGNED_PRIMARY_TERM = 0L;

    /**
     * Reads the sequence number stats from the commit data (maximum sequence number and local checkpoint).
     *

@@ -20,6 +20,7 @@
package org.elasticsearch.index.translog;

import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.index.seqno.SequenceNumbers;

import java.io.IOException;
import java.nio.ByteBuffer;

@@ -113,7 +114,7 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade

    protected Translog.Operation read(BufferedChecksumStreamInput inStream) throws IOException {
        final Translog.Operation op = Translog.readOperation(inStream);
        if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) {
        if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != SequenceNumbers.UNASSIGNED_PRIMARY_TERM) {
            throw new TranslogCorruptedException(
                path.toString(),
                "operation's term is newer than translog header term; " +

@@ -34,6 +34,8 @@ import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;

import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;

/**
 * Each translog file is started with a translog header then followed by translog operations.
 */

@@ -45,8 +47,6 @@ final class TranslogHeader {
    public static final int VERSION_PRIMARY_TERM = 3; // added primary term
    public static final int CURRENT_VERSION = VERSION_PRIMARY_TERM;

    public static final long UNKNOWN_PRIMARY_TERM = 0L;

    private final String translogUUID;
    private final long primaryTerm;
    private final int headerSizeInBytes;

@@ -146,7 +146,7 @@ final class TranslogHeader {
            primaryTerm = in.readLong();
        } else {
            assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]";
            primaryTerm = UNKNOWN_PRIMARY_TERM;
            primaryTerm = UNASSIGNED_PRIMARY_TERM;
        }
        // Verify the checksum
        if (version >= VERSION_PRIMARY_TERM) {

@@ -207,7 +207,7 @@ public class TruncateTranslogAction {
     */
    private static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException {
        try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
            TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM);
            TranslogHeader header = new TranslogHeader(translogUUID, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
            header.write(fc);
            return header.sizeInBytes();
        }

@@ -21,6 +21,7 @@ package org.elasticsearch.script;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.IntervalFilterScript;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript;


@@ -60,7 +61,8 @@ public class ScriptModule {
            ScriptedMetricAggContexts.InitScript.CONTEXT,
            ScriptedMetricAggContexts.MapScript.CONTEXT,
            ScriptedMetricAggContexts.CombineScript.CONTEXT,
            ScriptedMetricAggContexts.ReduceScript.CONTEXT
            ScriptedMetricAggContexts.ReduceScript.CONTEXT,
            IntervalFilterScript.CONTEXT
        ).collect(Collectors.toMap(c -> c.name, Function.identity()));
    }

@ -142,7 +142,8 @@ public class RestoreInfo implements ToXContentObject, Streamable {
return builder;
}

private static final ObjectParser<RestoreInfo, Void> PARSER = new ObjectParser<>(RestoreInfo.class.getName(), true, RestoreInfo::new);
private static final ObjectParser<RestoreInfo, Void> PARSER = new ObjectParser<>(RestoreInfo.class.getName(),
true, RestoreInfo::new);

static {
ObjectParser<RestoreInfo, Void> shardsParser = new ObjectParser<>("shards", true, null);

@ -103,8 +103,8 @@ import static org.elasticsearch.snapshots.SnapshotUtils.filterIndices;
* First {@link #restoreSnapshot(RestoreRequest, org.elasticsearch.action.ActionListener)}
* method reads information about snapshot and metadata from repository. In update cluster state task it checks restore
* preconditions, restores global state if needed, creates {@link RestoreInProgress} record with list of shards that needs
* to be restored and adds this shard to the routing table using {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)}
* method.
* to be restored and adds this shard to the routing table using
* {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)} method.
* <p>
* Individual shards are getting restored as part of normal recovery process in
* {@link IndexShard#restoreFromRepository(Repository)} )}

@ -256,15 +256,18 @@ public class RestoreService implements ClusterStateApplier {
for (Map.Entry<String, String> indexEntry : indices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(restoreUUID, snapshot, snapshotInfo.version(), index);
SnapshotRecoverySource recoverySource =
new SnapshotRecoverySource(restoreUUID, snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData,
request.indexSettings, request.ignoreIndexSettings);
try {
snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData,
minIndexCompatibilityVersion);
} catch (Exception ex) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be " +
"upgraded", ex);
}
// Check that the index is closed or doesn't exist
IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);

@ -274,9 +277,16 @@ public class RestoreService implements ClusterStateApplier {
// Index doesn't exist - create it and start recovery
// Make sure that the index we are about to create has a validate name
MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings(), currentState, false);
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
createIndexService.validateIndexSettings(renamedIndexName,
snapshotIndexMetaData.getSettings(),
currentState,
false);
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData)
.state(IndexMetaData.State.OPEN)
.index(renamedIndexName);
indexMdBuilder.settings(Settings.builder()
.put(snapshotIndexMetaData.getSettings())
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
// Remove all aliases - they shouldn't be restored
indexMdBuilder.removeAllAliases();

@ -296,10 +306,13 @@ public class RestoreService implements ClusterStateApplier {
} else {
validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial);
// Index exists and it's closed - open it in metadata and start recovery
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData)
.state(IndexMetaData.State.OPEN);
indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(), currentIndexMetaData.getMappingVersion() + 1));
indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(), currentIndexMetaData.getSettingsVersion() + 1));
indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(),
currentIndexMetaData.getMappingVersion() + 1));
indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(),
currentIndexMetaData.getSettingsVersion() + 1));
if (!request.includeAliases()) {
// Remove all snapshot aliases
if (!snapshotIndexMetaData.getAliases().isEmpty()) {

@ -314,7 +327,10 @@ public class RestoreService implements ClusterStateApplier {
aliases.add(alias.value);
}
}
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
indexMdBuilder.settings(Settings.builder()
.put(snapshotIndexMetaData.getSettings())
.put(IndexMetaData.SETTING_INDEX_UUID,
currentIndexMetaData.getIndexUUID()));
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
blocks.updateBlocks(updatedIndexMetaData);

@ -324,9 +340,12 @@ public class RestoreService implements ClusterStateApplier {

for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
if (!ignoreShards.contains(shard)) {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
shardsBuilder.put(new ShardId(renamedIndex, shard),
new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
} else {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
shardsBuilder.put(new ShardId(renamedIndex, shard),
new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(),
RestoreInProgress.State.FAILURE));
}
}
}

@ -390,7 +409,9 @@ public class RestoreService implements ClusterStateApplier {
private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
if (aliases.contains(renamedIndex.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
throw new SnapshotRestoreException(snapshot,
"cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of " +
"conflict with an alias with the same name");
}
}
}

@ -409,28 +430,34 @@ public class RestoreService implements ClusterStateApplier {
if (request.partial()) {
return true;
} else {
throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot " +
"restore");
}
} else {
return false;
}
}

private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData,
String renamedIndex, boolean partial) {
// Index exist - checking that it's closed
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
// TODO: Enable restore for open indices
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index with same name already exists in the cluster. " +
"Either close or delete the existing index or restore the index under a different name by providing a rename pattern and replacement name");
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index " +
"with same name already exists in the cluster. Either close or delete the existing index or restore the " +
"index under a different name by providing a rename pattern and replacement name");
}
// Index exist - checking if it's partial restore
if (partial) {
throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such " +
"index already exists");
}
// Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
"] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
throw new SnapshotRestoreException(snapshot,
"cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shards " +
"from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" +
snapshotIndexMetaData.getNumberOfShards() + "] shards");
}
}

@ -442,7 +469,10 @@ public class RestoreService implements ClusterStateApplier {
if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) {
return indexMetaData;
}
Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
Settings normalizedChangeSettings = Settings.builder()
.put(changeSettings)
.normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX)
.build();
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
Settings settings = indexMetaData.getSettings();
Set<String> keyFilters = new HashSet<>();

@ -504,7 +534,8 @@ public class RestoreService implements ClusterStateApplier {

} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot",
request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}

@ -521,12 +552,14 @@ public class RestoreService implements ClusterStateApplier {
if (shardsBuilder == null) {
shardsBuilder = ImmutableOpenMap.builder(entry.shards());
}
shardsBuilder.put(shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted"));
shardsBuilder.put(shardId,
new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted"));
}
}
if (shardsBuilder != null) {
ImmutableOpenMap<ShardId, ShardRestoreStatus> shards = shardsBuilder.build();
builder.add(new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(), overallState(RestoreInProgress.State.STARTED, shards), entry.indices(), shards));
builder.add(new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(),
overallState(RestoreInProgress.State.STARTED, shards), entry.indices(), shards));
} else {
builder.add(entry);
}

@ -602,8 +635,8 @@ public class RestoreService implements ClusterStateApplier {
initializedShard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
changes(unassignedShard.recoverySource()).shards.put(
unassignedShard.shardId(),
new ShardRestoreStatus(null,
RestoreInProgress.State.FAILURE, "recovery source type changed from snapshot to " + initializedShard.recoverySource())
new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE,
"recovery source type changed from snapshot to " + initializedShard.recoverySource())
);
}
}

@ -672,7 +705,8 @@ public class RestoreService implements ClusterStateApplier {
return null;
}

static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor<CleanRestoreStateTaskExecutor.Task>, ClusterStateTaskListener {
static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor<CleanRestoreStateTaskExecutor.Task>,
ClusterStateTaskListener {

static class Task {
final String uuid;

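For orientation, not part of the diff: the RestoreService code being re-wrapped above implements the snapshot restore flow, which is normally driven through the restore API. A typical request, assuming a registered repository named `my_repository` and a snapshot named `my_snapshot`, looks like this:

[source,js]
--------------------------------------------------
POST /_snapshot/my_repository/my_snapshot/_restore
{
  "indices": "test-idx",
  "rename_pattern": "test-(.+)",
  "rename_replacement": "restored-$1",
  "include_aliases": false,
  "partial": false
}
--------------------------------------------------

The preconditions enforced in checkPartial, validateExistingIndex and checkAliasNameConflicts above are exactly the failures such a request can hit: a partially snapshotted index restored without `partial`, an open index already using the target name, or a rename that collides with an existing alias.
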
@ -232,16 +232,18 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
private final List<SnapshotShardFailure> shardFailures;

public SnapshotInfo(SnapshotId snapshotId, List<String> indices, SnapshotState state) {
this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0, Collections.emptyList(), null);
this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0,
Collections.emptyList(), null);
}

public SnapshotInfo(SnapshotId snapshotId, List<String> indices, SnapshotState state, Version version) {
this(snapshotId, indices, state, null, version, 0L, 0L, 0, 0, Collections.emptyList(), null);
this(snapshotId, indices, state, null, version, 0L, 0L, 0, 0,
Collections.emptyList(), null);
}

public SnapshotInfo(SnapshotId snapshotId, List<String> indices, long startTime, Boolean includeGlobalState) {
this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, 0, 0,
Collections.emptyList(), includeGlobalState);
this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L,
0, 0, Collections.emptyList(), includeGlobalState);
}

public SnapshotInfo(SnapshotId snapshotId, List<String> indices, long startTime, String reason, long endTime,

@ -306,7 +308,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
public static SnapshotInfo incompatible(SnapshotId snapshotId) {
return new SnapshotInfo(snapshotId, Collections.emptyList(), SnapshotState.INCOMPATIBLE,
"the snapshot is incompatible with the current version of Elasticsearch and its exact version is unknown",
null, 0L, 0L, 0, 0, Collections.emptyList(), null);
null, 0L, 0L, 0, 0,
Collections.emptyList(), null);
}

/**

@ -116,8 +116,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
private final UpdateSnapshotStatusAction updateSnapshotStatusHandler;

@Inject
public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool,
TransportService transportService, IndicesService indicesService,
public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService,
ThreadPool threadPool, TransportService transportService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings);
this.indicesService = indicesService;

@ -188,7 +188,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
for (Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> snapshotShards : snapshotShardsMap.entrySet()) {
Map<ShardId, IndexShardSnapshotStatus> shards = snapshotShards.getValue();
if (shards.containsKey(shardId)) {
logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", shardId, snapshotShards.getKey().getSnapshotId());
logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]",
shardId, snapshotShards.getKey().getSnapshotId());
shards.get(shardId).abortIfNotCompleted("shard is closing, aborting");
}
}

@ -337,7 +338,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements

@Override
public void onFailure(Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e);
logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard",
shardId, snapshot), e);
failure.set(e);
}

@ -367,7 +369,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
* @param snapshot snapshot
* @param snapshotStatus snapshot status
*/
private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, final IndexShardSnapshotStatus snapshotStatus) {
private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId,
final IndexShardSnapshotStatus snapshotStatus) {
final ShardId shardId = indexShard.shardId();
if (indexShard.routingEntry().primary() == false) {
throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary");

@ -526,7 +529,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
*
* @param request update shard status request
*/
private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request, ActionListener<UpdateIndexShardSnapshotStatusResponse> listener) {
private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request,
ActionListener<UpdateIndexShardSnapshotStatusResponse> listener) {
logger.trace("received updated snapshot restore state [{}]", request);
clusterService.submitStateUpdateTask(
"update snapshot state",

@ -549,7 +553,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
class SnapshotStateExecutor implements ClusterStateTaskExecutor<UpdateIndexShardSnapshotStatusRequest> {

@Override
public ClusterTasksResult<UpdateIndexShardSnapshotStatusRequest> execute(ClusterState currentState, List<UpdateIndexShardSnapshotStatusRequest> tasks) throws Exception {
public ClusterTasksResult<UpdateIndexShardSnapshotStatusRequest>
execute(ClusterState currentState, List<UpdateIndexShardSnapshotStatusRequest> tasks) throws Exception {
final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
if (snapshots != null) {
int changedCount = 0;

@ -560,7 +565,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements

for (UpdateIndexShardSnapshotStatusRequest updateSnapshotState : tasks) {
if (entry.snapshot().equals(updateSnapshotState.snapshot())) {
logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), updateSnapshotState.status().state());
logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(),
updateSnapshotState.shardId(), updateSnapshotState.status().state());
if (updated == false) {
shards.putAll(entry.shards());
updated = true;

@ -588,7 +594,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
if (changedCount > 0) {
logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount);

final SnapshotsInProgress updatedSnapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()]));
final SnapshotsInProgress updatedSnapshots =
new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()]));
return ClusterTasksResult.<UpdateIndexShardSnapshotStatusRequest>builder().successes(tasks).build(
ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build());
}

@ -606,8 +613,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
UpdateSnapshotStatusAction(TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(
SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new
SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool,
actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new
);
}

@ -622,7 +629,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
}

@Override
protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, ActionListener<UpdateIndexShardSnapshotStatusResponse> listener) throws Exception {
protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state,
ActionListener<UpdateIndexShardSnapshotStatusResponse> listener) throws Exception {
innerUpdateSnapshotState(request, listener);
}

@ -92,16 +92,20 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
* <p>
* A typical snapshot creating process looks like this:
* <ul>
* <li>On the master node the {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called and makes sure that no snapshots is currently running
* and registers the new snapshot in cluster state</li>
* <li>When cluster state is updated the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method
* kicks in and initializes the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state</li>
* <li>On the master node the {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called and makes sure that no snapshots
* is currently running and registers the new snapshot in cluster state</li>
* <li>When cluster state is updated
* the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method kicks in and initializes
* the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state</li>
* <li>Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
* start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method</li>
* <li>Once shard snapshot is created data node updates state of the shard in the cluster state using the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method</li>
* <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot as completed</li>
* <li>Once shard snapshot is created data node updates state of the shard in the cluster state using
* the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method</li>
* <li>When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
* as completed</li>
* <li>After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository,
* notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state</li>
* notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls
* {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state</li>
* </ul>
*/
public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier {

@ -118,7 +122,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
private final CopyOnWriteArrayList<SnapshotCompletionListener> snapshotCompletionListeners = new CopyOnWriteArrayList<>();

@Inject
public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) {
public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
RepositoriesService repositoriesService, ThreadPool threadPool) {
super(settings);
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;

@ -253,7 +258,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
if (snapshots == null || snapshots.entries().isEmpty()) {
// Store newSnapshot here to be processed in clusterStateProcessed
List<String> indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices()));
List<String> indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState,
request.indicesOptions(), request.indices()));
logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices);
List<IndexId> snapshotIndices = repositoryData.resolveNewIndices(indices);
newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId),

@ -393,9 +399,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

if (entry.state() != State.ABORTED) {
// Replace the snapshot that was just intialized
ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards = shards(currentState, entry.indices());
ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards =
shards(currentState, entry.indices());
if (!partial) {
Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData());
Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(shards,
currentState.metaData());
Set<String> missing = indicesWithMissingShards.v1();
Set<String> closed = indicesWithMissingShards.v2();
if (missing.isEmpty() == false || closed.isEmpty() == false) {

@ -437,8 +445,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(String source, Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot",
snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e,
new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
}

@Override

@ -471,8 +481,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(Exception e) {
logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]",
snapshot.snapshot().getSnapshotId()), e);
removeSnapshotFromClusterState(snapshot.snapshot(), null, e,
new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
}
});
}

@ -484,7 +496,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
private final CreateSnapshotListener userCreateSnapshotListener;
private final Exception e;

CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated, CreateSnapshotListener userCreateSnapshotListener, Exception e) {
CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated,
CreateSnapshotListener userCreateSnapshotListener, Exception e) {
this.snapshot = snapshot;
this.snapshotCreated = snapshotCreated;
this.userCreateSnapshotListener = userCreateSnapshotListener;

@ -520,7 +533,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
snapshot.includeGlobalState());
} catch (Exception inner) {
inner.addSuppressed(exception);
logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository",
snapshot.snapshot()), inner);
}
}
userCreateSnapshotListener.onFailure(e);

@ -744,8 +758,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
} else {
// TODO: Restart snapshot on another node?
snapshotChanged = true;
logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.key, shardStatus.nodeId());
shards.put(shardEntry.key, new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
logger.warn("failing snapshot of shard [{}] on closed node [{}]",
shardEntry.key, shardStatus.nodeId());
shards.put(shardEntry.key,
new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
}
}
}

@ -808,7 +824,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) {
SnapshotsInProgress.Entry updatedSnapshot = snapshot;
if (snapshot.state() == State.STARTED) {
ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards = processWaitingShards(snapshot.shards(), routingTable);
ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards = processWaitingShards(snapshot.shards(),
routingTable);
if (shards != null) {
changed = true;
if (!snapshot.state().completed() && completed(shards.values())) {

@ -831,7 +848,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus

@Override
public void onFailure(String source, Exception e) {
logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
logger.warn(() ->
new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
}
});
}

@ -929,12 +947,14 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
* @param shards list of shard statuses
* @return list of failed and closed indices
*/
private Tuple<Set<String>, Set<String>> indicesWithMissingShards(ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards, MetaData metaData) {
private Tuple<Set<String>, Set<String>> indicesWithMissingShards(
ImmutableOpenMap<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shards, MetaData metaData) {
Set<String> missing = new HashSet<>();
Set<String> closed = new HashSet<>();
for (ObjectObjectCursor<ShardId, SnapshotsInProgress.ShardSnapshotStatus> entry : shards) {
if (entry.value.state() == State.MISSING) {
if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) {
if (metaData.hasIndex(entry.key.getIndex().getName()) &&
metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) {
closed.add(entry.key.getIndex().getName());
} else {
missing.add(entry.key.getIndex().getName());

@ -1130,7 +1150,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
public ClusterState execute(ClusterState currentState) throws Exception {
SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted");
throw new ConcurrentSnapshotExecutionException(snapshot,
"cannot delete - another snapshot is currently being deleted");
}
RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
if (restoreInProgress != null) {

@ -1236,7 +1257,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
listener, true);

} catch (Exception ex) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
logger.warn(() ->
new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
}
}
);

@ -1384,7 +1406,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
IndexMetaData indexMetaData = metaData.index(indexName);
if (indexMetaData == null) {
// The index was deleted before we managed to start the snapshot - mark it as missing.
builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0), new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index"));
builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0),
new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index"));
} else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
ShardId shardId = new ShardId(indexMetaData.getIndex(), i);

@ -1397,17 +1420,22 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
if (indexRoutingTable != null) {
ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
if (primary == null || !primary.assignedToNode()) {
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
builder.put(shardId,
new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
} else if (primary.relocating() || primary.initializing()) {
// The WAITING state was introduced in V1.2.0 - don't use it if there are nodes with older version in the cluster
// The WAITING state was introduced in V1.2.0 -
// don't use it if there are nodes with older version in the cluster
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING));
} else if (!primary.started()) {
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, "primary shard hasn't been started yet"));
builder.put(shardId,
new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING,
"primary shard hasn't been started yet"));
} else {
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId()));
}
} else {
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing routing table"));
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING,
"missing routing table"));
}
}
}

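Aside, not part of the diff: the lifecycle described in the SnapshotsService class javadoc at the top of this file is what a create-snapshot request sets in motion. Assuming a registered repository named `my_repository`:

[source,js]
--------------------------------------------------
PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
{
  "indices": "test-idx",
  "include_global_state": false,
  "partial": false
}
--------------------------------------------------

The `partial` flag maps onto the missing/closed-shard handling in indicesWithMissingShards above: a non-partial snapshot fails if any primary shard is missing or its index is closed, while a partial snapshot records those shards as MISSING and continues.
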
@ -111,7 +111,6 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
.put(super.nodeSettings(nodeOrdinal))
// manual collection or upon cluster forming.
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s")
.build();
}

@ -120,6 +119,11 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class);
}

private void setClusterInfoTimeout(String timeValue) {
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), timeValue).build()));
}

public void testClusterInfoServiceCollectsInformation() throws Exception {
internalCluster().startNodes(2);
assertAcked(prepareCreate("test").setSettings(Settings.builder()

@ -204,6 +208,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
});
}

setClusterInfoTimeout("1s");
// timeouts shouldn't clear the info
timeout.set(true);
info = infoService.refresh();

@ -237,6 +242,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {

// check we recover
blockingActionFilter.blockActions();
setClusterInfoTimeout("15s");
info = infoService.refresh();
assertNotNull("info should not be null", info);
assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(2));

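Aside, not part of the diff: the new setClusterInfoTimeout helper adjusts the same setting an operator would change through the cluster settings API. Assuming the key behind INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING is `cluster.info.update.timeout`, the equivalent request would be:

[source,js]
--------------------------------------------------
PUT /_cluster/settings
{
  "transient": {
    "cluster.info.update.timeout": "15s"
  }
}
--------------------------------------------------
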
@ -105,13 +105,13 @@ import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERV
import static org.elasticsearch.node.Node.NODE_NAME_SETTING;
import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME;
import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;

@ -946,9 +946,10 @@ public class CoordinatorTests extends ESTestCase {
cluster.runFor(DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "committing setting update");

leader.disconnect();
cluster.runFor(defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection");
cluster.runFor(defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING) + defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING)
+ DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection");

assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), contains(expectedBlock));
assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), hasItem(expectedBlock));

// TODO reboot the leader and verify that the same block is applied when it restarts
}

@ -25,7 +25,9 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.SnapshotDeletionsInProgress;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException;
import org.elasticsearch.cluster.service.ClusterService;

@ -72,7 +74,6 @@ public class SnapshotDisruptionIT extends ESIntegTestCase {
.build();
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36779")
public void testDisruptionOnSnapshotInitialization() throws Exception {
final String idxName = "test";
final List<String> allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3);

@ -133,11 +134,15 @@ public class SnapshotDisruptionIT extends ESIntegTestCase {

logger.info("--> wait until the snapshot is done");
assertBusy(() -> {
SnapshotsInProgress snapshots = dataNodeClient().admin().cluster().prepareState().setLocal(false).get().getState()
.custom(SnapshotsInProgress.TYPE);
ClusterState state = dataNodeClient().admin().cluster().prepareState().get().getState();
SnapshotsInProgress snapshots = state.custom(SnapshotsInProgress.TYPE);
SnapshotDeletionsInProgress snapshotDeletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE);
if (snapshots != null && snapshots.entries().size() > 0) {
logger.info("Current snapshot state [{}]", snapshots.entries().get(0).state());
fail("Snapshot is still running");
} else if (snapshotDeletionsInProgress != null && snapshotDeletionsInProgress.hasDeletionsInProgress()) {
logger.info("Current snapshot deletion state [{}]", snapshotDeletionsInProgress);
fail("Snapshot deletion is still running");
} else {
logger.info("Snapshot is no longer in the cluster state");
}

@ -167,4 +167,10 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase {
}
}

public void testStatePersistence() throws Exception {
createIndex("test");
internalCluster().fullRestart();
assertTrue(client().admin().indices().prepareExists("test").get().isExists());
}

}

@ -69,7 +69,7 @@ public class SingleNodeDiscoveryTests extends ESTestCase {
clusterState.set(clusterStateSupplier.get());
listener.onSuccess(source);
}
});
}, null);
discovery.start();
discovery.startInitialJoin();
final DiscoveryNodes nodes = clusterState.get().nodes();

@ -172,6 +172,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANS
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;
import static org.hamcrest.CoreMatchers.instanceOf;

@ -1801,7 +1802,7 @@ public class InternalEngineTests extends EngineTestCase {
int opsPerformed = 0;
long lastOpVersion = currentOpVersion;
long lastOpSeqNo = UNASSIGNED_SEQ_NO;
long lastOpTerm = 0;
long lastOpTerm = UNASSIGNED_PRIMARY_TERM;
final AtomicLong currentTerm = new AtomicLong(1);
BiFunction<Long, Engine.Index, Engine.Index> indexWithVersion = (version, index) -> new Engine.Index(index.uid(), index.parsedDoc(),
UNASSIGNED_SEQ_NO, currentTerm.get(), version, index.versionType(), index.origin(), index.startTime(),

@ -1900,7 +1901,7 @@ public class InternalEngineTests extends EngineTestCase {
docDeleted = true;
lastOpVersion = result.getVersion();
lastOpSeqNo = UNASSIGNED_SEQ_NO;
lastOpTerm = 0;
lastOpTerm = UNASSIGNED_PRIMARY_TERM;
opsPerformed++;
}
}

@ -4309,7 +4310,7 @@ public class InternalEngineTests extends EngineTestCase {
final long seqNo;
DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), get.uid());
if (docIdAndSeqNo == null) {
primaryTerm = 0;
primaryTerm = UNASSIGNED_PRIMARY_TERM;
seqNo = UNASSIGNED_SEQ_NO;
} else {
seqNo = docIdAndSeqNo.seqNo;

@ -44,6 +44,7 @@ import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.index.get.DocumentFieldTests.randomDocumentField;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;

@ -209,7 +210,7 @@ public class GetResultTests extends ESTestCase {
}
} else {
seqNo = UNASSIGNED_SEQ_NO;
primaryTerm = 0;
primaryTerm = UNASSIGNED_PRIMARY_TERM;
version = -1;
exists = false;
}

@ -31,10 +31,8 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;

@ -71,10 +69,8 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.core.Is.is;

public class TextFieldMapperTests extends ESSingleNodeTestCase {

@ -817,18 +813,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {

public void testIndexPrefixMapping() throws IOException {

QueryShardContext queryShardContext = indexService.newQueryShardContext(
randomInt(20), null, () -> {
throw new UnsupportedOperationException();
}, null);

{
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field")
.field("type", "text")
.field("analyzer", "standard")
.startObject("index_prefixes")
.field("min_chars", 1)
.field("min_chars", 2)
.field("max_chars", 10)
.endObject()
.endObject().endObject()

@ -837,16 +828,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
assertEquals(mapping, mapper.mappingSource().toString());

assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=1:10"));

FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field");
MappedFieldType fieldType = fieldMapper.fieldType;

Query q = fieldType.prefixQuery("goin", CONSTANT_SCORE_REWRITE, queryShardContext);

assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q);
q = fieldType.prefixQuery("internationalisatio", CONSTANT_SCORE_REWRITE, queryShardContext);
assertEquals(new PrefixQuery(new Term("field", "internationalisatio")), q);
assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:10"));

ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()

@ -870,17 +852,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
CompressedXContent json = new CompressedXContent(mapping);
DocumentMapper mapper = parser.parse("type", json);

FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field");
MappedFieldType fieldType = fieldMapper.fieldType;
assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:5"));

Query q1 = fieldType.prefixQuery("g", CONSTANT_SCORE_REWRITE, queryShardContext);
assertThat(q1, instanceOf(PrefixQuery.class));
Query q2 = fieldType.prefixQuery("go", CONSTANT_SCORE_REWRITE, queryShardContext);
assertThat(q2, instanceOf(ConstantScoreQuery.class));
Query q5 = fieldType.prefixQuery("going", CONSTANT_SCORE_REWRITE, queryShardContext);
assertThat(q5, instanceOf(ConstantScoreQuery.class));
Query q6 = fieldType.prefixQuery("goings", CONSTANT_SCORE_REWRITE, queryShardContext);
assertThat(q6, instanceOf(PrefixQuery.class));
}

{

@ -898,10 +871,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject()
.endObject().endObject());

IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
indexService.mapperService()
.merge("type", new CompressedXContent(illegalMapping), MergeReason.MAPPING_UPDATE);
});
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
indexService.mapperService().merge("type", new CompressedXContent(illegalMapping), MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(), containsString("Field [field._index_prefix] is defined twice in [type]"));

}

@ -20,11 +20,18 @@ package org.elasticsearch.index.mapper;

import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.common.unit.Fuzziness;
import org.junit.Before;

@ -32,6 +39,8 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE;

public class TextFieldTypeTests extends FieldTypeTestCase {
@Override
protected MappedFieldType createDefaultFieldType() {

@ -143,4 +152,21 @@ public class TextFieldTypeTests extends FieldTypeTestCase {
() -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true));
assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}

public void testIndexPrefixes() {
TextFieldMapper.TextFieldType ft = new TextFieldMapper.TextFieldType();
ft.setName("field");
ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType("field._index_prefix", 2, 10));

Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, null);
assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q);

q = ft.prefixQuery("internationalisatio", CONSTANT_SCORE_REWRITE, null);
assertEquals(new PrefixQuery(new Term("field", "internationalisatio")), q);

q = ft.prefixQuery("g", CONSTANT_SCORE_REWRITE, null);
Automaton automaton
= Operations.concatenate(Arrays.asList(Automata.makeChar('g'), Automata.makeAnyChar()));
assertEquals(new ConstantScoreQuery(new AutomatonQuery(new Term("field._index_prefix", "g*"), automaton)), q);
}
}

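Aside, not part of the diff: the behaviour asserted in testIndexPrefixes is configured through the `index_prefixes` option on `text` fields. An illustrative mapping matching the `PrefixFieldType("field._index_prefix", 2, 10)` used in the test (the index name is assumed, and the typed mapping wrapper follows the `type` used elsewhere in these tests):

[source,js]
--------------------------------------------------
PUT /prefix_example
{
  "mappings": {
    "type": {
      "properties": {
        "field": {
          "type": "text",
          "analyzer": "standard",
          "index_prefixes": {
            "min_chars": 2,
            "max_chars": 10
          }
        }
      }
    }
  }
}
--------------------------------------------------

With such a mapping, a prefix between 2 and 10 characters (for example "goin") is rewritten to a constant-score term query on the hidden `field._index_prefix` field, a single character is expanded into an automaton over that field, and anything longer than `max_chars` (for example "internationalisatio") falls back to an ordinary `PrefixQuery` on the main field, which is exactly what the assertions above verify.
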
@ -25,11 +25,16 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.intervals.IntervalQuery;
import org.apache.lucene.search.intervals.Intervals;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;

@ -277,4 +282,48 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase<IntervalQue
});
assertThat(e.getMessage(), equalTo("Only one interval rule can be specified, found [match] and [all_of]"));
}

public void testScriptFilter() throws IOException {

IntervalFilterScript.Factory factory = () -> new IntervalFilterScript() {
@Override
public boolean execute(Interval interval) {
return interval.getStart() > 3;
}
};

ScriptService scriptService = new ScriptService(Settings.EMPTY, Collections.emptyMap(), Collections.emptyMap()){
@Override
@SuppressWarnings("unchecked")
public <FactoryType> FactoryType compile(Script script, ScriptContext<FactoryType> context) {
assertEquals(IntervalFilterScript.CONTEXT, context);
assertEquals(new Script("interval.start > 3"), script);
return (FactoryType) factory;
}
};

QueryShardContext baseContext = createShardContext();
QueryShardContext context = new QueryShardContext(baseContext.getShardId(), baseContext.getIndexSettings(),
null, null, baseContext.getMapperService(), null,
scriptService,
null, null, null, null, null, null);

String json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " +
"\"match\" : { " +
"   \"query\" : \"term1\"," +
"   \"filter\" : { " +
"       \"script\" : { " +
"            \"source\" : \"interval.start > 3\" } } } } } }";

IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json);
Query q = builder.toQuery(context);

IntervalQuery expected = new IntervalQuery(STRING_FIELD_NAME,
new IntervalsSourceProvider.ScriptFilterSource(Intervals.term("term1"), "interval.start > 3", null));
assertEquals(expected, q);

}

}

@@ -83,7 +83,7 @@ public class TranslogHeaderTests extends ESTestCase {
        try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
            final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel);
            assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID));
            assertThat(inHeader.getPrimaryTerm(), equalTo(TranslogHeader.UNKNOWN_PRIMARY_TERM));
            assertThat(inHeader.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM));
            assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position()));
        }
        expectThrows(TranslogCorruptedException.class, () -> {
@ -367,7 +367,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
|
|||
logger.info("--> start 2 nodes");
|
||||
Client client = client();
|
||||
|
||||
assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 0)));
|
||||
assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
|
||||
.put("number_of_replicas", 0)));
|
||||
ensureGreen();
|
||||
|
||||
logger.info("--> indexing some data");
|
||||
|
@ -392,7 +393,10 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
|
|||
String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
|
||||
|
||||
logger.info("--> snapshot");
|
||||
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
|
||||
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
|
||||
.setWaitForCompletion(false)
|
||||
.setIndices("test-idx")
|
||||
.get();
|
||||
|
||||
logger.info("--> waiting for block to kick in");
|
||||
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
|
||||
|
@ -415,7 +419,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
|
|||
nodes.add(internalCluster().startNode());
|
||||
Client client = client();
|
||||
|
||||
assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 0)));
|
||||
assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
|
||||
.put("number_of_replicas", 0)));
|
||||
ensureGreen();
|
||||
|
||||
logger.info("--> indexing some data");
|
||||
|
@ -443,7 +448,10 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
|
|||
|
||||
int numberOfFilesBeforeSnapshot = numberOfFiles(repo);
|
||||
logger.info("--> snapshot");
|
||||
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
|
||||
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
|
||||
.setWaitForCompletion(false)
|
||||
.setIndices("test-idx")
|
||||
.get();
|
||||
|
||||
logger.info("--> waiting for block to kick in");
|
||||
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
|
||||
|
@ -509,7 +517,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
|
|||
ensureGreen("test-idx-all");
|
||||
|
||||
logger.info("--> create an index that will be closed");
|
||||
assertAcked(prepareCreate("test-idx-closed", 1, Settings.builder().put("number_of_shards", 4).put("number_of_replicas", 0)));
|
||||
assertAcked(prepareCreate("test-idx-closed", 1, Settings.builder().put("number_of_shards", 4)
|
||||
.put("number_of_replicas", 0)));
|
||||
ensureGreen("test-idx-closed");
|
||||
|
||||
logger.info("--> indexing some data into test-idx-all");
|
||||
|
|
|
@ -1345,7 +1345,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
|||
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
|
||||
|
||||
logger.info("--> make sure snapshot doesn't exist");
|
||||
assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
|
||||
assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo")
|
||||
.addSnapshots("test-snap-1"), SnapshotMissingException.class);
|
||||
|
||||
for (String index : indices) {
|
||||
assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId())));
|
||||
|
@ -1384,7 +1385,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
|||
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
|
||||
|
||||
logger.info("--> make sure snapshot doesn't exist");
|
||||
assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
|
||||
assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo")
|
||||
.addSnapshots("test-snap-1"), SnapshotMissingException.class);
|
||||
}
|
||||
|
||||
public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception {
|
||||
|
@ -2014,7 +2016,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
|||
logger.info("--> waiting for block to kick in");
|
||||
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
|
||||
|
||||
logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode);
|
||||
logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot",
|
||||
blockedNode);
|
||||
SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet();
|
||||
assertThat(response.getSnapshots().size(), equalTo(1));
|
||||
SnapshotStatus snapshotStatus = response.getSnapshots().get(0);
|
||||
|
|
|
@ -346,7 +346,8 @@ public class MockRepository extends FsRepository {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
|
||||
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
|
||||
throws IOException {
|
||||
maybeIOExceptionOrBlock(blobName);
|
||||
super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
|
||||
if (RandomizedContext.current().getRandom().nextBoolean()) {
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.script;
|
|||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.search.Scorable;
|
||||
import org.elasticsearch.index.query.IntervalFilterScript;
|
||||
import org.elasticsearch.index.similarity.ScriptedSimilarity.Doc;
|
||||
import org.elasticsearch.index.similarity.ScriptedSimilarity.Field;
|
||||
import org.elasticsearch.index.similarity.ScriptedSimilarity.Query;
|
||||
|
@ -287,6 +288,9 @@ public class MockScriptEngine implements ScriptEngine {
|
|||
} else if (context.instanceClazz.equals(ScriptedMetricAggContexts.ReduceScript.class)) {
|
||||
ScriptedMetricAggContexts.ReduceScript.Factory factory = mockCompiled::createMetricAggReduceScript;
|
||||
return context.factoryClazz.cast(factory);
|
||||
} else if (context.instanceClazz.equals(IntervalFilterScript.class)) {
|
||||
IntervalFilterScript.Factory factory = mockCompiled::createIntervalFilterScript;
|
||||
return context.factoryClazz.cast(factory);
|
||||
}
|
||||
ContextCompiler compiler = contexts.get(context);
|
||||
if (compiler != null) {
|
||||
|
@ -353,6 +357,15 @@ public class MockScriptEngine implements ScriptEngine {
|
|||
public ScriptedMetricAggContexts.ReduceScript createMetricAggReduceScript(Map<String, Object> params, List<Object> states) {
|
||||
return new MockMetricAggReduceScript(params, states, script != null ? script : ctx -> 42d);
|
||||
}
|
||||
|
||||
public IntervalFilterScript createIntervalFilterScript() {
|
||||
return new IntervalFilterScript() {
|
||||
@Override
|
||||
public boolean execute(Interval interval) {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
public static class MockFilterScript implements FilterScript.LeafFactory {
|
||||
|
|
|
@ -961,7 +961,8 @@ public final class InternalTestCluster extends TestCluster {
|
|||
.put(newSettings)
|
||||
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), newIdSeed)
|
||||
.build();
|
||||
if (DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) {
|
||||
final boolean usingSingleNodeDiscovery = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(finalSettings).equals("single-node");
|
||||
if (usingSingleNodeDiscovery == false && DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(finalSettings) == false) {
|
||||
throw new IllegalStateException(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
|
||||
" is not configured after restart of [" + name + "]");
|
||||
}
|
||||
|
|
|
@@ -94,6 +94,16 @@ descriptions for the format that you are using.
                        the request associated with this event. This header can
                        be used freely by the client to mark API calls, as it has
                        no semantics in Elasticsearch.
`x_forwarded_for`   ::  The verbatim value of the `X-Forwarded-For` HTTP request
                        header (if present) of the request associated with the
                        audit event. This header is commonly added by proxies
                        when they forward requests and the value is the address
                        of the proxied client. When a request crosses multiple
                        proxies the header is a comma delimited list with the
                        last value being the address of the second to last
                        proxy server (the address of the last proxy server is
                        designated by the `origin.address` field).


==== Audit event attributes of the REST event type

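As a side note on the `x_forwarded_for` attribute documented above: the audit event carries the raw comma-delimited header value, so a log consumer has to split it itself to recover the originating client and the proxy chain. A small illustrative sketch, not part of this change:

[source,java]
--------------------------------------------------
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative only: split a verbatim X-Forwarded-For value such as
// "203.0.113.7, 10.0.0.5, 10.0.0.9" into its individual addresses.
public final class XForwardedForValues {

    private XForwardedForValues() {}

    public static List<String> addresses(String headerValue) {
        if (headerValue == null || headerValue.trim().isEmpty()) {
            return Collections.emptyList();
        }
        List<String> result = new ArrayList<>();
        for (String part : headerValue.split(",")) {
            String trimmed = part.trim();
            if (trimmed.isEmpty() == false) {
                result.add(trimmed);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<String> addrs = addresses("203.0.113.7, 10.0.0.5, 10.0.0.9");
        // The first entry is the originating client; later entries are the proxies in order.
        System.out.println("client=" + addrs.get(0) + " proxies=" + addrs.subList(1, addrs.size()));
    }
}
--------------------------------------------------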
@ -51,6 +51,7 @@ import org.elasticsearch.index.shard.ShardId;
|
|||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
import org.elasticsearch.license.LicenseService;
|
||||
import org.elasticsearch.license.LicensesMetaData;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
@ -124,6 +125,10 @@ public abstract class CcrIntegTestCase extends ESTestCase {
|
|||
Function.identity());
|
||||
leaderCluster.beforeTest(random(), 0.0D);
|
||||
leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster());
|
||||
assertBusy(() -> {
|
||||
ClusterService clusterService = leaderCluster.getInstance(ClusterService.class);
|
||||
assertNotNull(clusterService.state().metaData().custom(LicensesMetaData.TYPE));
|
||||
});
|
||||
|
||||
String address = leaderCluster.getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString();
|
||||
InternalTestCluster followerCluster = new InternalTestCluster(randomLong(), createTempDir(), true, true, numberOfNodesPerCluster(),
|
||||
|
@ -133,6 +138,10 @@ public abstract class CcrIntegTestCase extends ESTestCase {
|
|||
|
||||
followerCluster.beforeTest(random(), 0.0D);
|
||||
followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster());
|
||||
assertBusy(() -> {
|
||||
ClusterService clusterService = followerCluster.getInstance(ClusterService.class);
|
||||
assertNotNull(clusterService.state().metaData().custom(LicensesMetaData.TYPE));
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -11,6 +11,7 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.license.LicenseService;
|
||||
import org.elasticsearch.license.LicensesMetaData;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -57,6 +58,11 @@ public abstract class CcrSingleNodeTestCase extends ESSingleNodeTestCase {
|
|||
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
|
||||
}
|
||||
|
||||
@Before
|
||||
public void waitForTrialLicenseToBeGenerated() throws Exception {
|
||||
assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metaData().custom(LicensesMetaData.TYPE)));
|
||||
}
|
||||
|
||||
@After
|
||||
public void purgeCCRMetadata() throws Exception {
|
||||
ClusterService clusterService = getInstanceFromNode(ClusterService.class);
|
||||
|
|
|
@@ -28,6 +28,7 @@ appender.audit_rolling.layout.pattern = {\
                %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
                %varsNotEmpty{, "indices":%map{indices}}\
                %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
                %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
                %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
                %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
                %varsNotEmpty{, "event.category":"%enc{%map{event.category}}{JSON}"}\
@@ -56,6 +57,7 @@ appender.audit_rolling.layout.pattern = {\
# "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
# "indices" the array of indices that the "action" is acting upon
# "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
# "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
# "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
# "rule" name of the applied rule if the "origin.type" is "ip_filter"
# "event.category" fixed value "elasticsearch-audit"
@@ -38,12 +38,12 @@ public class AnnotationIndex {
    public static final String INDEX_NAME = ".ml-annotations-6";

    /**
     * Create the .ml-annotations index with correct mappings.
     * This index is read and written by the UI results views,
     * so needs to exist when there might be ML results to view.
     * Create the .ml-annotations index with correct mappings if it does not already
     * exist. This index is read and written by the UI results views, so needs to
     * exist when there might be ML results to view.
     */
    public static void createAnnotationsIndex(Settings settings, Client client, ClusterState state,
                                              final ActionListener<Boolean> finalListener) {
    public static void createAnnotationsIndexIfNecessary(Settings settings, Client client, ClusterState state,
                                                         final ActionListener<Boolean> finalListener) {

        final ActionListener<Boolean> createAliasListener = ActionListener.wrap(success -> {
            final IndicesAliasesRequest request = client.admin().indices().prepareAliases()
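The reworded Javadoc above makes the contract explicit: the call is a no-op when the annotations index already exists. For orientation only, a rough sketch of that create-if-absent shape (hypothetical helper; the real method also sets up the read/write aliases and mappings):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;

// Illustrative only: issue a create-index request when the cluster state shows the index
// is missing, otherwise succeed without doing anything.
final class CreateIfAbsent {

    static void createIndexIfNecessary(Client client, ClusterState state, String indexName,
                                       ActionListener<Boolean> listener) {
        if (state.getMetaData().hasIndex(indexName)) {
            listener.onResponse(false); // nothing to do, the index is already there
            return;
        }
        client.admin().indices().create(new CreateIndexRequest(indexName), ActionListener.wrap(
            response -> listener.onResponse(response.isAcknowledged()),
            listener::onFailure));
    }
}
--------------------------------------------------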
@@ -10,6 +10,8 @@ package org.elasticsearch.xpack.core.ml.job.persistence;
 */
public final class AnomalyDetectorsIndex {

    public static final int CONFIG_INDEX_MAX_RESULTS_WINDOW = 10_000;

    private AnomalyDetectorsIndex() {
    }

@@ -670,7 +670,9 @@ public class MachineLearning extends Plugin implements ActionPlugin, AnalysisPlu
                    // least possible burden on Elasticsearch
                    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                    .put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
                    .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting))
                    .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayedNodeTimeOutSetting)
                    .put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(),
                        AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW))
                .version(Version.CURRENT.id)
                .putMapping(ElasticsearchMappings.DOC_TYPE, Strings.toString(configMapping))
                .build();
@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml;
|
|||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -14,6 +15,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
|||
import org.elasticsearch.xpack.core.ml.MlMetadata;
|
||||
import org.elasticsearch.xpack.core.ml.MlTasks;
|
||||
import org.elasticsearch.xpack.core.ml.job.config.Job;
|
||||
import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
|
||||
|
||||
/**
|
||||
* Checks whether migration can start and whether ML resources (e.g. jobs, datafeeds)
|
||||
|
@@ -37,10 +39,12 @@ public class MlConfigMigrationEligibilityCheck {
        this.isConfigMigrationEnabled = configMigrationEnabled;
    }

    /**
     * Can migration start? Returns:
     * False if config migration is disabled via the setting {@link #ENABLE_CONFIG_MIGRATION}
     * False if the min node version of the cluster is before {@link #MIN_NODE_VERSION}
     * False if the .ml-config index shards are not active
     * True otherwise
     * @param clusterState The cluster state
     * @return A boolean that dictates if config migration can start
@@ -54,12 +58,26 @@ public class MlConfigMigrationEligibilityCheck {
        if (minNodeVersion.before(MIN_NODE_VERSION)) {
            return false;
        }

        return mlConfigIndexIsAllocated(clusterState);
    }

    static boolean mlConfigIndexIsAllocated(ClusterState clusterState) {
        if (clusterState.metaData().hasIndex(AnomalyDetectorsIndex.configIndexName()) == false) {
            return false;
        }

        IndexRoutingTable routingTable = clusterState.getRoutingTable().index(AnomalyDetectorsIndex.configIndexName());
        if (routingTable == null || routingTable.allPrimaryShardsActive() == false) {
            return false;
        }
        return true;
    }

    /**
     * Is the job eligible for migration? Returns:
     * False if {@link #canStartMigration(ClusterState)} returns {@code false}
     * False if the job is not in the cluster state
     * False if the job is marked as deleting ({@link Job#isDeleting()})
     * False if the job has a persistent task
     * True otherwise i.e. the job is present, not deleting
@ -11,6 +11,8 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteRequest;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||
import org.elasticsearch.action.bulk.BulkRequestBuilder;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
|
@ -21,6 +23,7 @@ import org.elasticsearch.action.support.WriteRequest;
|
|||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -29,6 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent;
|
|||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
||||
import org.elasticsearch.xpack.core.ml.MlMetadata;
|
||||
import org.elasticsearch.xpack.core.ml.MlTasks;
|
||||
|
@ -126,19 +130,11 @@ public class MlConfigMigrator {
|
|||
* @param listener The success listener
|
||||
*/
|
||||
public void migrateConfigsWithoutTasks(ClusterState clusterState, ActionListener<Boolean> listener) {
|
||||
|
||||
if (migrationEligibilityCheck.canStartMigration(clusterState) == false) {
|
||||
listener.onResponse(false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (migrationInProgress.compareAndSet(false, true) == false) {
|
||||
listener.onResponse(Boolean.FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
logger.debug("migrating ml configurations");
|
||||
|
||||
ActionListener<Boolean> unMarkMigrationInProgress = ActionListener.wrap(
|
||||
response -> {
|
||||
migrationInProgress.set(false);
|
||||
|
@ -150,19 +146,34 @@ public class MlConfigMigrator {
|
|||
}
|
||||
);
|
||||
|
||||
snapshotMlMeta(MlMetadata.getMlMetadata(clusterState), ActionListener.wrap(
|
||||
response -> {
|
||||
// We have successfully snapshotted the ML configs so we don't need to try again
|
||||
tookConfigSnapshot.set(true);
|
||||
List<JobsAndDatafeeds> batches = splitInBatches(clusterState);
|
||||
if (batches.isEmpty()) {
|
||||
unMarkMigrationInProgress.onResponse(Boolean.FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
List<JobsAndDatafeeds> batches = splitInBatches(clusterState);
|
||||
if (batches.isEmpty()) {
|
||||
unMarkMigrationInProgress.onResponse(Boolean.FALSE);
|
||||
return;
|
||||
}
|
||||
migrateBatches(batches, unMarkMigrationInProgress);
|
||||
},
|
||||
unMarkMigrationInProgress::onFailure
|
||||
if (clusterState.metaData().hasIndex(AnomalyDetectorsIndex.configIndexName()) == false) {
|
||||
createConfigIndex(ActionListener.wrap(
|
||||
response -> {
|
||||
unMarkMigrationInProgress.onResponse(Boolean.FALSE);
|
||||
},
|
||||
unMarkMigrationInProgress::onFailure
|
||||
));
|
||||
return;
|
||||
}
|
||||
|
||||
if (migrationEligibilityCheck.canStartMigration(clusterState) == false) {
|
||||
unMarkMigrationInProgress.onResponse(Boolean.FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
snapshotMlMeta(MlMetadata.getMlMetadata(clusterState), ActionListener.wrap(
|
||||
response -> {
|
||||
// We have successfully snapshotted the ML configs so we don't need to try again
|
||||
tookConfigSnapshot.set(true);
|
||||
migrateBatches(batches, unMarkMigrationInProgress);
|
||||
},
|
||||
unMarkMigrationInProgress::onFailure
|
||||
));
|
||||
}
|
||||
|
||||
|
@ -296,6 +307,7 @@ public class MlConfigMigrator {
|
|||
private void addJobIndexRequests(Collection<Job> jobs, BulkRequestBuilder bulkRequestBuilder) {
|
||||
ToXContent.Params params = new ToXContent.MapParams(JobConfigProvider.TO_XCONTENT_PARAMS);
|
||||
for (Job job : jobs) {
|
||||
logger.debug("adding job to migrate: " + job.getId());
|
||||
bulkRequestBuilder.add(indexRequest(job, Job.documentId(job.getId()), params));
|
||||
}
|
||||
}
|
||||
|
@ -303,6 +315,7 @@ public class MlConfigMigrator {
|
|||
private void addDatafeedIndexRequests(Collection<DatafeedConfig> datafeedConfigs, BulkRequestBuilder bulkRequestBuilder) {
|
||||
ToXContent.Params params = new ToXContent.MapParams(DatafeedConfigProvider.TO_XCONTENT_PARAMS);
|
||||
for (DatafeedConfig datafeedConfig : datafeedConfigs) {
|
||||
logger.debug("adding datafeed to migrate: " + datafeedConfig.getId());
|
||||
bulkRequestBuilder.add(indexRequest(datafeedConfig, DatafeedConfig.documentId(datafeedConfig.getId()), params));
|
||||
}
|
||||
}
|
||||
|
@ -318,7 +331,6 @@ public class MlConfigMigrator {
|
|||
return indexRequest;
|
||||
}
|
||||
|
||||
|
||||
// public for testing
|
||||
public void snapshotMlMeta(MlMetadata mlMetadata, ActionListener<Boolean> listener) {
|
||||
|
||||
|
@ -361,6 +373,30 @@ public class MlConfigMigrator {
|
|||
);
|
||||
}
|
||||
|
||||
private void createConfigIndex(ActionListener<Boolean> listener) {
|
||||
logger.info("creating the .ml-config index");
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest(AnomalyDetectorsIndex.configIndexName());
|
||||
try
|
||||
{
|
||||
createIndexRequest.settings(
|
||||
Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
|
||||
.put(IndexSettings.MAX_RESULT_WINDOW_SETTING.getKey(), AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
);
|
||||
createIndexRequest.mapping(ElasticsearchMappings.DOC_TYPE, ElasticsearchMappings.configMapping());
|
||||
} catch (Exception e) {
|
||||
logger.error("error writing the .ml-config mappings", e);
|
||||
listener.onFailure(e);
|
||||
return;
|
||||
}
|
||||
|
||||
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, createIndexRequest,
|
||||
ActionListener.<CreateIndexResponse>wrap(
|
||||
r -> listener.onResponse(r.isAcknowledged()),
|
||||
listener::onFailure
|
||||
), client.admin().indices()::create);
|
||||
}
|
||||
|
||||
public static Job updateJobForMigration(Job job) {
|
||||
Job.Builder builder = new Job.Builder(job);
|
||||
|
|
|
@ -19,6 +19,8 @@ import org.elasticsearch.gateway.GatewayService;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.xpack.core.ml.annotations.AnnotationIndex;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
class MlInitializationService implements LocalNodeMasterListener, ClusterStateListener {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(MlInitializationService.class);
|
||||
|
@ -27,6 +29,7 @@ class MlInitializationService implements LocalNodeMasterListener, ClusterStateLi
|
|||
private final ThreadPool threadPool;
|
||||
private final ClusterService clusterService;
|
||||
private final Client client;
|
||||
private final AtomicBoolean isIndexCreationInProgress = new AtomicBoolean(false);
|
||||
|
||||
private volatile MlDailyMaintenanceService mlDailyMaintenanceService;
|
||||
|
||||
|
@@ -55,14 +58,20 @@ class MlInitializationService implements LocalNodeMasterListener, ClusterStateLi
            return;
        }

        if (event.localNodeMaster()) {
            AnnotationIndex.createAnnotationsIndex(settings, client, event.state(), ActionListener.wrap(
        // The atomic flag prevents multiple simultaneous attempts to create the
        // index if there is a flurry of cluster state updates in quick succession
        if (event.localNodeMaster() && isIndexCreationInProgress.compareAndSet(false, true)) {
            AnnotationIndex.createAnnotationsIndexIfNecessary(settings, client, event.state(), ActionListener.wrap(
                r -> {
                    isIndexCreationInProgress.set(false);
                    if (r) {
                        logger.info("Created ML annotations index and aliases");
                    }
                },
                e -> logger.error("Error creating ML annotations index or aliases", e)));
                e -> {
                    isIndexCreationInProgress.set(false);
                    logger.error("Error creating ML annotations index or aliases", e);
                }));
        }
    }

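The comment introduced in the hunk above explains why the new `isIndexCreationInProgress` flag exists. A minimal, standalone sketch of that compare-and-set guard, with assumed names rather than the production class:

[source,java]
--------------------------------------------------
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative sketch: only the caller that flips the flag from false to true starts the
// asynchronous work; every other caller returns immediately. The completion callback
// clears the flag in both the success and failure paths so a later attempt can run again.
final class SingleFlightGuard {

    private final AtomicBoolean inProgress = new AtomicBoolean(false);

    interface AsyncTask {
        void run(Runnable onDone);
    }

    boolean tryRun(AsyncTask task) {
        if (inProgress.compareAndSet(false, true) == false) {
            return false; // another attempt is already in flight
        }
        task.run(() -> inProgress.set(false));
        return true;
    }
}
--------------------------------------------------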
@@ -73,6 +73,15 @@ import java.util.stream.Collectors;
import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN;
import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;

/**
 * This class implements CRUD operations for the
 * datafeed configuration document
 *
 * The number of datafeeds returned in a search is limited to
 * {@link AnomalyDetectorsIndex#CONFIG_INDEX_MAX_RESULTS_WINDOW}.
 * In most cases we expect 10s or 100s of datafeeds to be defined and
 * a search for all datafeeds should return all.
 */
public class DatafeedConfigProvider {

    private static final Logger logger = LogManager.getLogger(DatafeedConfigProvider.class);
@ -87,13 +96,6 @@ public class DatafeedConfigProvider {
|
|||
TO_XCONTENT_PARAMS = Collections.unmodifiableMap(modifiable);
|
||||
}
|
||||
|
||||
/**
|
||||
* In most cases we expect 10s or 100s of datafeeds to be defined and
|
||||
* a search for all datafeeds should return all.
|
||||
* TODO this is a temporary fix
|
||||
*/
|
||||
public int searchSize = 1000;
|
||||
|
||||
public DatafeedConfigProvider(Client client, NamedXContentRegistry xContentRegistry) {
|
||||
this.client = client;
|
||||
this.xContentRegistry = xContentRegistry;
|
||||
|
@ -368,7 +370,9 @@ public class DatafeedConfigProvider {
|
|||
|
||||
SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder).request();
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
|
||||
ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoDatafeeds);
|
||||
|
||||
|
@ -407,7 +411,6 @@ public class DatafeedConfigProvider {
|
|||
* wildcard then setting this true will not suppress the exception
|
||||
* @param listener The expanded datafeed config listener
|
||||
*/
|
||||
// NORELEASE datafeed configs should be paged or have a mechanism to return all jobs if there are many of them
|
||||
public void expandDatafeedConfigs(String expression, boolean allowNoDatafeeds, ActionListener<List<DatafeedConfig.Builder>> listener) {
|
||||
String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression);
|
||||
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildDatafeedIdQuery(tokens));
|
||||
|
@ -416,7 +419,7 @@ public class DatafeedConfigProvider {
|
|||
SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(searchSize)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
|
||||
ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoDatafeeds);
|
||||
|
|
|
@@ -89,6 +89,11 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin;
/**
 * This class implements CRUD operations for the
 * anomaly detector job configuration document
 *
 * The number of jobs returned in a search is limited to
 * {@link AnomalyDetectorsIndex#CONFIG_INDEX_MAX_RESULTS_WINDOW}.
 * In most cases we expect 10s or 100s of jobs to be defined and
 * a search for all jobs should return all.
 */
public class JobConfigProvider {

@ -101,13 +106,6 @@ public class JobConfigProvider {
|
|||
TO_XCONTENT_PARAMS = Collections.unmodifiableMap(modifiable);
|
||||
}
|
||||
|
||||
/**
|
||||
* In most cases we expect 10s or 100s of jobs to be defined and
|
||||
* a search for all jobs should return all.
|
||||
* TODO this is a temporary fix
|
||||
*/
|
||||
private int searchSize = 1000;
|
||||
|
||||
private final Client client;
|
||||
|
||||
public JobConfigProvider(Client client) {
|
||||
|
@ -565,7 +563,7 @@ public class JobConfigProvider {
|
|||
SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(searchSize)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
|
||||
ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoJobs);
|
||||
|
@ -599,6 +597,21 @@ public class JobConfigProvider {
|
|||
|
||||
}
|
||||
|
||||
private SearchRequest makeExpandIdsSearchRequest(String expression, boolean excludeDeleting) {
|
||||
String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression);
|
||||
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting));
|
||||
sourceBuilder.sort(Job.ID.getPreferredName());
|
||||
sourceBuilder.fetchSource(false);
|
||||
sourceBuilder.docValueField(Job.ID.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT);
|
||||
sourceBuilder.docValueField(Job.GROUPS.getPreferredName(), DocValueFieldsContext.USE_DEFAULT_FORMAT);
|
||||
|
||||
return client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
}
|
||||
|
||||
/**
|
||||
* The same logic as {@link #expandJobsIds(String, boolean, boolean, ActionListener)} but
|
||||
* the full anomaly detector job configuration is returned.
|
||||
|
@ -612,7 +625,6 @@ public class JobConfigProvider {
|
|||
* @param excludeDeleting If true exclude jobs marked as deleting
|
||||
* @param listener The expanded jobs listener
|
||||
*/
|
||||
// NORELEASE jobs should be paged or have a mechanism to return all jobs if there are many of them
|
||||
public void expandJobs(String expression, boolean allowNoJobs, boolean excludeDeleting, ActionListener<List<Job.Builder>> listener) {
|
||||
String [] tokens = ExpandedIdsMatcher.tokenizeExpression(expression);
|
||||
SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(buildQuery(tokens, excludeDeleting));
|
||||
|
@ -621,7 +633,7 @@ public class JobConfigProvider {
|
|||
SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(searchSize)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
|
||||
ExpandedIdsMatcher requiredMatches = new ExpandedIdsMatcher(tokens, allowNoJobs);
|
||||
|
@ -679,7 +691,7 @@ public class JobConfigProvider {
|
|||
SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(searchSize)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
|
||||
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
|
||||
|
@ -741,7 +753,7 @@ public class JobConfigProvider {
|
|||
SearchRequest searchRequest = client.prepareSearch(AnomalyDetectorsIndex.configIndexName())
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setSource(sourceBuilder)
|
||||
.setSize(searchSize)
|
||||
.setSize(AnomalyDetectorsIndex.CONFIG_INDEX_MAX_RESULTS_WINDOW)
|
||||
.request();
|
||||
|
||||
executeAsyncWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN, searchRequest,
|
||||
|
|
|
@ -45,13 +45,12 @@ import org.elasticsearch.xpack.ml.MachineLearning;
|
|||
import org.elasticsearch.xpack.ml.action.TransportOpenJobAction.JobTask;
|
||||
import org.elasticsearch.xpack.ml.job.JobManager;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.JobDataCountsPersister;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.JobRenormalizedResultsPersister;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.ScheduledEventsQueryBuilder;
|
||||
import org.elasticsearch.xpack.ml.job.persistence.StateStreamer;
|
||||
import org.elasticsearch.xpack.ml.job.process.DataCountsReporter;
|
||||
import org.elasticsearch.xpack.ml.process.NativeStorageProvider;
|
||||
import org.elasticsearch.xpack.ml.job.process.autodetect.output.AutoDetectResultProcessor;
|
||||
import org.elasticsearch.xpack.ml.job.process.autodetect.params.AutodetectParams;
|
||||
import org.elasticsearch.xpack.ml.job.process.autodetect.params.DataLoadParams;
|
||||
|
@ -62,6 +61,7 @@ import org.elasticsearch.xpack.ml.job.process.normalizer.Renormalizer;
|
|||
import org.elasticsearch.xpack.ml.job.process.normalizer.ScoresUpdater;
|
||||
import org.elasticsearch.xpack.ml.job.process.normalizer.ShortCircuitingRenormalizer;
|
||||
import org.elasticsearch.xpack.ml.notifications.Auditor;
|
||||
import org.elasticsearch.xpack.ml.process.NativeStorageProvider;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
@ -408,7 +408,6 @@ public class AutodetectProcessManager {
|
|||
logger.info("Opening job [{}]", jobId);
|
||||
|
||||
jobManager.getJob(jobId, ActionListener.wrap(
|
||||
// NORELEASE JIndex. Should not be doing this work on the network thread
|
||||
job -> {
|
||||
if (job.getJobVersion() == null) {
|
||||
closeHandler.accept(ExceptionsHelper.badRequestException("Cannot open job [" + jobId
|
||||
|
|
|
@ -8,13 +8,22 @@ package org.elasticsearch.xpack.ml;
|
|||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.xpack.core.ml.MlMetadata;
|
||||
|
@ -24,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
|
|||
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
|
||||
import org.elasticsearch.xpack.core.ml.job.config.Job;
|
||||
import org.elasticsearch.xpack.core.ml.job.config.JobTests;
|
||||
import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.net.InetAddress;
|
||||
|
@ -53,12 +63,18 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testCanStartMigration_givenNodesNotUpToVersion() {
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
|
||||
// mixed 6.5 and 6.6 nodes
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
.nodes(DiscoveryNodes.builder()
|
||||
.add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0))
|
||||
.add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0)))
|
||||
.build();
|
||||
.nodes(DiscoveryNodes.builder()
|
||||
.add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_5_0))
|
||||
.add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0)))
|
||||
.routingTable(routingTable.build())
|
||||
.metaData(metaData)
|
||||
.build();
|
||||
|
||||
Settings settings = newSettings(true);
|
||||
givenClusterSettings(settings);
|
||||
|
@ -69,12 +85,18 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testCanStartMigration_givenNodesNotUpToVersionAndMigrationIsEnabled() {
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
|
||||
// mixed 6.5 and 6.6 nodes
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
.nodes(DiscoveryNodes.builder()
|
||||
.add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_6_0))
|
||||
.add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0)))
|
||||
.build();
|
||||
.nodes(DiscoveryNodes.builder()
|
||||
.add(new DiscoveryNode("node_id1", new TransportAddress(InetAddress.getLoopbackAddress(), 9300), Version.V_6_6_0))
|
||||
.add(new DiscoveryNode("node_id2", new TransportAddress(InetAddress.getLoopbackAddress(), 9301), Version.V_6_6_0)))
|
||||
.routingTable(routingTable.build())
|
||||
.metaData(metaData)
|
||||
.build();
|
||||
|
||||
Settings settings = newSettings(true);
|
||||
givenClusterSettings(settings);
|
||||
|
@ -84,6 +106,52 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase {
|
|||
assertTrue(check.canStartMigration(clusterState));
|
||||
}
|
||||
|
||||
public void testCanStartMigration_givenMissingIndex() {
|
||||
Settings settings = newSettings(true);
|
||||
givenClusterSettings(settings);
|
||||
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests"))
|
||||
.build();
|
||||
|
||||
MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
|
||||
assertFalse(check.canStartMigration(clusterState));
|
||||
}
|
||||
|
||||
public void testCanStartMigration_givenInactiveShards() {
|
||||
Settings settings = newSettings(true);
|
||||
givenClusterSettings(settings);
|
||||
|
||||
// index is present but no routing
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests"))
|
||||
.metaData(metaData)
|
||||
.build();
|
||||
|
||||
MlConfigMigrationEligibilityCheck check = new MlConfigMigrationEligibilityCheck(settings, clusterService);
|
||||
assertFalse(check.canStartMigration(clusterState));
|
||||
}
|
||||
|
||||
private void addMlConfigIndex(MetaData.Builder metaData, RoutingTable.Builder routingTable) {
|
||||
IndexMetaData.Builder indexMetaData = IndexMetaData.builder(AnomalyDetectorsIndex.configIndexName());
|
||||
indexMetaData.settings(Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
);
|
||||
metaData.put(indexMetaData);
|
||||
Index index = new Index(AnomalyDetectorsIndex.configIndexName(), "_uuid");
|
||||
ShardId shardId = new ShardId(index, 0);
|
||||
ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE,
|
||||
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""));
|
||||
shardRouting = shardRouting.initialize("node_id", null, 0L);
|
||||
shardRouting = shardRouting.moveToStarted();
|
||||
routingTable.add(IndexRoutingTable.builder(index)
|
||||
.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build()));
|
||||
}
|
||||
|
||||
|
||||
public void testJobIsEligibleForMigration_givenNodesNotUpToVersion() {
|
||||
// mixed 6.5 and 6.6 nodes
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
|
@ -185,11 +253,14 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase {
|
|||
Job closedJob = JobTests.buildJobBuilder("closed-job").build();
|
||||
MlMetadata.Builder mlMetadata = new MlMetadata.Builder().putJob(closedJob, false);
|
||||
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests"))
|
||||
.metaData(MetaData.builder()
|
||||
.putCustom(MlMetadata.TYPE, mlMetadata.build())
|
||||
)
|
||||
.build();
|
||||
.metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.routingTable(routingTable.build())
|
||||
.build();
|
||||
|
||||
Settings settings = newSettings(true);
|
||||
givenClusterSettings(settings);
|
||||
|
@ -283,11 +354,14 @@ public class MlConfigMigrationEligibilityCheckTests extends ESTestCase {
|
|||
mlMetadata.putDatafeed(createCompatibleDatafeed(job.getId()), Collections.emptyMap());
|
||||
String datafeedId = "df-" + job.getId();
|
||||
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("migratortests"))
|
||||
.metaData(MetaData.builder()
|
||||
.putCustom(MlMetadata.TYPE, mlMetadata.build())
|
||||
)
|
||||
.build();
|
||||
.metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.routingTable(routingTable.build())
|
||||
.build();
|
||||
|
||||
Settings settings = newSettings(true);
|
||||
givenClusterSettings(settings);
|
||||
|
|
|
@ -5,12 +5,20 @@
|
|||
*/
|
||||
package org.elasticsearch.xpack.ml.integration;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -19,6 +27,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
|||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.xpack.core.ml.MlMetadata;
|
||||
import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig;
|
||||
import org.elasticsearch.xpack.core.ml.job.config.Job;
|
||||
|
@ -117,9 +127,12 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase {
|
|||
builder.setIndices(Collections.singletonList("beats*"));
|
||||
mlMetadata.putDatafeed(builder.build(), Collections.emptyMap());
|
||||
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
.metaData(MetaData.builder()
|
||||
.putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.routingTable(routingTable.build())
|
||||
.build();
|
||||
|
||||
doAnswer(invocation -> {
|
||||
|
@ -180,10 +193,13 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase {
|
|||
mlMetadata.putDatafeed(builder.build(), Collections.emptyMap());
|
||||
}
|
||||
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
addMlConfigIndex(metaData, routingTable);
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
.metaData(MetaData.builder()
|
||||
.putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.build();
|
||||
.metaData(metaData.putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.routingTable(routingTable.build())
|
||||
.build();
|
||||
|
||||
doAnswer(invocation -> {
|
||||
ClusterStateUpdateTask listener = (ClusterStateUpdateTask) invocation.getArguments()[1];
|
||||
|
@ -304,6 +320,49 @@ public class MlConfigMigratorIT extends MlSingleNodeTestCase {
|
|||
assertEquals(expectedMlMetadata, recoveredMeta);
|
||||
}
|
||||
}
|
||||
|
||||
private void addMlConfigIndex(MetaData.Builder metaData, RoutingTable.Builder routingTable) {
|
||||
IndexMetaData.Builder indexMetaData = IndexMetaData.builder(AnomalyDetectorsIndex.configIndexName());
|
||||
indexMetaData.settings(Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
);
|
||||
metaData.put(indexMetaData);
|
||||
Index index = new Index(AnomalyDetectorsIndex.configIndexName(), "_uuid");
|
||||
ShardId shardId = new ShardId(index, 0);
|
||||
ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE,
|
||||
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""));
|
||||
shardRouting = shardRouting.initialize("node_id", null, 0L);
|
||||
shardRouting = shardRouting.moveToStarted();
|
||||
routingTable.add(IndexRoutingTable.builder(index)
|
||||
.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build()));
|
||||
}
|
||||
|
||||
public void testConfigIndexIsCreated() throws Exception {
|
||||
// and jobs and datafeeds clusterstate
|
||||
MlMetadata.Builder mlMetadata = new MlMetadata.Builder();
|
||||
mlMetadata.putJob(buildJobBuilder("job-foo").build(), false);
|
||||
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
.metaData(MetaData.builder().putCustom(MlMetadata.TYPE, mlMetadata.build()))
|
||||
.build();
|
||||
|
||||
AtomicReference<Exception> exceptionHolder = new AtomicReference<>();
|
||||
AtomicReference<Boolean> responseHolder = new AtomicReference<>();
|
||||
MlConfigMigrator mlConfigMigrator = new MlConfigMigrator(nodeSettings(), client(), clusterService);
|
||||
|
||||
// if the cluster state has a job config and the index does not
|
||||
// exist it should be created
|
||||
blockingCall(actionListener -> mlConfigMigrator.migrateConfigsWithoutTasks(clusterState, actionListener),
|
||||
responseHolder, exceptionHolder);
|
||||
|
||||
assertBusy(() -> assertTrue(configIndexExists()));
|
||||
}
|
||||
|
||||
private boolean configIndexExists() {
|
||||
return client().admin().indices().prepareExists(AnomalyDetectorsIndex.configIndexName()).get().isExists();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -8,13 +8,21 @@ package org.elasticsearch.xpack.ml.job;
|
|||
import org.elasticsearch.ElasticsearchStatusException;
|
||||
import org.elasticsearch.ResourceAlreadyExistsException;
|
||||
import org.elasticsearch.ResourceNotFoundException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.document.DocumentField;
|
||||
|
@ -25,7 +33,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.TestEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.analysis.AnalysisRegistry;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.persistent.PersistentTasksCustomMetaData;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -448,9 +458,29 @@ public class JobManagerTests extends ESTestCase {
|
|||
|
||||
public void testUpdateJob_notAllowedPreMigration() {
|
||||
MlMetadata.Builder mlmetadata = new MlMetadata.Builder().putJob(buildJobBuilder("closed-job-not-migrated").build(), false);
|
||||
|
||||
MetaData.Builder metaData = MetaData.builder();
|
||||
RoutingTable.Builder routingTable = RoutingTable.builder();
|
||||
|
||||
IndexMetaData.Builder indexMetaData = IndexMetaData.builder(AnomalyDetectorsIndex.configIndexName());
|
||||
indexMetaData.settings(Settings.builder()
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
);
|
||||
metaData.put(indexMetaData);
|
||||
Index index = new Index(AnomalyDetectorsIndex.configIndexName(), "_uuid");
|
||||
ShardId shardId = new ShardId(index, 0);
|
||||
ShardRouting shardRouting = ShardRouting.newUnassigned(shardId, true, RecoverySource.EmptyStoreRecoverySource.INSTANCE,
|
||||
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""));
|
||||
shardRouting = shardRouting.initialize("node_id", null, 0L);
|
||||
shardRouting = shardRouting.moveToStarted();
|
||||
routingTable.add(IndexRoutingTable.builder(index)
|
||||
.addIndexShard(new IndexShardRoutingTable.Builder(shardId).addShard(shardRouting).build()));
|
||||
|
||||
ClusterState clusterState = ClusterState.builder(new ClusterName("_name"))
|
||||
.metaData(MetaData.builder()
|
||||
.putCustom(MlMetadata.TYPE, mlmetadata.build()))
|
||||
.metaData(metaData.putCustom(MlMetadata.TYPE, mlmetadata.build()))
|
||||
.routingTable(routingTable.build())
|
||||
.build();
|
||||
when(clusterService.state()).thenReturn(clusterState);
|
||||
|
||||
|
|
|
@@ -634,6 +634,9 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
        }
        Set<String> headers = new HashSet<>();
        headers.add(UsernamePasswordToken.BASIC_AUTH_HEADER);
        if (XPackSettings.AUDIT_ENABLED.get(settings)) {
            headers.add(AuditTrail.X_FORWARDED_FOR_HEADER);
        }
        if (AuthenticationServiceField.RUN_AS_ENABLED.get(settings)) {
            headers.add(AuthenticationServiceField.RUN_AS_USER_HEADER);
        }

@@ -16,6 +16,8 @@ import java.net.InetAddress;

public interface AuditTrail {

    String X_FORWARDED_FOR_HEADER = "X-Forwarded-For";

    String name();

    void authenticationSuccess(String requestId, String realm, User user, RestRequest request);

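Not part of the commit, but useful context: a minimal sketch of how a proxy in front of the cluster could pass the original client address along, assuming the low-level REST client and a node listening on localhost:9200 (the class name is illustrative). With auditing enabled (`xpack.security.audit.enabled: true`), the header registered above is copied into the request thread context and, per the changes that follow, written into each audit log entry.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class XForwardedForSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("GET", "/_cluster/health");
            RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder();
            // Chain of addresses a forwarding proxy would report for the original client.
            options.addHeader("X-Forwarded-For", "203.0.113.195, 70.41.3.18");
            request.setOptions(options);
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------
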
@@ -108,6 +108,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
    public static final String TRANSPORT_PROFILE_FIELD_NAME = "transport.profile";
    public static final String RULE_FIELD_NAME = "rule";
    public static final String OPAQUE_ID_FIELD_NAME = "opaque_id";
    public static final String X_FORWARDED_FOR_FIELD_NAME = "x_forwarded_for";

    public static final String NAME = "logfile";
    public static final Setting<Boolean> EMIT_HOST_ADDRESS_SETTING = Setting.boolSetting(setting("audit.logfile.emit_node_host_address"),

@@ -216,6 +217,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrigin(request)
            .withRequestBody(request)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -238,6 +240,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrTransportOrigin(message, threadContext)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -259,6 +262,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrTransportOrigin(message, threadContext)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -277,6 +281,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRequestBody(request)
            .withRequestId(requestId)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -298,6 +303,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrTransportOrigin(message, threadContext)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -315,6 +321,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRequestBody(request)
            .withRequestId(requestId)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -335,6 +342,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrTransportOrigin(message, threadContext)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -354,6 +362,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRequestBody(request)
            .withRequestId(requestId)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -376,6 +385,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrTransportOrigin(message, threadContext)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -396,6 +406,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRequestBody(request)
            .withRequestId(requestId)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -420,6 +431,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -443,6 +455,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -460,6 +473,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRequestBody(request)
            .withRequestId(requestId)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -480,6 +494,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRestOrTransportOrigin(message, threadContext)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -502,6 +517,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withPrincipal(user)
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -520,6 +536,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .with(TRANSPORT_PROFILE_FIELD_NAME, profile)
            .with(RULE_FIELD_NAME, rule.toString())
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -537,6 +554,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .with(TRANSPORT_PROFILE_FIELD_NAME, profile)
            .with(RULE_FIELD_NAME, rule.toString())
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -559,6 +577,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -582,6 +601,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .with(INDICES_FIELD_NAME, indices.orElse(null))
            .with(PRINCIPAL_ROLES_FIELD_NAME, roleNames)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -603,6 +623,7 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            .withRequestBody(request)
            .withRequestId(requestId)
            .withOpaqueId(threadContext)
            .withXForwardedFor(threadContext)
            .build();
        logger.info(logEntry);
    }

@@ -696,6 +717,14 @@ public class LoggingAuditTrail implements AuditTrail, ClusterStateListener {
            return this;
        }

        LogEntryBuilder withXForwardedFor(ThreadContext threadContext) {
            final String xForwardedFor = threadContext.getHeader(AuditTrail.X_FORWARDED_FOR_HEADER);
            if (xForwardedFor != null) {
                logEntry.with(X_FORWARDED_FOR_FIELD_NAME, xForwardedFor);
            }
            return this;
        }

        LogEntryBuilder withPrincipal(User user) {
            logEntry.with(PRINCIPAL_FIELD_NAME, user.principal());
            if (user.isRunAs()) {

@@ -38,6 +38,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication.RealmRef;
import org.elasticsearch.xpack.core.security.authc.AuthenticationToken;
import org.elasticsearch.xpack.core.security.user.SystemUser;
import org.elasticsearch.xpack.core.security.user.User;
import org.elasticsearch.xpack.security.audit.AuditTrail;
import org.elasticsearch.xpack.security.audit.AuditUtil;
import org.elasticsearch.xpack.security.rest.RemoteHostHeader;
import org.elasticsearch.xpack.security.transport.filter.IPFilter;

@@ -189,6 +190,10 @@ public class LoggingAuditTrailTests extends ESTestCase {
        if (randomBoolean()) {
            threadContext.putHeader(Task.X_OPAQUE_ID, randomAlphaOfLengthBetween(1, 4));
        }
        if (randomBoolean()) {
            threadContext.putHeader(AuditTrail.X_FORWARDED_FOR_HEADER,
                randomFrom("2001:db8:85a3:8d3:1319:8a2e:370:7348", "203.0.113.195", "203.0.113.195, 70.41.3.18, 150.172.238.178"));
        }
        logger = CapturingLogger.newCapturingLogger(Level.INFO, patternLayout);
        auditTrail = new LoggingAuditTrail(settings, clusterService, logger, threadContext);
    }

@@ -212,6 +217,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        indicesRequest(message, checkedFields, checkedArrayFields);
        restOrTransportOrigin(message, threadContext, checkedFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -245,6 +251,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri")
            .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, null);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());

        // test disabled

@@ -275,6 +282,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -303,6 +311,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -343,6 +352,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri")
            .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? null : "foo=bar");
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());

        // test disabled

@@ -382,6 +392,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri")
            .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? null : "bar=baz");
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());

        // test disabled

@@ -422,6 +433,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());
    }

@@ -462,6 +474,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri")
            .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? null : "_param=baz");
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());
    }

@@ -484,6 +497,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -525,6 +539,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());
    }

@@ -547,6 +562,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -579,6 +595,8 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);

        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -615,6 +633,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.URL_PATH_FIELD_NAME, "_uri")
            .put(LoggingAuditTrail.URL_QUERY_FIELD_NAME, params.isEmpty() ? null : "_param=baz");
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());

        // test disabled

@@ -643,6 +662,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -684,6 +704,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -713,6 +734,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.TRANSPORT_PROFILE_FIELD_NAME, profile)
            .put(LoggingAuditTrail.RULE_FIELD_NAME, "deny _all");
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());

        // test disabled

@@ -751,6 +773,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            .put(LoggingAuditTrail.TRANSPORT_PROFILE_FIELD_NAME, profile)
            .put(LoggingAuditTrail.RULE_FIELD_NAME, "allow default:accept_all");
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());
    }

@@ -779,6 +802,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -817,6 +841,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());

        // test disabled

@@ -878,6 +903,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
            checkedFields.put(LoggingAuditTrail.PRINCIPAL_FIELD_NAME, "_username");
        }
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap());
    }

@@ -919,6 +945,7 @@ public class LoggingAuditTrailTests extends ESTestCase {
        restOrTransportOrigin(message, threadContext, checkedFields);
        indicesRequest(message, checkedFields, checkedArrayFields);
        opaqueId(threadContext, checkedFields);
        forwardedFor(threadContext, checkedFields);
        assertMsg(logger, checkedFields.immutableMap(), checkedArrayFields.immutableMap());
    }

@@ -1177,6 +1204,15 @@ public class LoggingAuditTrailTests extends ESTestCase {
        }
    }

    private static void forwardedFor(ThreadContext threadContext, MapBuilder<String, String> checkedFields) {
        final String forwardedFor = threadContext.getHeader(AuditTrail.X_FORWARDED_FOR_HEADER);
        if (forwardedFor != null) {
            checkedFields.put(LoggingAuditTrail.X_FORWARDED_FOR_FIELD_NAME, forwardedFor);
        } else {
            checkedFields.put(LoggingAuditTrail.X_FORWARDED_FOR_FIELD_NAME, null);
        }
    }

    private static void indicesRequest(TransportMessage message, MapBuilder<String, String> checkedFields,
                                       MapBuilder<String, String[]> checkedArrayFields) {
        if (message instanceof IndicesRequest) {

@@ -0,0 +1,81 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.sql.jdbc;

import java.sql.Date;
import java.sql.Time;
import java.sql.Timestamp;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.util.Locale;
import java.util.function.Function;

import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE;
import static java.time.temporal.ChronoField.HOUR_OF_DAY;
import static java.time.temporal.ChronoField.MILLI_OF_SECOND;
import static java.time.temporal.ChronoField.MINUTE_OF_HOUR;
import static java.time.temporal.ChronoField.SECOND_OF_MINUTE;

/**
 * JDBC-specific datetime utility methods. Because of lack of visibility, this class borrows code
 * from {@code org.elasticsearch.xpack.sql.util.DateUtils} and {@code org.elasticsearch.xpack.sql.proto.StringUtils}.
 */
final class JdbcDateUtils {

    private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000;

    static final DateTimeFormatter ISO_WITH_MILLIS = new DateTimeFormatterBuilder()
        .parseCaseInsensitive()
        .append(ISO_LOCAL_DATE)
        .appendLiteral('T')
        .appendValue(HOUR_OF_DAY, 2)
        .appendLiteral(':')
        .appendValue(MINUTE_OF_HOUR, 2)
        .appendLiteral(':')
        .appendValue(SECOND_OF_MINUTE, 2)
        .appendFraction(MILLI_OF_SECOND, 3, 3, true)
        .appendOffsetId()
        .toFormatter(Locale.ROOT);

    static long asMillisSinceEpoch(String date) {
        ZonedDateTime zdt = ISO_WITH_MILLIS.parse(date, ZonedDateTime::from);
        return zdt.toInstant().toEpochMilli();
    }

    static Date asDate(String date) {
        return new Date(utcMillisRemoveTime(asMillisSinceEpoch(date)));
    }

    static Time asTime(String date) {
        return new Time(utcMillisRemoveDate(asMillisSinceEpoch(date)));
    }

    static Timestamp asTimestamp(String date) {
        return new Timestamp(asMillisSinceEpoch(date));
    }

    /*
     * Handles the value received as parameter, as either String (a ZonedDateTime formatted in ISO 8601 standard with millis) -
     * date fields being returned formatted like this. Or a Long value, in case of Histograms.
     */
    static <R> R asDateTimeField(Object value, Function<String, R> asDateTimeMethod, Function<Long, R> ctor) {
        if (value instanceof String) {
            return asDateTimeMethod.apply((String) value);
        } else {
            return ctor.apply(((Number) value).longValue());
        }
    }

    private static long utcMillisRemoveTime(long l) {
        return l - (l % DAY_IN_MILLIS);
    }

    private static long utcMillisRemoveDate(long l) {
        return l % DAY_IN_MILLIS;
    }
}

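Not from the commit: a usage sketch of the helper above, assuming it lives in the same package (the class is package-private) and that the illustrative values below stand in for what the server returns. A DATE column can arrive either as an ISO 8601 string (regular columns) or as epoch millis (for example histogram keys); asDateTimeField dispatches on the shape of the value.

[source,java]
--------------------------------------------------
package org.elasticsearch.xpack.sql.jdbc; // same package: JdbcDateUtils is package-private

import java.sql.Timestamp;

class JdbcDateUtilsUsageSketch {
    static void demo() {
        // Hypothetical server values: an ISO 8601 string and the same instant as epoch millis.
        Object fromColumn = "2019-01-21T12:34:56.789Z";
        Object fromHistogram = 1548074096789L;

        // Both shapes end up converted to the same java.sql.Timestamp.
        Timestamp a = JdbcDateUtils.asDateTimeField(fromColumn, JdbcDateUtils::asTimestamp, Timestamp::new);
        Timestamp b = JdbcDateUtils.asDateTimeField(fromHistogram, JdbcDateUtils::asTimestamp, Timestamp::new);
        assert a.equals(b);
    }
}
--------------------------------------------------
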
@@ -30,6 +30,7 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;

import static java.lang.String.format;

@@ -248,7 +249,10 @@ class JdbcResultSet implements ResultSet, JdbcWrapper {
            // the cursor can return an Integer if the date-since-epoch is small enough, XContentParser (Jackson) will
            // return the "smallest" data type for numbers when parsing
            // TODO: this should probably be handled server side
            return val == null ? null : ((Number) val).longValue();
            if (val == null) {
                return null;
            }
            return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asMillisSinceEpoch, Function.identity());
        };
        return val == null ? null : (Long) val;
    } catch (ClassCastException cce) {

@@ -48,8 +48,6 @@ final class TypeConverter {

    private TypeConverter() {}

    private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000;

    /**
     * Converts milliseconds after epoch to date
     */

@@ -216,7 +214,7 @@ final class TypeConverter {
        case FLOAT:
            return floatValue(v); // Float might be represented as string for infinity and NaN values
        case DATE:
            return new Timestamp(((Number) v).longValue());
            return JdbcDateUtils.asDateTimeField(v, JdbcDateUtils::asTimestamp, Timestamp::new);
        case INTERVAL_YEAR:
        case INTERVAL_MONTH:
        case INTERVAL_YEAR_TO_MONTH:

@@ -470,21 +468,21 @@ final class TypeConverter {

    private static Date asDate(Object val, EsType columnType, String typeString) throws SQLException {
        if (columnType == EsType.DATE) {
            return new Date(utcMillisRemoveTime(((Number) val).longValue()));
            return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asDate, Date::new);
        }
        return failConversion(val, columnType, typeString, Date.class);
    }

    private static Time asTime(Object val, EsType columnType, String typeString) throws SQLException {
        if (columnType == EsType.DATE) {
            return new Time(utcMillisRemoveDate(((Number) val).longValue()));
            return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asTime, Time::new);
        }
        return failConversion(val, columnType, typeString, Time.class);
    }

    private static Timestamp asTimestamp(Object val, EsType columnType, String typeString) throws SQLException {
        if (columnType == EsType.DATE) {
            return new Timestamp(((Number) val).longValue());
            return JdbcDateUtils.asDateTimeField(val, JdbcDateUtils::asTimestamp, Timestamp::new);
        }
        return failConversion(val, columnType, typeString, Timestamp.class);
    }

@@ -513,14 +511,6 @@ final class TypeConverter {
        throw new SQLFeatureNotSupportedException();
    }

    private static long utcMillisRemoveTime(long l) {
        return l - (l % DAY_IN_MILLIS);
    }

    private static long utcMillisRemoveDate(long l) {
        return l % DAY_IN_MILLIS;
    }

    private static byte safeToByte(long x) throws SQLException {
        if (x > Byte.MAX_VALUE || x < Byte.MIN_VALUE) {
            throw new SQLException(format(Locale.ROOT, "Numeric %s out of range", Long.toString(x)));

@@ -31,6 +31,7 @@ import java.util.Set;
import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.assertNoSearchContexts;

public abstract class JdbcIntegrationTestCase extends ESRestTestCase {

    @After
    public void checkSearchContent() throws Exception {
        // Some context might linger due to fire and forget nature of scroll cleanup

@@ -12,6 +12,9 @@ import org.elasticsearch.xpack.sql.proto.ColumnInfo;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.time.Instant;
import java.time.ZoneId;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;

@@ -20,6 +23,8 @@ public abstract class JdbcTestUtils {
    public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE";

    public static final String JDBC_TIMEZONE = "timezone";

    public static ZoneId UTC = ZoneId.of("Z");

    public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException {
        ResultSetMetaData metaData = rs.getMetaData();

@@ -128,4 +133,8 @@ public abstract class JdbcTestUtils {
        CliFormatter formatter = new CliFormatter(cols, data);
        logger.info("\n" + formatter.formatWithHeader(cols, data));
    }

    public static ZonedDateTime of(long millis) {
        return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC);
    }
}

@@ -58,6 +58,7 @@ import static java.util.Calendar.MONTH;
import static java.util.Calendar.SECOND;
import static java.util.Calendar.YEAR;
import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE;
import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.of;

public class ResultSetTestCase extends JdbcIntegrationTestCase {

@@ -200,10 +201,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getByte("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", of(randomDate)),
                sqle.getMessage());
            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Byte.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Byte]", of(randomDate)),
                sqle.getMessage());
        });
    }

@@ -323,10 +324,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getShort("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", of(randomDate)),
                sqle.getMessage());
            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Short.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Short]", of(randomDate)),
                sqle.getMessage());
        });
    }

@@ -438,10 +439,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getInt("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", of(randomDate)),
                sqle.getMessage());
            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Integer.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Integer]", of(randomDate)),
                sqle.getMessage());
        });
    }

@@ -540,10 +541,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getLong("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", of(randomDate)),
                sqle.getMessage());
            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Long.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Long]", of(randomDate)),
                sqle.getMessage());
        });
    }

@@ -623,10 +624,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getDouble("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", of(randomDate)),
                sqle.getMessage());
            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Double.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Double]", of(randomDate)),
                sqle.getMessage());
        });
    }

@@ -706,10 +707,10 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getFloat("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", of(randomDate)),
                sqle.getMessage());
            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Float.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", randomDate),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Float]", of(randomDate)),
                sqle.getMessage());
        });
    }

@@ -767,7 +768,7 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                assertEquals("Expected: <true> but was: <false> for field " + fld, true, results.getObject(fld, Boolean.class));
            }
            SQLException sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", randomDate1),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", of(randomDate1)),
                sqle.getMessage());

            results.next();

@@ -777,11 +778,11 @@ public class ResultSetTestCase extends JdbcIntegrationTestCase {
                assertEquals("Expected: <false> but was: <true> for field " + fld, false, results.getObject(fld, Boolean.class));
            }
            sqle = expectThrows(SQLException.class, () -> results.getBoolean("test_date"));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", randomDate2),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", of(randomDate2)),
                sqle.getMessage());

            sqle = expectThrows(SQLException.class, () -> results.getObject("test_date", Boolean.class));
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", randomDate2),
            assertEquals(format(Locale.ROOT, "Unable to convert value [%.128s] of type [DATE] to [Boolean]", of(randomDate2)),
                sqle.getMessage());

            results.next();

@@ -171,15 +171,8 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject
    public static XContentBuilder value(XContentBuilder builder, Mode mode, Object value) throws IOException {
        if (value instanceof ZonedDateTime) {
            ZonedDateTime zdt = (ZonedDateTime) value;
            if (Mode.isDriver(mode)) {
                // JDBC cannot parse dates in string format and ODBC can have issues with it
                // so instead, use the millis since epoch (in UTC)
                builder.value(zdt.toInstant().toEpochMilli());
            }
            // otherwise use the ISO format
            else {
                builder.value(StringUtils.toString(zdt));
            }
            // use the ISO format
            builder.value(StringUtils.toString(zdt));
        } else {
            builder.value(value);
        }

@@ -223,12 +223,13 @@ public final class Verifier {
        validateInExpression(p, localFailures);
        validateConditional(p, localFailures);

        checkFilterOnAggs(p, localFailures);

        if (!groupingFailures.contains(p)) {
            checkGroupBy(p, localFailures, resolvedFunctions, groupingFailures);
        }

        checkForScoreInsideFunctions(p, localFailures);

        checkNestedUsedInGroupByOrHaving(p, localFailures);

        // everything checks out

@@ -370,7 +371,7 @@ public final class Verifier {
        if (!missing.isEmpty()) {
            String plural = missing.size() > 1 ? "s" : StringUtils.EMPTY;
            localFailures.add(
                fail(condition, "Cannot filter HAVING on non-aggregate" + plural + " %s; consider using WHERE instead",
                fail(condition, "Cannot use HAVING filter on non-aggregate" + plural + " %s; use WHERE instead",
                    Expressions.names(missing.keySet())));
            groupingFailures.add(a);
            return false;

@@ -542,6 +543,23 @@ public final class Verifier {
        return false;
    }

    private static void checkFilterOnAggs(LogicalPlan p, Set<Failure> localFailures) {
        if (p instanceof Filter) {
            Filter filter = (Filter) p;
            if ((filter.child() instanceof Aggregate) == false) {
                filter.condition().forEachDown(f -> {
                    if (Functions.isAggregate(f) || Functions.isGrouping(f)) {
                        String type = Functions.isAggregate(f) ? "aggregate" : "grouping";
                        localFailures.add(fail(f,
                            "Cannot use WHERE filtering on %s function [%s], use HAVING instead", type, Expressions.name(f)));
                    }

                }, Function.class);
            }
        }
    }

    private static void checkForScoreInsideFunctions(LogicalPlan p, Set<Failure> localFailures) {
        // Make sure that SCORE is only used in "top level" functions
        p.forEachExpressions(e ->

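For context, a minimal illustration of the queries affected by the new checkFilterOnAggs rule; the rejected query is the one exercised by the testAggsInWhere test further down, while the HAVING rewrite is an assumed equivalent that the error message steers users towards.

[source,java]
--------------------------------------------------
class WhereVsHavingSketch {
    // Rejected by checkFilterOnAggs: WHERE filters individual rows before grouping,
    // so an aggregate such as MAX(int) cannot be evaluated there.
    static final String REJECTED = "SELECT MAX(int) FROM test WHERE MAX(int) > 10 GROUP BY bool";

    // Filtering on the aggregated value belongs in HAVING, which runs after grouping.
    static final String ACCEPTED = "SELECT MAX(int) FROM test GROUP BY bool HAVING MAX(int) > 10";
}
--------------------------------------------------
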
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.expression.function;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.expression.function.aggregate.AggregateFunction;
import org.elasticsearch.xpack.sql.expression.function.grouping.GroupingFunction;
import org.elasticsearch.xpack.sql.plan.QueryPlan;

import java.util.LinkedHashMap;

@@ -18,6 +19,10 @@ public abstract class Functions {
        return e instanceof AggregateFunction;
    }

    public static boolean isGrouping(Expression e) {
        return e instanceof GroupingFunction;
    }

    public static Map<String, Function> collectFunctions(QueryPlan<?> plan) {
        Map<String, Function> resolvedFunctions = new LinkedHashMap<>();
        plan.forEachExpressionsDown(e -> {

@@ -227,7 +227,7 @@ public class VerifierErrorMessagesTests extends ESTestCase {
    }

    public void testGroupByHavingNonGrouped() {
        assertEquals("1:48: Cannot filter HAVING on non-aggregate [int]; consider using WHERE instead",
        assertEquals("1:48: Cannot use HAVING filter on non-aggregate [int]; use WHERE instead",
            error("SELECT AVG(int) FROM test GROUP BY text HAVING int > 10"));
    }

@@ -296,12 +296,12 @@ public class VerifierErrorMessagesTests extends ESTestCase {
    }

    public void testHavingOnColumn() {
        assertEquals("1:42: Cannot filter HAVING on non-aggregate [int]; consider using WHERE instead",
        assertEquals("1:42: Cannot use HAVING filter on non-aggregate [int]; use WHERE instead",
            error("SELECT int FROM test GROUP BY int HAVING int > 2"));
    }

    public void testHavingOnScalar() {
        assertEquals("1:42: Cannot filter HAVING on non-aggregate [int]; consider using WHERE instead",
        assertEquals("1:42: Cannot use HAVING filter on non-aggregate [int]; use WHERE instead",
            error("SELECT int FROM test GROUP BY int HAVING 2 < ABS(int)"));
    }

@@ -474,4 +474,15 @@ public class VerifierErrorMessagesTests extends ESTestCase {
                ": expected data type [KEYWORD], value provided is of type [INTEGER]",
                error("SELECT * FROM test WHERE " + arbirtraryArgsfunction + "(null, null, 'foo', 4) > 1"));
        }
    }

    public void testAggsInWhere() {
        assertEquals("1:33: Cannot use WHERE filtering on aggregate function [MAX(int)], use HAVING instead",
            error("SELECT MAX(int) FROM test WHERE MAX(int) > 10 GROUP BY bool"));
    }

    public void testHistogramInFilter() {
        assertEquals("1:63: Cannot use WHERE filtering on grouping function [HISTOGRAM(date)], use HAVING instead",
            error("SELECT HISTOGRAM(date, INTERVAL 1 MONTH) AS h FROM test WHERE "
                + "HISTOGRAM(date, INTERVAL 1 MONTH) > CAST('2000-01-01' AS DATE) GROUP BY h"));
    }
}