diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 1e8b6948ea6..b74f2b71e39 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -56,10 +56,6 @@
-
-
-
-
diff --git a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
index 3cf06dc6e88..361160ac6ac 100644
--- a/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/common-grams-tokenfilter.asciidoc
@@ -58,12 +58,12 @@ PUT /common_grams_example
"filter": {
"common_grams": {
"type": "common_grams",
- "common_words": ["a", "an", "the"]
+ "common_words": ["the", "is", "a"]
},
"common_grams_query": {
"type": "common_grams",
"query_mode": true,
- "common_words": ["a", "an", "the"]
+ "common_words": ["the", "is", "a"]
}
}
}
@@ -71,3 +71,101 @@ PUT /common_grams_example
}
--------------------------------------------------
// CONSOLE
+
+You can see the output by using, for example, the `_analyze` endpoint:
+
+[source,js]
+--------------------------------------------------
+POST /common_grams_example/_analyze
+{
+ "analyzer" : "index_grams",
+ "text" : "the quick brown is a fox"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The response will be:
+
+[source,js]
+--------------------------------------------------
+{
+ "tokens" : [
+ {
+ "token" : "the",
+ "start_offset" : 0,
+ "end_offset" : 3,
+ "type" : "word",
+ "position" : 0
+ },
+ {
+ "token" : "the_quick",
+ "start_offset" : 0,
+ "end_offset" : 9,
+ "type" : "gram",
+ "position" : 0,
+ "positionLength" : 2
+ },
+ {
+ "token" : "quick",
+ "start_offset" : 4,
+ "end_offset" : 9,
+ "type" : "word",
+ "position" : 1
+ },
+ {
+ "token" : "brown",
+ "start_offset" : 10,
+ "end_offset" : 15,
+ "type" : "word",
+ "position" : 2
+ },
+ {
+ "token" : "brown_is",
+ "start_offset" : 10,
+ "end_offset" : 18,
+ "type" : "gram",
+ "position" : 2,
+ "positionLength" : 2
+ },
+ {
+ "token" : "is",
+ "start_offset" : 16,
+ "end_offset" : 18,
+ "type" : "word",
+ "position" : 3
+ },
+ {
+ "token" : "is_a",
+ "start_offset" : 16,
+ "end_offset" : 20,
+ "type" : "gram",
+ "position" : 3,
+ "positionLength" : 2
+ },
+ {
+ "token" : "a",
+ "start_offset" : 19,
+ "end_offset" : 20,
+ "type" : "word",
+ "position" : 4
+ },
+ {
+ "token" : "a_fox",
+ "start_offset" : 19,
+ "end_offset" : 24,
+ "type" : "gram",
+ "position" : 4,
+ "positionLength" : 2
+ },
+ {
+ "token" : "fox",
+ "start_offset" : 21,
+ "end_offset" : 24,
+ "type" : "word",
+ "position" : 5
+ }
+ ]
+}
+--------------------------------------------------
+// TESTRESPONSE
\ No newline at end of file
diff --git a/docs/reference/query-dsl/intervals-query.asciidoc b/docs/reference/query-dsl/intervals-query.asciidoc
index 790fdf08bfd..27609e85659 100644
--- a/docs/reference/query-dsl/intervals-query.asciidoc
+++ b/docs/reference/query-dsl/intervals-query.asciidoc
@@ -154,6 +154,35 @@ Produces intervals that are not contained by an interval from the filter rule
`not_overlapping`::
Produces intervals that do not overlap with an interval from the filter rule
+[[interval-script-filter]]
+==== Script filters
+
+You can also filter intervals based on their start position, end position and
+internal gap count, using a script. The script has access to an `interval`
+variable, with `start`, `end` and `gaps` methods:
+
+[source,js]
+--------------------------------------------------
+POST _search
+{
+ "query": {
+ "intervals" : {
+ "my_text" : {
+ "match" : {
+ "query" : "hot porridge",
+ "filter" : {
+ "script" : {
+ "source" : "interval.start > 10 && interval.end < 20 && interval.gaps == 0"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+--------------------------------------------------
+// CONSOLE
+
[[interval-minimization]]
==== Minimization
diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt
index d5ced84ebcb..2b4946d1ca8 100644
--- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt
+++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.txt
@@ -234,6 +234,12 @@ class org.elasticsearch.index.similarity.ScriptedSimilarity$Doc {
float getFreq()
}
+class org.elasticsearch.index.query.IntervalFilterScript$Interval {
+ int getStart()
+ int getEnd()
+ int getGaps()
+}
+
# for testing
class org.elasticsearch.painless.FeatureTest no_import {
int z
diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml
new file mode 100644
index 00000000000..8aa10e92a24
--- /dev/null
+++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/90_interval_query_filter.yml
@@ -0,0 +1,46 @@
+setup:
+ - skip:
+ version: " - 6.99.99"
+ reason: "Implemented in 7.0"
+
+ - do:
+ indices.create:
+ index: test
+ body:
+ mappings:
+ test:
+ properties:
+ text:
+ type: text
+ analyzer: standard
+ - do:
+ bulk:
+ refresh: true
+ body:
+ - '{"index": {"_index": "test", "_type": "test", "_id": "1"}}'
+ - '{"text" : "Some like it hot, some like it cold"}'
+ - '{"index": {"_index": "test", "_type": "test", "_id": "2"}}'
+ - '{"text" : "Its cold outside, theres no kind of atmosphere"}'
+ - '{"index": {"_index": "test", "_type": "test", "_id": "3"}}'
+ - '{"text" : "Baby its cold there outside"}'
+ - '{"index": {"_index": "test", "_type": "test", "_id": "4"}}'
+ - '{"text" : "Outside it is cold and wet"}'
+
+---
+"Test filtering by script":
+ - do:
+ search:
+ index: test
+ body:
+ query:
+ intervals:
+ text:
+ match:
+ query: "cold"
+ filter:
+ script:
+ source: "interval.start > 3"
+
+ - match: { hits.total.value: 1 }
+
+
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
index 016bb7c6046..7b89dd620a9 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
@@ -58,6 +58,21 @@ setup:
- match: {hits.max_score: 2}
- match: {hits.hits.0._score: 2}
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ index: test
+ body:
+ query:
+ query_string:
+ default_field: text
+ query: s*
+ boost: 2
+
+ - match: {hits.total: 1}
+ - match: {hits.max_score: 2}
+ - match: {hits.hits.0._score: 2}
+
- do:
search:
rest_total_hits_as_int: true
diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShape.java b/server/src/main/java/org/apache/lucene/document/XLatLonShape.java
index 4fd92f4508c..87ebb6a753f 100644
--- a/server/src/main/java/org/apache/lucene/document/XLatLonShape.java
+++ b/server/src/main/java/org/apache/lucene/document/XLatLonShape.java
@@ -52,7 +52,6 @@ import static org.apache.lucene.geo.GeoEncodingUtils.encodeLongitude;
* @see PointValues
* @see LatLonDocValuesField
*
- * @lucene.experimental
*/
public class XLatLonShape {
public static final int BYTES = LatLonPoint.BYTES;
diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java
index bcf664719b7..8e30302225f 100644
--- a/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java
+++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapeBoundingBoxQuery.java
@@ -26,7 +26,6 @@ import org.apache.lucene.index.PointValues.Relation;
*
The field must be indexed using
* {@link XLatLonShape#createIndexableFields} added per document.
*
- * @lucene.experimental
**/
final class XLatLonShapeBoundingBoxQuery extends XLatLonShapeQuery {
final XRectangle2D rectangle2D;
diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java
index 90905e0d83f..4bbb077303e 100644
--- a/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java
+++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapeLineQuery.java
@@ -41,7 +41,6 @@ import java.util.Arrays;
*
The field must be indexed using
* {@link XLatLonShape#createIndexableFields} added per document.
*
- * @lucene.experimental
**/
final class XLatLonShapeLineQuery extends XLatLonShapeQuery {
final Line[] lines;
diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java
index 5e97828aae2..5b67d8c0bc9 100644
--- a/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java
+++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapePolygonQuery.java
@@ -31,7 +31,6 @@ import java.util.Arrays;
*
The field must be indexed using
* {@link XLatLonShape#createIndexableFields} added per document.
*
- * @lucene.experimental
**/
final class XLatLonShapePolygonQuery extends XLatLonShapeQuery {
final Polygon[] polygons;
diff --git a/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java b/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java
index 7aded1337e4..f4c67872cdc 100644
--- a/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java
+++ b/server/src/main/java/org/apache/lucene/document/XLatLonShapeQuery.java
@@ -45,7 +45,6 @@ import java.util.Objects;
*
* Note: this class implements the majority of the INTERSECTS, WITHIN, DISJOINT relation logic
*
- * @lucene.experimental
**/
abstract class XLatLonShapeQuery extends Query {
/** field name */
diff --git a/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java b/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java
index af06d0a0e39..0267ba29b86 100644
--- a/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java
+++ b/server/src/main/java/org/apache/lucene/geo/XRectangle2D.java
@@ -38,7 +38,6 @@ import static org.apache.lucene.geo.GeoUtils.orient;
/**
* 2D rectangle implementation containing spatial logic.
*
- * @lucene.internal
*/
public class XRectangle2D {
final byte[] bbox;
diff --git a/server/src/main/java/org/apache/lucene/geo/XTessellator.java b/server/src/main/java/org/apache/lucene/geo/XTessellator.java
index 416b501202b..48091439ba9 100644
--- a/server/src/main/java/org/apache/lucene/geo/XTessellator.java
+++ b/server/src/main/java/org/apache/lucene/geo/XTessellator.java
@@ -64,7 +64,6 @@ import static org.apache.lucene.geo.GeoUtils.orient;
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
* THIS SOFTWARE.
*
- * @lucene.experimental
*/
public final class XTessellator {
// this is a dumb heuristic to control whether we cut over to sorted morton values
diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java
index fec82dd12b3..07df1b646d5 100644
--- a/server/src/main/java/org/elasticsearch/Version.java
+++ b/server/src/main/java/org/elasticsearch/Version.java
@@ -118,6 +118,8 @@ public class Version implements Comparable, ToXContentFragment {
public static final Version V_6_5_4 = new Version(V_6_5_4_ID, org.apache.lucene.util.Version.LUCENE_7_5_0);
public static final int V_6_6_0_ID = 6060099;
public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0);
+ public static final int V_6_7_0_ID = 6070099;
+ public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0);
public static final int V_7_0_0_ID = 7000099;
public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version CURRENT = V_7_0_0;
@@ -136,6 +138,8 @@ public class Version implements Comparable, ToXContentFragment {
switch (id) {
case V_7_0_0_ID:
return V_7_0_0;
+ case V_6_7_0_ID:
+ return V_6_7_0;
case V_6_6_0_ID:
return V_6_6_0;
case V_6_5_4_ID:
diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java
index 5008dbe36c3..11ffad1c93d 100644
--- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java
@@ -43,6 +43,8 @@ import java.net.URLEncoder;
import java.util.Locale;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
/**
* A base class for the response of a write operation that involves a single doc
@@ -266,8 +268,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
seqNo = in.readZLong();
primaryTerm = in.readVLong();
} else {
- seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- primaryTerm = 0;
+ seqNo = UNASSIGNED_SEQ_NO;
+ primaryTerm = UNASSIGNED_PRIMARY_TERM;
}
forcedRefresh = in.readBoolean();
result = Result.readFrom(in);
@@ -378,8 +380,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
protected Result result = null;
protected boolean forcedRefresh;
protected ShardInfo shardInfo = null;
- protected Long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- protected Long primaryTerm = 0L;
+ protected Long seqNo = UNASSIGNED_SEQ_NO;
+ protected Long primaryTerm = UNASSIGNED_PRIMARY_TERM;
public ShardId getShardId() {
return shardId;
diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
index fd11b586666..01b9c22b4d1 100644
--- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java
@@ -56,6 +56,7 @@ import java.util.Objects;
import java.util.Set;
import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
/**
* A bulk request holds an ordered {@link IndexRequest}s, {@link DeleteRequest}s and {@link UpdateRequest}s
@@ -351,7 +352,7 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
long version = Versions.MATCH_ANY;
VersionType versionType = VersionType.INTERNAL;
long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- long ifPrimaryTerm = 0;
+ long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
int retryOnConflict = 0;
String pipeline = valueOrDefault(defaultPipeline, globalPipeline);
diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java
index 7d80b769aa5..7fb19bbf9eb 100644
--- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java
@@ -31,12 +31,13 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
/**
* A request to delete a document from an index based on its type and id. Best created using
@@ -58,8 +59,8 @@ public class DeleteRequest extends ReplicatedWriteRequest
private String routing;
private long version = Versions.MATCH_ANY;
private VersionType versionType = VersionType.INTERNAL;
- private long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- private long ifPrimaryTerm = 0;
+ private long ifSeqNo = UNASSIGNED_SEQ_NO;
+ private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
public DeleteRequest() {
}
@@ -116,16 +117,16 @@ public class DeleteRequest extends ReplicatedWriteRequest
validationException = addValidationError("version type [force] may no longer be used", validationException);
}
- if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO && (
+ if (ifSeqNo != UNASSIGNED_SEQ_NO && (
versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY
)) {
validationException = addValidationError("compare and write operations can not use versioning", validationException);
}
- if (ifPrimaryTerm == 0 && ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
+ if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) {
validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException);
}
- if (ifPrimaryTerm != 0 && ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) {
+ if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) {
validationException =
addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException);
}
@@ -239,7 +240,7 @@ public class DeleteRequest extends ReplicatedWriteRequest
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public DeleteRequest setIfSeqNo(long seqNo) {
- if (seqNo < 0 && seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
+ if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) {
throw new IllegalArgumentException("sequence numbers must be non negative. got [" + seqNo + "].");
}
ifSeqNo = seqNo;
@@ -286,8 +287,8 @@ public class DeleteRequest extends ReplicatedWriteRequest
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
} else {
- ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- ifPrimaryTerm = 0;
+ ifSeqNo = UNASSIGNED_SEQ_NO;
+ ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
}
}
@@ -305,7 +306,7 @@ public class DeleteRequest extends ReplicatedWriteRequest
if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
out.writeZLong(ifSeqNo);
out.writeVLong(ifPrimaryTerm);
- } else if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) {
+ } else if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) {
assert false : "setIfMatch [" + ifSeqNo + "], currentDocTem [" + ifPrimaryTerm + "]";
throw new IllegalStateException(
"sequence number based compare and write is not supported until all nodes are on version 7.0 or higher. " +
diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
index 46028a7c036..195f6863b3d 100644
--- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java
@@ -43,7 +43,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
-import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.ShardId;
@@ -54,6 +53,8 @@ import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
/**
* Index request to index a typed JSON document into a specific index and make it searchable. Best
@@ -106,8 +107,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement
private long autoGeneratedTimestamp = UNSET_AUTO_GENERATED_TIMESTAMP;
private boolean isRetry = false;
- private long ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- private long ifPrimaryTerm = 0;
+ private long ifSeqNo = UNASSIGNED_SEQ_NO;
+ private long ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
public IndexRequest() {
@@ -174,7 +175,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement
return validationException;
}
- if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) {
+ if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) {
validationException = addValidationError("create operations do not support compare and set. use index instead",
validationException);
return validationException;
@@ -207,15 +208,15 @@ public class IndexRequest extends ReplicatedWriteRequest implement
validationException = addValidationError("pipeline cannot be an empty string", validationException);
}
- if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO && (
+ if (ifSeqNo != UNASSIGNED_SEQ_NO && (
versionType != VersionType.INTERNAL || version != Versions.MATCH_ANY
)) {
validationException = addValidationError("compare and write operations can not use versioning", validationException);
}
- if (ifPrimaryTerm == 0 && ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
+ if (ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM && ifSeqNo != UNASSIGNED_SEQ_NO) {
validationException = addValidationError("ifSeqNo is set, but primary term is [0]", validationException);
}
- if (ifPrimaryTerm != 0 && ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO) {
+ if (ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM && ifSeqNo == UNASSIGNED_SEQ_NO) {
validationException =
addValidationError("ifSeqNo is unassigned, but primary term is [" + ifPrimaryTerm + "]", validationException);
}
@@ -511,7 +512,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement
* {@link org.elasticsearch.index.engine.VersionConflictEngineException} will be thrown.
*/
public IndexRequest setIfSeqNo(long seqNo) {
- if (seqNo < 0 && seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) {
+ if (seqNo < 0 && seqNo != UNASSIGNED_SEQ_NO) {
throw new IllegalArgumentException("sequence numbers must be non negative. got [" + seqNo + "].");
}
ifSeqNo = seqNo;
@@ -573,8 +574,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement
// generate id if not already provided
if (id == null) {
assert autoGeneratedTimestamp == -1 : "timestamp has already been generated!";
- assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO;
- assert ifPrimaryTerm == 0;
+ assert ifSeqNo == UNASSIGNED_SEQ_NO;
+ assert ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM;
autoGeneratedTimestamp = Math.max(0, System.currentTimeMillis()); // extra paranoia
String uid;
if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_beta1)) {
@@ -620,8 +621,8 @@ public class IndexRequest extends ReplicatedWriteRequest implement
ifSeqNo = in.readZLong();
ifPrimaryTerm = in.readVLong();
} else {
- ifSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- ifPrimaryTerm = 0;
+ ifSeqNo = UNASSIGNED_SEQ_NO;
+ ifPrimaryTerm = UNASSIGNED_PRIMARY_TERM;
}
}
@@ -657,7 +658,7 @@ public class IndexRequest extends ReplicatedWriteRequest implement
if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
out.writeZLong(ifSeqNo);
out.writeVLong(ifPrimaryTerm);
- } else if (ifSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO || ifPrimaryTerm != 0) {
+ } else if (ifSeqNo != UNASSIGNED_SEQ_NO || ifPrimaryTerm != UNASSIGNED_PRIMARY_TERM) {
assert false : "setIfMatch [" + ifSeqNo + "], currentDocTem [" + ifPrimaryTerm + "]";
throw new IllegalStateException(
"sequence number based compare and write is not supported until all nodes are on version 7.0 or higher. " +
diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
index 917ff06c573..da481b7a4a8 100644
--- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
+++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java
@@ -91,7 +91,8 @@ final class ExpandSearchPhase extends SearchPhase {
SearchSourceBuilder sourceBuilder = buildExpandSearchSourceBuilder(innerHitBuilder, innerCollapseBuilder)
.query(groupQuery)
.postFilter(searchRequest.source().postFilter());
- SearchRequest groupRequest = buildExpandSearchRequest(searchRequest, sourceBuilder);
+ SearchRequest groupRequest = new SearchRequest(searchRequest);
+ groupRequest.source(sourceBuilder);
multiRequest.add(groupRequest);
}
}
@@ -120,22 +121,6 @@ final class ExpandSearchPhase extends SearchPhase {
}
}
- private SearchRequest buildExpandSearchRequest(SearchRequest orig, SearchSourceBuilder sourceBuilder) {
- SearchRequest groupRequest = new SearchRequest(orig.indices())
- .types(orig.types())
- .source(sourceBuilder)
- .indicesOptions(orig.indicesOptions())
- .requestCache(orig.requestCache())
- .preference(orig.preference())
- .routing(orig.routing())
- .searchType(orig.searchType());
- if (orig.allowPartialSearchResults() != null){
- groupRequest.allowPartialSearchResults(orig.allowPartialSearchResults());
- }
- groupRequest.setMaxConcurrentShardRequests(orig.getMaxConcurrentShardRequests());
- return groupRequest;
- }
-
private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder options, CollapseBuilder innerCollapseBuilder) {
SearchSourceBuilder groupSource = new SearchSourceBuilder();
groupSource.from(options.getFrom());
diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
index e9c8e7e47ea..4894ca1f772 100644
--- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
+++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
@@ -87,6 +87,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;
import java.util.function.Supplier;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+
/**
* Base class for requests that should be executed on a primary copy followed by replica copies.
* Subclasses can resolve the target shard and provide implementation for primary and replica operations.
@@ -1248,7 +1250,7 @@ public abstract class TransportReplicationAction<
request = requestSupplier.get();
// null now, but will be populated by reading from the streams
targetAllocationID = null;
- primaryTerm = 0L;
+ primaryTerm = UNASSIGNED_PRIMARY_TERM;
}
public ConcreteShardRequest(R request, String targetAllocationID, long primaryTerm) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
index 559b554b007..e8261ca9f09 100644
--- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -345,17 +345,19 @@ public class InternalClusterInfoService implements ClusterInfoService, LocalNode
});
try {
- nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS);
+ if (nodeLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) {
+ logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
+ }
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore interrupt status
- logger.warn("Failed to update node information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
}
try {
- indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS);
+ if (indicesLatch.await(fetchTimeout.getMillis(), TimeUnit.MILLISECONDS) == false) {
+ logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
+ }
} catch (InterruptedException e) {
Thread.currentThread().interrupt(); // restore interrupt status
- logger.warn("Failed to update shard information for ClusterInfoUpdateJob within {} timeout", fetchTimeout);
}
ClusterInfo clusterInfo = getClusterInfo();
try {
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
index 4f13c485fde..e5c46cbb0ee 100644
--- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java
@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.MasterNodeChangePredicate;
import org.elasticsearch.cluster.NotMasterException;
+import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingService;
@@ -48,7 +49,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.tasks.Task;
@@ -74,6 +74,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Predicate;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+
public class ShardStateAction {
private static final Logger logger = LogManager.getLogger(ShardStateAction.class);
@@ -603,7 +605,7 @@ public class ShardStateAction {
allocationId = in.readString();
if (in.getVersion().before(Version.V_6_3_0)) {
final long primaryTerm = in.readVLong();
- assert primaryTerm == 0L : "shard is only started by itself: primary term [" + primaryTerm + "]";
+ assert primaryTerm == UNASSIGNED_PRIMARY_TERM : "shard is only started by itself: primary term [" + primaryTerm + "]";
}
this.message = in.readString();
if (in.getVersion().before(Version.V_6_3_0)) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
index 5d23971dddb..06d5cd6f85d 100644
--- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
+++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java
@@ -55,6 +55,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@@ -1079,6 +1080,7 @@ public class IndexMetaData implements Diffable, ToXContentFragmen
throw new IllegalStateException("you must set the number of shards before setting/reading primary terms");
}
primaryTerms = new long[numberOfShards()];
+ Arrays.fill(primaryTerms, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
}
diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java
index bdb794cbf0a..889721a49e0 100644
--- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java
+++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java
@@ -33,10 +33,12 @@ import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqN
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper;
-import org.elasticsearch.index.seqno.SequenceNumbers;
import java.io.IOException;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
+
/** Utility class to do efficient primary-key (only 1 doc contains the
* given term) lookups by segment, re-using the enums. This class is
@@ -116,18 +118,18 @@ final class PerThreadIDVersionAndSeqNoLookup {
if (seqNos != null && seqNos.advanceExact(docID)) {
seqNo = seqNos.longValue();
} else {
- seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
+ seqNo = UNASSIGNED_SEQ_NO;
}
NumericDocValues terms = context.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
if (terms != null && terms.advanceExact(docID)) {
term = terms.longValue();
} else {
- term = 0;
+ term = UNASSIGNED_PRIMARY_TERM;
}
} else {
- seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- term = 0;
+ seqNo = UNASSIGNED_SEQ_NO;
+ term = UNASSIGNED_PRIMARY_TERM;
}
return new DocIdAndVersion(docID, versions.longValue(), seqNo, term, context.reader(), context.docBase);
} else {
@@ -175,7 +177,7 @@ final class PerThreadIDVersionAndSeqNoLookup {
if (seqNoDV != null && seqNoDV.advanceExact(docID)) {
seqNo = seqNoDV.longValue();
} else {
- seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
+ seqNo = UNASSIGNED_SEQ_NO;
}
final boolean isLive = (liveDocs == null || liveDocs.get(docID));
if (isLive) {
diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
index f4a44a75789..1572548b1b1 100644
--- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
+++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
@@ -132,7 +132,8 @@ public class DiscoveryModule {
transportService, namedWriteableRegistry, allocationService, masterService,
() -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), hostsProvider, clusterApplier,
Randomness.get()));
- discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier));
+ discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier,
+ gatewayMetaState));
for (DiscoveryPlugin plugin : plugins) {
plugin.getDiscoveryTypes(threadPool, transportService, namedWriteableRegistry, masterService, clusterApplier, clusterSettings,
hostsProvider, allocationService, gatewayMetaState).forEach((key, value) -> {
diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
index c759fbf4d15..238f72f72f4 100644
--- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
+++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java
@@ -31,12 +31,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterApplier;
import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener;
+import org.elasticsearch.cluster.service.ClusterApplierService;
import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryStats;
+import org.elasticsearch.gateway.GatewayMetaState;
import org.elasticsearch.transport.TransportService;
import java.util.Objects;
@@ -55,12 +57,17 @@ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements D
private volatile ClusterState clusterState;
public SingleNodeDiscovery(final Settings settings, final TransportService transportService,
- final MasterService masterService, final ClusterApplier clusterApplier) {
+ final MasterService masterService, final ClusterApplier clusterApplier,
+ final GatewayMetaState gatewayMetaState) {
super(Objects.requireNonNull(settings));
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
this.transportService = Objects.requireNonNull(transportService);
masterService.setClusterStateSupplier(() -> clusterState);
this.clusterApplier = clusterApplier;
+
+ if (clusterApplier instanceof ClusterApplierService) {
+ ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState);
+ }
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index f0d01574766..008b8533103 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -72,7 +72,6 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.seqno.SeqNoStats;
-import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
@@ -106,6 +105,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.BiFunction;
import java.util.stream.Stream;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
+
public abstract class Engine implements Closeable {
public static final String SYNC_COMMIT_ID = "sync_id";
@@ -147,7 +149,7 @@ public abstract class Engine implements Closeable {
* 1. A primary initializes this marker once using the max_seq_no from its history, then advances when processing an update or delete.
* 2. A replica never advances this marker by itself but only inherits from its primary (via advanceMaxSeqNoOfUpdatesOrDeletes).
*/
- private final AtomicLong maxSeqNoOfUpdatesOrDeletes = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
+ private final AtomicLong maxSeqNoOfUpdatesOrDeletes = new AtomicLong(UNASSIGNED_SEQ_NO);
protected Engine(EngineConfig engineConfig) {
Objects.requireNonNull(engineConfig.getStore(), "Store must be provided to the engine");
@@ -425,8 +427,8 @@ public abstract class Engine implements Closeable {
protected Result(Operation.TYPE operationType, Mapping requiredMappingUpdate) {
this.operationType = operationType;
this.version = Versions.NOT_FOUND;
- this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- this.term = 0L;
+ this.seqNo = UNASSIGNED_SEQ_NO;
+ this.term = UNASSIGNED_PRIMARY_TERM;
this.failure = null;
this.requiredMappingUpdate = requiredMappingUpdate;
this.resultType = Type.MAPPING_UPDATE_REQUIRED;
@@ -522,7 +524,7 @@ public abstract class Engine implements Closeable {
* use in case of the index operation failed before getting to internal engine
**/
public IndexResult(Exception failure, long version, long term) {
- this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO);
+ this(failure, version, term, UNASSIGNED_SEQ_NO);
}
public IndexResult(Exception failure, long version, long term, long seqNo) {
@@ -554,7 +556,7 @@ public abstract class Engine implements Closeable {
* use in case of the delete operation failed before getting to internal engine
**/
public DeleteResult(Exception failure, long version, long term) {
- this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
+ this(failure, version, term, UNASSIGNED_SEQ_NO, false);
}
public DeleteResult(Exception failure, long version, long term, long seqNo, boolean found) {
@@ -1353,9 +1355,9 @@ public abstract class Engine implements Closeable {
super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
- assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
+ assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
"ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
- assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) :
+ assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) :
"cas operations are only allowed if origin is primary. get [" + origin + "]";
this.doc = doc;
this.isRetry = isRetry;
@@ -1369,8 +1371,8 @@ public abstract class Engine implements Closeable {
} // TEST ONLY
Index(Term uid, long primaryTerm, ParsedDocument doc, long version) {
- this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL,
- Origin.PRIMARY, System.nanoTime(), -1, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
+ this(uid, doc, UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL,
+ Origin.PRIMARY, System.nanoTime(), -1, false, UNASSIGNED_SEQ_NO, 0);
} // TEST ONLY
public ParsedDocument parsedDoc() {
@@ -1447,9 +1449,9 @@ public abstract class Engine implements Closeable {
super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
- assert ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
+ assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >=0 :
"ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
- assert (origin == Origin.PRIMARY) || (ifSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && ifPrimaryTerm == 0) :
+ assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) :
"cas operations are only allowed if origin is primary. get [" + origin + "]";
this.type = Objects.requireNonNull(type);
this.id = Objects.requireNonNull(id);
@@ -1458,13 +1460,13 @@ public abstract class Engine implements Closeable {
}
public Delete(String type, String id, Term uid, long primaryTerm) {
- this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL,
- Origin.PRIMARY, System.nanoTime(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
+ this(type, id, uid, UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL,
+ Origin.PRIMARY, System.nanoTime(), UNASSIGNED_SEQ_NO, 0);
}
public Delete(Delete template, VersionType versionType) {
this(template.type(), template.id(), template.uid(), template.seqNo(), template.primaryTerm(), template.version(),
- versionType, template.origin(), template.startTime(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
+ versionType, template.origin(), template.startTime(), UNASSIGNED_SEQ_NO, 0);
}
@Override
diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java
index 5ec448888b3..869bc548f89 100644
--- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java
+++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java
@@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.IgnoredFieldMapper;
import org.elasticsearch.index.mapper.SourceFieldMapper;
-import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.search.lookup.SourceLookup;
import java.io.IOException;
@@ -48,6 +47,8 @@ import java.util.Objects;
import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
public class GetResult implements Streamable, Iterable, ToXContentObject {
@@ -82,9 +83,9 @@ public class GetResult implements Streamable, Iterable, ToXConten
this.id = id;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
- assert (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && primaryTerm == 0) || (seqNo >= 0 && primaryTerm >= 1) :
+ assert (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) || (seqNo >= 0 && primaryTerm >= 1) :
"seqNo: " + seqNo + " primaryTerm: " + primaryTerm;
- assert exists || (seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO && primaryTerm == 0) :
+ assert exists || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) :
"doc not found but seqNo/primaryTerm are set";
this.version = version;
this.exists = exists;
@@ -239,7 +240,7 @@ public class GetResult implements Streamable, Iterable, ToXConten
}
public XContentBuilder toXContentEmbedded(XContentBuilder builder, Params params) throws IOException {
- if (seqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { // seqNo may not be assigned if read from an old node
+ if (seqNo != UNASSIGNED_SEQ_NO) { // seqNo may not be assigned if read from an old node
builder.field(_SEQ_NO, seqNo);
builder.field(_PRIMARY_TERM, primaryTerm);
}
@@ -313,8 +314,8 @@ public class GetResult implements Streamable, Iterable, ToXConten
String currentFieldName = parser.currentName();
long version = -1;
- long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- long primaryTerm = 0;
+ long seqNo = UNASSIGNED_SEQ_NO;
+ long primaryTerm = UNASSIGNED_PRIMARY_TERM;
Boolean found = null;
BytesReference source = null;
Map fields = new HashMap<>();
@@ -388,8 +389,8 @@ public class GetResult implements Streamable, Iterable, ToXConten
seqNo = in.readZLong();
primaryTerm = in.readVLong();
} else {
- seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
- primaryTerm = 0L;
+ seqNo = UNASSIGNED_SEQ_NO;
+ primaryTerm = UNASSIGNED_PRIMARY_TERM;
}
version = in.readLong();
exists = in.readBoolean();
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
index 5987e167dc9..8a9c141b0af 100644
--- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
+++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java
@@ -32,6 +32,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
+import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
@@ -40,6 +41,9 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.intervals.IntervalsSource;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.Version;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.settings.Settings;
@@ -360,7 +364,7 @@ public class TextFieldMapper extends FieldMapper {
}
boolean accept(int length) {
- return length >= minChars && length <= maxChars;
+ return length >= minChars - 1 && length <= maxChars;
}
void doXContent(XContentBuilder builder) throws IOException {
@@ -370,6 +374,22 @@ public class TextFieldMapper extends FieldMapper {
builder.endObject();
}
+ @Override
+ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) {
+ if (value.length() >= minChars) {
+ return super.termQuery(value, context);
+ }
+ List automata = new ArrayList<>();
+ automata.add(Automata.makeString(value));
+ for (int i = value.length(); i < minChars; i++) {
+ automata.add(Automata.makeAnyChar());
+ }
+ Automaton automaton = Operations.concatenate(automata);
+ AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), automaton);
+ query.setRewriteMethod(method);
+ return query;
+ }
+
@Override
public PrefixFieldType clone() {
return new PrefixFieldType(name(), minChars, maxChars);
@@ -402,7 +422,6 @@ public class TextFieldMapper extends FieldMapper {
@Override
public int hashCode() {
-
return Objects.hash(super.hashCode(), minChars, maxChars);
}
}
@@ -564,7 +583,7 @@ public class TextFieldMapper extends FieldMapper {
if (prefixFieldType == null || prefixFieldType.accept(value.length()) == false) {
return super.prefixQuery(value, method, context);
}
- Query tq = prefixFieldType.termQuery(value, context);
+ Query tq = prefixFieldType.prefixQuery(value, method, context);
if (method == null || method == MultiTermQuery.CONSTANT_SCORE_REWRITE
|| method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) {
return new ConstantScoreQuery(tq);
diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java b/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java
new file mode 100644
index 00000000000..306560b3d5f
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/query/IntervalFilterScript.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.search.intervals.IntervalIterator;
+import org.elasticsearch.script.ScriptContext;
+
+/**
+ * Base class for scripts used as interval filters, see {@link IntervalsSourceProvider.IntervalFilter}
+ */
+public abstract class IntervalFilterScript {
+
+ public static class Interval {
+
+ private IntervalIterator iterator;
+
+ void setIterator(IntervalIterator iterator) {
+ this.iterator = iterator;
+ }
+
+ public int getStart() {
+ return iterator.start();
+ }
+
+ public int getEnd() {
+ return iterator.end();
+ }
+
+ public int getGaps() {
+ return iterator.gaps();
+ }
+ }
+
+ public abstract boolean execute(Interval interval);
+
+ public interface Factory {
+ IntervalFilterScript newInstance();
+ }
+
+ public static final String[] PARAMETERS = new String[]{ "interval" };
+ public static final ScriptContext CONTEXT = new ScriptContext<>("interval", Factory.class);
+
+}
diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
index 79bcbe26fbc..3fa608db37e 100644
--- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
+++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java
@@ -19,6 +19,8 @@
package org.elasticsearch.index.query;
+import org.apache.lucene.search.intervals.FilteredIntervalsSource;
+import org.apache.lucene.search.intervals.IntervalIterator;
import org.apache.lucene.search.intervals.Intervals;
import org.apache.lucene.search.intervals.IntervalsSource;
import org.elasticsearch.common.ParseField;
@@ -34,6 +36,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.script.Script;
import java.io.IOException;
import java.util.ArrayList;
@@ -387,24 +390,59 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
}
}
+ static class ScriptFilterSource extends FilteredIntervalsSource {
+
+ final IntervalFilterScript script;
+ IntervalFilterScript.Interval interval = new IntervalFilterScript.Interval();
+
+ ScriptFilterSource(IntervalsSource in, String name, IntervalFilterScript script) {
+ super("FILTER(" + name + ")", in);
+ this.script = script;
+ }
+
+ @Override
+ protected boolean accept(IntervalIterator it) {
+ interval.setIterator(it);
+ return script.execute(interval);
+ }
+ }
+
public static class IntervalFilter implements ToXContent, Writeable {
public static final String NAME = "filter";
private final String type;
private final IntervalsSourceProvider filter;
+ private final Script script;
public IntervalFilter(IntervalsSourceProvider filter, String type) {
this.filter = filter;
this.type = type.toLowerCase(Locale.ROOT);
+ this.script = null;
+ }
+
+ IntervalFilter(Script script) {
+ this.script = script;
+ this.type = "script";
+ this.filter = null;
}
public IntervalFilter(StreamInput in) throws IOException {
this.type = in.readString();
- this.filter = in.readNamedWriteable(IntervalsSourceProvider.class);
+ this.filter = in.readOptionalNamedWriteable(IntervalsSourceProvider.class);
+ if (in.readBoolean()) {
+ this.script = new Script(in);
+ }
+ else {
+ this.script = null;
+ }
}
public IntervalsSource filter(IntervalsSource input, QueryShardContext context, MappedFieldType fieldType) throws IOException {
+ if (script != null) {
+ IntervalFilterScript ifs = context.getScriptService().compile(script, IntervalFilterScript.CONTEXT).newInstance();
+ return new ScriptFilterSource(input, script.getIdOrCode(), ifs);
+ }
IntervalsSource filterSource = filter.getSource(context, fieldType);
switch (type) {
case "containing":
@@ -439,7 +477,14 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(type);
- out.writeNamedWriteable(filter);
+ out.writeOptionalNamedWriteable(filter);
+ if (script == null) {
+ out.writeBoolean(false);
+ }
+ else {
+ out.writeBoolean(true);
+ script.writeTo(out);
+ }
}
@Override
@@ -458,6 +503,13 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
throw new ParsingException(parser.getTokenLocation(), "Expected [FIELD_NAME] but got [" + parser.currentToken() + "]");
}
String type = parser.currentName();
+ if (Script.SCRIPT_PARSE_FIELD.match(type, parser.getDeprecationHandler())) {
+ Script script = Script.parse(parser);
+ if (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+ throw new ParsingException(parser.getTokenLocation(), "Expected [END_OBJECT] but got [" + parser.currentToken() + "]");
+ }
+ return new IntervalFilter(script);
+ }
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ParsingException(parser.getTokenLocation(), "Expected [START_OBJECT] but got [" + parser.currentToken() + "]");
}
@@ -475,4 +527,6 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
}
}
+
+
}
diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java
index 7cffc8c1ac9..6336e83338f 100644
--- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java
+++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java
@@ -41,6 +41,11 @@ public class SequenceNumbers {
*/
public static final long NO_OPS_PERFORMED = -1L;
+ /**
+ * Represents an unassigned primary term (e.g., when a primary shard was not yet allocated)
+ */
+ public static final long UNASSIGNED_PRIMARY_TERM = 0L;
+
/**
* Reads the sequence number stats from the commit data (maximum sequence number and local checkpoint).
*
diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
index c61718332cd..bfd9e31abcd 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
@@ -20,6 +20,7 @@
package org.elasticsearch.index.translog;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
+import org.elasticsearch.index.seqno.SequenceNumbers;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -113,7 +114,7 @@ public abstract class BaseTranslogReader implements Comparable getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) {
+ if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != SequenceNumbers.UNASSIGNED_PRIMARY_TERM) {
throw new TranslogCorruptedException(
path.toString(),
"operation's term is newer than translog header term; " +
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
index d80a6729d30..d47f0d9d7fd 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
@@ -34,6 +34,8 @@ import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
+
/**
* Each translog file is started with a translog header then followed by translog operations.
*/
@@ -45,8 +47,6 @@ final class TranslogHeader {
public static final int VERSION_PRIMARY_TERM = 3; // added primary term
public static final int CURRENT_VERSION = VERSION_PRIMARY_TERM;
- public static final long UNKNOWN_PRIMARY_TERM = 0L;
-
private final String translogUUID;
private final long primaryTerm;
private final int headerSizeInBytes;
@@ -146,7 +146,7 @@ final class TranslogHeader {
primaryTerm = in.readLong();
} else {
assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]";
- primaryTerm = UNKNOWN_PRIMARY_TERM;
+ primaryTerm = UNASSIGNED_PRIMARY_TERM;
}
// Verify the checksum
if (version >= VERSION_PRIMARY_TERM) {
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java
index 87600f4441b..133055f2917 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogAction.java
@@ -207,7 +207,7 @@ public class TruncateTranslogAction {
*/
private static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException {
try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
- TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM);
+ TranslogHeader header = new TranslogHeader(translogUUID, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
header.write(fc);
return header.sizeInBytes();
}
diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java
index 8e9d162c52e..da61fd98d52 100644
--- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java
+++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java
@@ -21,6 +21,7 @@ package org.elasticsearch.script;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.query.IntervalFilterScript;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.search.aggregations.pipeline.MovingFunctionScript;
@@ -60,7 +61,8 @@ public class ScriptModule {
ScriptedMetricAggContexts.InitScript.CONTEXT,
ScriptedMetricAggContexts.MapScript.CONTEXT,
ScriptedMetricAggContexts.CombineScript.CONTEXT,
- ScriptedMetricAggContexts.ReduceScript.CONTEXT
+ ScriptedMetricAggContexts.ReduceScript.CONTEXT,
+ IntervalFilterScript.CONTEXT
).collect(Collectors.toMap(c -> c.name, Function.identity()));
}
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
index 15d5d1ca268..bc87c49dcca 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
@@ -142,7 +142,8 @@ public class RestoreInfo implements ToXContentObject, Streamable {
return builder;
}
- private static final ObjectParser PARSER = new ObjectParser<>(RestoreInfo.class.getName(), true, RestoreInfo::new);
+ private static final ObjectParser PARSER = new ObjectParser<>(RestoreInfo.class.getName(),
+ true, RestoreInfo::new);
static {
ObjectParser shardsParser = new ObjectParser<>("shards", true, null);
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 4c6090758dd..eecac92d63e 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -103,8 +103,8 @@ import static org.elasticsearch.snapshots.SnapshotUtils.filterIndices;
* First {@link #restoreSnapshot(RestoreRequest, org.elasticsearch.action.ActionListener)}
* method reads information about snapshot and metadata from repository. In update cluster state task it checks restore
* preconditions, restores global state if needed, creates {@link RestoreInProgress} record with list of shards that needs
- * to be restored and adds this shard to the routing table using {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)}
- * method.
+ * to be restored and adds this shard to the routing table using
+ * {@link RoutingTable.Builder#addAsRestore(IndexMetaData, SnapshotRecoverySource)} method.
*
* Individual shards are getting restored as part of normal recovery process in
* {@link IndexShard#restoreFromRepository(Repository)} )}
@@ -256,15 +256,18 @@ public class RestoreService implements ClusterStateApplier {
for (Map.Entry indexEntry : indices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
- SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(restoreUUID, snapshot, snapshotInfo.version(), index);
+ SnapshotRecoverySource recoverySource =
+ new SnapshotRecoverySource(restoreUUID, snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
- snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
+ snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData,
+ request.indexSettings, request.ignoreIndexSettings);
try {
snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData,
minIndexCompatibilityVersion);
} catch (Exception ex) {
- throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
+ throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be " +
+ "upgraded", ex);
}
// Check that the index is closed or doesn't exist
IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);
@@ -274,9 +277,16 @@ public class RestoreService implements ClusterStateApplier {
// Index doesn't exist - create it and start recovery
// Make sure that the index we are about to create has a validate name
MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
- createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings(), currentState, false);
- IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
- indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
+ createIndexService.validateIndexSettings(renamedIndexName,
+ snapshotIndexMetaData.getSettings(),
+ currentState,
+ false);
+ IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData)
+ .state(IndexMetaData.State.OPEN)
+ .index(renamedIndexName);
+ indexMdBuilder.settings(Settings.builder()
+ .put(snapshotIndexMetaData.getSettings())
+ .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
// Remove all aliases - they shouldn't be restored
indexMdBuilder.removeAllAliases();
@@ -296,10 +306,13 @@ public class RestoreService implements ClusterStateApplier {
} else {
validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial);
// Index exists and it's closed - open it in metadata and start recovery
- IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
+ IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData)
+ .state(IndexMetaData.State.OPEN);
indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
- indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(), currentIndexMetaData.getMappingVersion() + 1));
- indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(), currentIndexMetaData.getSettingsVersion() + 1));
+ indexMdBuilder.mappingVersion(Math.max(snapshotIndexMetaData.getMappingVersion(),
+ currentIndexMetaData.getMappingVersion() + 1));
+ indexMdBuilder.settingsVersion(Math.max(snapshotIndexMetaData.getSettingsVersion(),
+ currentIndexMetaData.getSettingsVersion() + 1));
if (!request.includeAliases()) {
// Remove all snapshot aliases
if (!snapshotIndexMetaData.getAliases().isEmpty()) {
@@ -314,7 +327,10 @@ public class RestoreService implements ClusterStateApplier {
aliases.add(alias.value);
}
}
- indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
+ indexMdBuilder.settings(Settings.builder()
+ .put(snapshotIndexMetaData.getSettings())
+ .put(IndexMetaData.SETTING_INDEX_UUID,
+ currentIndexMetaData.getIndexUUID()));
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
blocks.updateBlocks(updatedIndexMetaData);
@@ -324,9 +340,12 @@ public class RestoreService implements ClusterStateApplier {
for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
if (!ignoreShards.contains(shard)) {
- shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
+ shardsBuilder.put(new ShardId(renamedIndex, shard),
+ new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
} else {
- shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
+ shardsBuilder.put(new ShardId(renamedIndex, shard),
+ new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(),
+ RestoreInProgress.State.FAILURE));
}
}
}
@@ -390,7 +409,9 @@ public class RestoreService implements ClusterStateApplier {
private void checkAliasNameConflicts(Map renamedIndices, Set aliases) {
for (Map.Entry renamedIndex : renamedIndices.entrySet()) {
if (aliases.contains(renamedIndex.getKey())) {
- throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
+ throw new SnapshotRestoreException(snapshot,
+ "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of " +
+ "conflict with an alias with the same name");
}
}
}
@@ -409,28 +430,34 @@ public class RestoreService implements ClusterStateApplier {
if (request.partial()) {
return true;
} else {
- throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
+ throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot " +
+ "restore");
}
} else {
return false;
}
}
- private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
+ private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData,
+ String renamedIndex, boolean partial) {
// Index exist - checking that it's closed
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
// TODO: Enable restore for open indices
- throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index with same name already exists in the cluster. " +
- "Either close or delete the existing index or restore the index under a different name by providing a rename pattern and replacement name");
+ throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because an open index " +
+ "with same name already exists in the cluster. Either close or delete the existing index or restore the " +
+ "index under a different name by providing a rename pattern and replacement name");
}
// Index exist - checking if it's partial restore
if (partial) {
- throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
+ throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such " +
+ "index already exists");
}
// Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
- throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
- "] shards from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
+ throw new SnapshotRestoreException(snapshot,
+ "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shards " +
+ "from a snapshot of index [" + snapshotIndexMetaData.getIndex().getName() + "] with [" +
+ snapshotIndexMetaData.getNumberOfShards() + "] shards");
}
}
@@ -442,7 +469,10 @@ public class RestoreService implements ClusterStateApplier {
if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) {
return indexMetaData;
}
- Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
+ Settings normalizedChangeSettings = Settings.builder()
+ .put(changeSettings)
+ .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX)
+ .build();
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
Settings settings = indexMetaData.getSettings();
Set keyFilters = new HashSet<>();
@@ -504,7 +534,8 @@ public class RestoreService implements ClusterStateApplier {
} catch (Exception e) {
- logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
+ logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot",
+ request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}
@@ -521,12 +552,14 @@ public class RestoreService implements ClusterStateApplier {
if (shardsBuilder == null) {
shardsBuilder = ImmutableOpenMap.builder(entry.shards());
}
- shardsBuilder.put(shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted"));
+ shardsBuilder.put(shardId,
+ new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted"));
}
}
if (shardsBuilder != null) {
ImmutableOpenMap shards = shardsBuilder.build();
- builder.add(new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(), overallState(RestoreInProgress.State.STARTED, shards), entry.indices(), shards));
+ builder.add(new RestoreInProgress.Entry(entry.uuid(), entry.snapshot(),
+ overallState(RestoreInProgress.State.STARTED, shards), entry.indices(), shards));
} else {
builder.add(entry);
}
@@ -602,8 +635,8 @@ public class RestoreService implements ClusterStateApplier {
initializedShard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
changes(unassignedShard.recoverySource()).shards.put(
unassignedShard.shardId(),
- new ShardRestoreStatus(null,
- RestoreInProgress.State.FAILURE, "recovery source type changed from snapshot to " + initializedShard.recoverySource())
+ new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE,
+ "recovery source type changed from snapshot to " + initializedShard.recoverySource())
);
}
}
@@ -672,7 +705,8 @@ public class RestoreService implements ClusterStateApplier {
return null;
}
- static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener {
+ static class CleanRestoreStateTaskExecutor implements ClusterStateTaskExecutor,
+ ClusterStateTaskListener {
static class Task {
final String uuid;
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
index b512a570a85..c1692f60617 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
@@ -232,16 +232,18 @@ public final class SnapshotInfo implements Comparable, ToXContent,
private final List shardFailures;
public SnapshotInfo(SnapshotId snapshotId, List indices, SnapshotState state) {
- this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0, Collections.emptyList(), null);
+ this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0,
+ Collections.emptyList(), null);
}
public SnapshotInfo(SnapshotId snapshotId, List indices, SnapshotState state, Version version) {
- this(snapshotId, indices, state, null, version, 0L, 0L, 0, 0, Collections.emptyList(), null);
+ this(snapshotId, indices, state, null, version, 0L, 0L, 0, 0,
+ Collections.emptyList(), null);
}
public SnapshotInfo(SnapshotId snapshotId, List indices, long startTime, Boolean includeGlobalState) {
- this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, 0, 0,
- Collections.emptyList(), includeGlobalState);
+ this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L,
+ 0, 0, Collections.emptyList(), includeGlobalState);
}
public SnapshotInfo(SnapshotId snapshotId, List indices, long startTime, String reason, long endTime,
@@ -306,7 +308,8 @@ public final class SnapshotInfo implements Comparable, ToXContent,
public static SnapshotInfo incompatible(SnapshotId snapshotId) {
return new SnapshotInfo(snapshotId, Collections.emptyList(), SnapshotState.INCOMPATIBLE,
"the snapshot is incompatible with the current version of Elasticsearch and its exact version is unknown",
- null, 0L, 0L, 0, 0, Collections.emptyList(), null);
+ null, 0L, 0L, 0, 0,
+ Collections.emptyList(), null);
}
/**
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index 7e590bc4104..40c89f10ccb 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -116,8 +116,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
private final UpdateSnapshotStatusAction updateSnapshotStatusHandler;
@Inject
- public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool,
- TransportService transportService, IndicesService indicesService,
+ public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService,
+ ThreadPool threadPool, TransportService transportService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings);
this.indicesService = indicesService;
@@ -188,7 +188,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
for (Map.Entry> snapshotShards : snapshotShardsMap.entrySet()) {
Map shards = snapshotShards.getValue();
if (shards.containsKey(shardId)) {
- logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]", shardId, snapshotShards.getKey().getSnapshotId());
+ logger.debug("[{}] shard closing, abort snapshotting for snapshot [{}]",
+ shardId, snapshotShards.getKey().getSnapshotId());
shards.get(shardId).abortIfNotCompleted("shard is closing, aborting");
}
}
@@ -337,7 +338,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
@Override
public void onFailure(Exception e) {
- logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard", shardId, snapshot), e);
+ logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to snapshot shard",
+ shardId, snapshot), e);
failure.set(e);
}
@@ -367,7 +369,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
* @param snapshot snapshot
* @param snapshotStatus snapshot status
*/
- private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId, final IndexShardSnapshotStatus snapshotStatus) {
+ private void snapshot(final IndexShard indexShard, final Snapshot snapshot, final IndexId indexId,
+ final IndexShardSnapshotStatus snapshotStatus) {
final ShardId shardId = indexShard.shardId();
if (indexShard.routingEntry().primary() == false) {
throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary");
@@ -526,7 +529,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
*
* @param request update shard status request
*/
- private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request, ActionListener listener) {
+ private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request,
+ ActionListener listener) {
logger.trace("received updated snapshot restore state [{}]", request);
clusterService.submitStateUpdateTask(
"update snapshot state",
@@ -549,7 +553,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
class SnapshotStateExecutor implements ClusterStateTaskExecutor {
@Override
- public ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception {
+ public ClusterTasksResult
+ execute(ClusterState currentState, List tasks) throws Exception {
final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
if (snapshots != null) {
int changedCount = 0;
@@ -560,7 +565,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
for (UpdateIndexShardSnapshotStatusRequest updateSnapshotState : tasks) {
if (entry.snapshot().equals(updateSnapshotState.snapshot())) {
- logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), updateSnapshotState.status().state());
+ logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(),
+ updateSnapshotState.shardId(), updateSnapshotState.status().state());
if (updated == false) {
shards.putAll(entry.shards());
updated = true;
@@ -588,7 +594,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
if (changedCount > 0) {
logger.trace("changed cluster state triggered by {} snapshot state updates", changedCount);
- final SnapshotsInProgress updatedSnapshots = new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()]));
+ final SnapshotsInProgress updatedSnapshots =
+ new SnapshotsInProgress(entries.toArray(new SnapshotsInProgress.Entry[entries.size()]));
return ClusterTasksResult.builder().successes(tasks).build(
ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, updatedSnapshots).build());
}
@@ -606,8 +613,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
UpdateSnapshotStatusAction(TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(
- SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool, actionFilters,
- indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new
+ SnapshotShardsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, transportService, clusterService, threadPool,
+ actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new
);
}
@@ -622,7 +629,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
}
@Override
- protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, ActionListener listener) throws Exception {
+ protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state,
+ ActionListener listener) throws Exception {
innerUpdateSnapshotState(request, listener);
}
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index fa7c757aaca..8c505d20d17 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -92,16 +92,20 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
*
* A typical snapshot creating process looks like this:
*
- *
On the master node the {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called and makes sure that no snapshots is currently running
- * and registers the new snapshot in cluster state
- *
When cluster state is updated the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method
- * kicks in and initializes the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state
+ *
On the master node the {@link #createSnapshot(SnapshotRequest, CreateSnapshotListener)} is called and makes sure that no snapshots
+ * is currently running and registers the new snapshot in cluster state
+ *
When cluster state is updated
+ * the {@link #beginSnapshot(ClusterState, SnapshotsInProgress.Entry, boolean, CreateSnapshotListener)} method kicks in and initializes
+ * the snapshot in the repository and then populates list of shards that needs to be snapshotted in cluster state
*
Each data node is watching for these shards and when new shards scheduled for snapshotting appear in the cluster state, data nodes
* start processing them through {@link SnapshotShardsService#processIndexShardSnapshots(ClusterChangedEvent)} method
- *
Once shard snapshot is created data node updates state of the shard in the cluster state using the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method
- *
When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot as completed
+ *
Once shard snapshot is created data node updates state of the shard in the cluster state using
+ * the {@link SnapshotShardsService#sendSnapshotShardUpdate(Snapshot, ShardId, ShardSnapshotStatus)} method
+ *
When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot
+ * as completed
*
After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository,
- * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state
+ * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls
+ * {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Exception)} to remove snapshot from cluster state
*
*/
public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier {
@@ -118,7 +122,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
private final CopyOnWriteArrayList snapshotCompletionListeners = new CopyOnWriteArrayList<>();
@Inject
- public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, RepositoriesService repositoriesService, ThreadPool threadPool) {
+ public SnapshotsService(Settings settings, ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
+ RepositoriesService repositoriesService, ThreadPool threadPool) {
super(settings);
this.clusterService = clusterService;
this.indexNameExpressionResolver = indexNameExpressionResolver;
@@ -253,7 +258,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
if (snapshots == null || snapshots.entries().isEmpty()) {
// Store newSnapshot here to be processed in clusterStateProcessed
- List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices()));
+ List indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState,
+ request.indicesOptions(), request.indices()));
logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices);
List snapshotIndices = repositoryData.resolveNewIndices(indices);
newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId),
@@ -393,9 +399,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
if (entry.state() != State.ABORTED) {
// Replace the snapshot that was just intialized
- ImmutableOpenMap shards = shards(currentState, entry.indices());
+ ImmutableOpenMap shards =
+ shards(currentState, entry.indices());
if (!partial) {
- Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData());
+ Tuple, Set> indicesWithMissingShards = indicesWithMissingShards(shards,
+ currentState.metaData());
Set missing = indicesWithMissingShards.v1();
Set closed = indicesWithMissingShards.v2();
if (missing.isEmpty() == false || closed.isEmpty() == false) {
@@ -437,8 +445,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(String source, Exception e) {
- logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
- removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
+ logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot",
+ snapshot.snapshot().getSnapshotId()), e);
+ removeSnapshotFromClusterState(snapshot.snapshot(), null, e,
+ new CleanupAfterErrorListener(snapshot, true, userCreateSnapshotListener, e));
}
@Override
@@ -471,8 +481,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(Exception e) {
- logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
- removeSnapshotFromClusterState(snapshot.snapshot(), null, e, new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
+ logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]",
+ snapshot.snapshot().getSnapshotId()), e);
+ removeSnapshotFromClusterState(snapshot.snapshot(), null, e,
+ new CleanupAfterErrorListener(snapshot, snapshotCreated, userCreateSnapshotListener, e));
}
});
}
@@ -484,7 +496,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
private final CreateSnapshotListener userCreateSnapshotListener;
private final Exception e;
- CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated, CreateSnapshotListener userCreateSnapshotListener, Exception e) {
+ CleanupAfterErrorListener(SnapshotsInProgress.Entry snapshot, boolean snapshotCreated,
+ CreateSnapshotListener userCreateSnapshotListener, Exception e) {
this.snapshot = snapshot;
this.snapshotCreated = snapshotCreated;
this.userCreateSnapshotListener = userCreateSnapshotListener;
@@ -520,7 +533,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
snapshot.includeGlobalState());
} catch (Exception inner) {
inner.addSuppressed(exception);
- logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository", snapshot.snapshot()), inner);
+ logger.warn(() -> new ParameterizedMessage("[{}] failed to close snapshot in repository",
+ snapshot.snapshot()), inner);
}
}
userCreateSnapshotListener.onFailure(e);
@@ -744,8 +758,10 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
} else {
// TODO: Restart snapshot on another node?
snapshotChanged = true;
- logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.key, shardStatus.nodeId());
- shards.put(shardEntry.key, new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
+ logger.warn("failing snapshot of shard [{}] on closed node [{}]",
+ shardEntry.key, shardStatus.nodeId());
+ shards.put(shardEntry.key,
+ new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown"));
}
}
}
@@ -808,7 +824,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
for (final SnapshotsInProgress.Entry snapshot : snapshots.entries()) {
SnapshotsInProgress.Entry updatedSnapshot = snapshot;
if (snapshot.state() == State.STARTED) {
- ImmutableOpenMap shards = processWaitingShards(snapshot.shards(), routingTable);
+ ImmutableOpenMap shards = processWaitingShards(snapshot.shards(),
+ routingTable);
if (shards != null) {
changed = true;
if (!snapshot.state().completed() && completed(shards.values())) {
@@ -831,7 +848,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
@Override
public void onFailure(String source, Exception e) {
- logger.warn(() -> new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
+ logger.warn(() ->
+ new ParameterizedMessage("failed to update snapshot state after shards started from [{}] ", source), e);
}
});
}
@@ -929,12 +947,14 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
* @param shards list of shard statuses
* @return list of failed and closed indices
*/
- private Tuple, Set> indicesWithMissingShards(ImmutableOpenMap shards, MetaData metaData) {
+ private Tuple, Set> indicesWithMissingShards(
+ ImmutableOpenMap shards, MetaData metaData) {
Set missing = new HashSet<>();
Set closed = new HashSet<>();
for (ObjectObjectCursor entry : shards) {
if (entry.value.state() == State.MISSING) {
- if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) {
+ if (metaData.hasIndex(entry.key.getIndex().getName()) &&
+ metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) {
closed.add(entry.key.getIndex().getName());
} else {
missing.add(entry.key.getIndex().getName());
@@ -1130,7 +1150,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
public ClusterState execute(ClusterState currentState) throws Exception {
SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
- throw new ConcurrentSnapshotExecutionException(snapshot, "cannot delete - another snapshot is currently being deleted");
+ throw new ConcurrentSnapshotExecutionException(snapshot,
+ "cannot delete - another snapshot is currently being deleted");
}
RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
if (restoreInProgress != null) {
@@ -1236,7 +1257,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
listener, true);
} catch (Exception ex) {
- logger.warn(() -> new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
+ logger.warn(() ->
+ new ParameterizedMessage("[{}] failed to delete snapshot", snapshot), ex);
}
}
);
@@ -1384,7 +1406,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
IndexMetaData indexMetaData = metaData.index(indexName);
if (indexMetaData == null) {
// The index was deleted before we managed to start the snapshot - mark it as missing.
- builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0), new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index"));
+ builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0),
+ new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index"));
} else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
ShardId shardId = new ShardId(indexMetaData.getIndex(), i);
@@ -1397,17 +1420,22 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
if (indexRoutingTable != null) {
ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
if (primary == null || !primary.assignedToNode()) {
- builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
+ builder.put(shardId,
+ new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated"));
} else if (primary.relocating() || primary.initializing()) {
- // The WAITING state was introduced in V1.2.0 - don't use it if there are nodes with older version in the cluster
+ // The WAITING state was introduced in V1.2.0 -
+ // don't use it if there are nodes with older version in the cluster
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING));
} else if (!primary.started()) {
- builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, "primary shard hasn't been started yet"));
+ builder.put(shardId,
+ new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING,
+ "primary shard hasn't been started yet"));
} else {
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId()));
}
} else {
- builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing routing table"));
+ builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING,
+ "missing routing table"));
}
}
}
diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java
index ae1caa787d4..99f89548524 100644
--- a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java
@@ -111,7 +111,6 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
.put(super.nodeSettings(nodeOrdinal))
// manual collection or upon cluster forming.
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
- .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s")
.build();
}
@@ -120,6 +119,11 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class);
}
+ private void setClusterInfoTimeout(String timeValue) {
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
+ .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), timeValue).build()));
+ }
+
public void testClusterInfoServiceCollectsInformation() throws Exception {
internalCluster().startNodes(2);
assertAcked(prepareCreate("test").setSettings(Settings.builder()
@@ -204,6 +208,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
});
}
+ setClusterInfoTimeout("1s");
// timeouts shouldn't clear the info
timeout.set(true);
info = infoService.refresh();
@@ -237,6 +242,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
// check we recover
blockingActionFilter.blockActions();
+ setClusterInfoTimeout("15s");
info = infoService.refresh();
assertNotNull("info should not be null", info);
assertThat(info.getNodeLeastAvailableDiskUsages().size(), equalTo(2));
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
index 5407cbc54c6..7a1e63245a6 100644
--- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java
@@ -105,13 +105,13 @@ import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERV
import static org.elasticsearch.node.Node.NODE_NAME_SETTING;
import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME;
import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR;
-import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
@@ -946,9 +946,10 @@ public class CoordinatorTests extends ESTestCase {
cluster.runFor(DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "committing setting update");
leader.disconnect();
- cluster.runFor(defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING) + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection");
+ cluster.runFor(defaultMillis(FOLLOWER_CHECK_TIMEOUT_SETTING) + defaultMillis(FOLLOWER_CHECK_INTERVAL_SETTING)
+ + DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "detecting disconnection");
- assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), contains(expectedBlock));
+ assertThat(leader.clusterApplier.lastAppliedClusterState.blocks().global(), hasItem(expectedBlock));
// TODO reboot the leader and verify that the same block is applied when it restarts
}
diff --git a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java
index cfcc7796c5e..48fb6947479 100644
--- a/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/SnapshotDisruptionIT.java
@@ -25,7 +25,9 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
+import org.elasticsearch.cluster.SnapshotDeletionsInProgress;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException;
import org.elasticsearch.cluster.service.ClusterService;
@@ -72,7 +74,6 @@ public class SnapshotDisruptionIT extends ESIntegTestCase {
.build();
}
- @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/36779")
public void testDisruptionOnSnapshotInitialization() throws Exception {
final String idxName = "test";
final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(3);
@@ -133,11 +134,15 @@ public class SnapshotDisruptionIT extends ESIntegTestCase {
logger.info("--> wait until the snapshot is done");
assertBusy(() -> {
- SnapshotsInProgress snapshots = dataNodeClient().admin().cluster().prepareState().setLocal(false).get().getState()
- .custom(SnapshotsInProgress.TYPE);
+ ClusterState state = dataNodeClient().admin().cluster().prepareState().get().getState();
+ SnapshotsInProgress snapshots = state.custom(SnapshotsInProgress.TYPE);
+ SnapshotDeletionsInProgress snapshotDeletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE);
if (snapshots != null && snapshots.entries().size() > 0) {
logger.info("Current snapshot state [{}]", snapshots.entries().get(0).state());
fail("Snapshot is still running");
+ } else if (snapshotDeletionsInProgress != null && snapshotDeletionsInProgress.hasDeletionsInProgress()) {
+ logger.info("Current snapshot deletion state [{}]", snapshotDeletionsInProgress);
+ fail("Snapshot deletion is still running");
} else {
logger.info("Snapshot is no longer in the cluster state");
}
diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
index c2a61263d3a..31005ea83cd 100644
--- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
+++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java
@@ -167,4 +167,10 @@ public class SingleNodeDiscoveryIT extends ESIntegTestCase {
}
}
+ public void testStatePersistence() throws Exception {
+ createIndex("test");
+ internalCluster().fullRestart();
+ assertTrue(client().admin().indices().prepareExists("test").get().isExists());
+ }
+
}
diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java
index d34d3e9d6a7..c3dfad2d437 100644
--- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java
+++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java
@@ -69,7 +69,7 @@ public class SingleNodeDiscoveryTests extends ESTestCase {
clusterState.set(clusterStateSupplier.get());
listener.onSuccess(source);
}
- });
+ }, null);
discovery.start();
discovery.startInitialJoin();
final DiscoveryNodes nodes = clusterState.get().nodes();
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 05c02773067..696d50a93a9 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -172,6 +172,7 @@ import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANS
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;
import static org.hamcrest.CoreMatchers.instanceOf;
@@ -1801,7 +1802,7 @@ public class InternalEngineTests extends EngineTestCase {
int opsPerformed = 0;
long lastOpVersion = currentOpVersion;
long lastOpSeqNo = UNASSIGNED_SEQ_NO;
- long lastOpTerm = 0;
+ long lastOpTerm = UNASSIGNED_PRIMARY_TERM;
final AtomicLong currentTerm = new AtomicLong(1);
BiFunction indexWithVersion = (version, index) -> new Engine.Index(index.uid(), index.parsedDoc(),
UNASSIGNED_SEQ_NO, currentTerm.get(), version, index.versionType(), index.origin(), index.startTime(),
@@ -1900,7 +1901,7 @@ public class InternalEngineTests extends EngineTestCase {
docDeleted = true;
lastOpVersion = result.getVersion();
lastOpSeqNo = UNASSIGNED_SEQ_NO;
- lastOpTerm = 0;
+ lastOpTerm = UNASSIGNED_PRIMARY_TERM;
opsPerformed++;
}
}
@@ -4309,7 +4310,7 @@ public class InternalEngineTests extends EngineTestCase {
final long seqNo;
DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), get.uid());
if (docIdAndSeqNo == null) {
- primaryTerm = 0;
+ primaryTerm = UNASSIGNED_PRIMARY_TERM;
seqNo = UNASSIGNED_SEQ_NO;
} else {
seqNo = docIdAndSeqNo.seqNo;
diff --git a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
index 0dc6b2573ea..ad8673d13ea 100644
--- a/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
+++ b/server/src/test/java/org/elasticsearch/index/get/GetResultTests.java
@@ -44,6 +44,7 @@ import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;
import static org.elasticsearch.index.get.DocumentFieldTests.randomDocumentField;
+import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
@@ -209,7 +210,7 @@ public class GetResultTests extends ESTestCase {
}
} else {
seqNo = UNASSIGNED_SEQ_NO;
- primaryTerm = 0;
+ primaryTerm = UNASSIGNED_PRIMARY_TERM;
version = -1;
exists = false;
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
index 56caa94466c..f149620b028 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java
@@ -31,10 +31,8 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
@@ -71,10 +69,8 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
-import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.core.Is.is;
public class TextFieldMapperTests extends ESSingleNodeTestCase {
@@ -817,18 +813,13 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
public void testIndexPrefixMapping() throws IOException {
- QueryShardContext queryShardContext = indexService.newQueryShardContext(
- randomInt(20), null, () -> {
- throw new UnsupportedOperationException();
- }, null);
-
{
String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field")
.field("type", "text")
.field("analyzer", "standard")
.startObject("index_prefixes")
- .field("min_chars", 1)
+ .field("min_chars", 2)
.field("max_chars", 10)
.endObject()
.endObject().endObject()
@@ -837,16 +828,7 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping));
assertEquals(mapping, mapper.mappingSource().toString());
- assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=1:10"));
-
- FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field");
- MappedFieldType fieldType = fieldMapper.fieldType;
-
- Query q = fieldType.prefixQuery("goin", CONSTANT_SCORE_REWRITE, queryShardContext);
-
- assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q);
- q = fieldType.prefixQuery("internationalisatio", CONSTANT_SCORE_REWRITE, queryShardContext);
- assertEquals(new PrefixQuery(new Term("field", "internationalisatio")), q);
+ assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:10"));
ParsedDocument doc = mapper.parse(SourceToParse.source("test", "type", "1", BytesReference
.bytes(XContentFactory.jsonBuilder()
@@ -870,17 +852,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
CompressedXContent json = new CompressedXContent(mapping);
DocumentMapper mapper = parser.parse("type", json);
- FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field");
- MappedFieldType fieldType = fieldMapper.fieldType;
+ assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:5"));
- Query q1 = fieldType.prefixQuery("g", CONSTANT_SCORE_REWRITE, queryShardContext);
- assertThat(q1, instanceOf(PrefixQuery.class));
- Query q2 = fieldType.prefixQuery("go", CONSTANT_SCORE_REWRITE, queryShardContext);
- assertThat(q2, instanceOf(ConstantScoreQuery.class));
- Query q5 = fieldType.prefixQuery("going", CONSTANT_SCORE_REWRITE, queryShardContext);
- assertThat(q5, instanceOf(ConstantScoreQuery.class));
- Query q6 = fieldType.prefixQuery("goings", CONSTANT_SCORE_REWRITE, queryShardContext);
- assertThat(q6, instanceOf(PrefixQuery.class));
}
{
@@ -898,10 +871,8 @@ public class TextFieldMapperTests extends ESSingleNodeTestCase {
.endObject().endObject()
.endObject().endObject());
- IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
- indexService.mapperService()
- .merge("type", new CompressedXContent(illegalMapping), MergeReason.MAPPING_UPDATE);
- });
+ IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () ->
+ indexService.mapperService().merge("type", new CompressedXContent(illegalMapping), MergeReason.MAPPING_UPDATE));
assertThat(e.getMessage(), containsString("Field [field._index_prefix] is defined twice in [type]"));
}
diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
index 877553bacf9..2af659b6e20 100644
--- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
+++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldTypeTests.java
@@ -20,11 +20,18 @@ package org.elasticsearch.index.mapper;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Term;
+import org.apache.lucene.search.AutomatonQuery;
+import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.PrefixQuery;
+import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.Automata;
+import org.apache.lucene.util.automaton.Automaton;
+import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.common.unit.Fuzziness;
import org.junit.Before;
@@ -32,6 +39,8 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
+import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE;
+
public class TextFieldTypeTests extends FieldTypeTestCase {
@Override
protected MappedFieldType createDefaultFieldType() {
@@ -143,4 +152,21 @@ public class TextFieldTypeTests extends FieldTypeTestCase {
() -> ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true));
assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage());
}
+
+ public void testIndexPrefixes() {
+ TextFieldMapper.TextFieldType ft = new TextFieldMapper.TextFieldType();
+ ft.setName("field");
+ ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType("field._index_prefix", 2, 10));
+
+ Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, null);
+ assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field._index_prefix", "goin"))), q);
+
+ q = ft.prefixQuery("internationalisatio", CONSTANT_SCORE_REWRITE, null);
+ assertEquals(new PrefixQuery(new Term("field", "internationalisatio")), q);
+
+ q = ft.prefixQuery("g", CONSTANT_SCORE_REWRITE, null);
+ Automaton automaton
+ = Operations.concatenate(Arrays.asList(Automata.makeChar('g'), Automata.makeAnyChar()));
+ assertEquals(new ConstantScoreQuery(new AutomatonQuery(new Term("field._index_prefix", "g*"), automaton)), q);
+ }
}
diff --git a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
index 06ab542ebc0..11f704e3b12 100644
--- a/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/IntervalQueryBuilderTests.java
@@ -25,11 +25,16 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.intervals.IntervalQuery;
import org.apache.lucene.search.intervals.Intervals;
import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import static org.hamcrest.Matchers.equalTo;
@@ -277,4 +282,48 @@ public class IntervalQueryBuilderTests extends AbstractQueryTestCase new IntervalFilterScript() {
+ @Override
+ public boolean execute(Interval interval) {
+ return interval.getStart() > 3;
+ }
+ };
+
+ ScriptService scriptService = new ScriptService(Settings.EMPTY, Collections.emptyMap(), Collections.emptyMap()){
+ @Override
+ @SuppressWarnings("unchecked")
+ public FactoryType compile(Script script, ScriptContext context) {
+ assertEquals(IntervalFilterScript.CONTEXT, context);
+ assertEquals(new Script("interval.start > 3"), script);
+ return (FactoryType) factory;
+ }
+ };
+
+ QueryShardContext baseContext = createShardContext();
+ QueryShardContext context = new QueryShardContext(baseContext.getShardId(), baseContext.getIndexSettings(),
+ null, null, baseContext.getMapperService(), null,
+ scriptService,
+ null, null, null, null, null, null);
+
+ String json = "{ \"intervals\" : { \"" + STRING_FIELD_NAME + "\": { " +
+ "\"match\" : { " +
+ " \"query\" : \"term1\"," +
+ " \"filter\" : { " +
+ " \"script\" : { " +
+ " \"source\" : \"interval.start > 3\" } } } } } }";
+
+ IntervalQueryBuilder builder = (IntervalQueryBuilder) parseQuery(json);
+ Query q = builder.toQuery(context);
+
+
+ IntervalQuery expected = new IntervalQuery(STRING_FIELD_NAME,
+ new IntervalsSourceProvider.ScriptFilterSource(Intervals.term("term1"), "interval.start > 3", null));
+ assertEquals(expected, q);
+
+ }
+
+
}
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java
index 99e21d47604..7d06e25519a 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java
@@ -83,7 +83,7 @@ public class TranslogHeaderTests extends ESTestCase {
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel);
assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID));
- assertThat(inHeader.getPrimaryTerm(), equalTo(TranslogHeader.UNKNOWN_PRIMARY_TERM));
+ assertThat(inHeader.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM));
assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position()));
}
expectThrows(TranslogCorruptedException.class, () -> {
diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 619a09cc188..ccb9fb05dfd 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -367,7 +367,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
logger.info("--> start 2 nodes");
Client client = client();
- assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 0)));
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
+ .put("number_of_replicas", 0)));
ensureGreen();
logger.info("--> indexing some data");
@@ -392,7 +393,10 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
logger.info("--> snapshot");
- client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(false)
+ .setIndices("test-idx")
+ .get();
logger.info("--> waiting for block to kick in");
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
@@ -415,7 +419,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
nodes.add(internalCluster().startNode());
Client client = client();
- assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 0)));
+ assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
+ .put("number_of_replicas", 0)));
ensureGreen();
logger.info("--> indexing some data");
@@ -443,7 +448,10 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
int numberOfFilesBeforeSnapshot = numberOfFiles(repo);
logger.info("--> snapshot");
- client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
+ client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
+ .setWaitForCompletion(false)
+ .setIndices("test-idx")
+ .get();
logger.info("--> waiting for block to kick in");
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
@@ -509,7 +517,8 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
ensureGreen("test-idx-all");
logger.info("--> create an index that will be closed");
- assertAcked(prepareCreate("test-idx-closed", 1, Settings.builder().put("number_of_shards", 4).put("number_of_replicas", 0)));
+ assertAcked(prepareCreate("test-idx-closed", 1, Settings.builder().put("number_of_shards", 4)
+ .put("number_of_replicas", 0)));
ensureGreen("test-idx-closed");
logger.info("--> indexing some data into test-idx-all");
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index 35e813756bc..fcb06acd60a 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -1345,7 +1345,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
logger.info("--> make sure snapshot doesn't exist");
- assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
+ assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo")
+ .addSnapshots("test-snap-1"), SnapshotMissingException.class);
for (String index : indices) {
assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId())));
@@ -1384,7 +1385,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap-1").get();
logger.info("--> make sure snapshot doesn't exist");
- assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1"), SnapshotMissingException.class);
+ assertThrows(client.admin().cluster().prepareGetSnapshots("test-repo")
+ .addSnapshots("test-snap-1"), SnapshotMissingException.class);
}
public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception {
@@ -2014,7 +2016,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
logger.info("--> waiting for block to kick in");
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
- logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot", blockedNode);
+ logger.info("--> execution was blocked on node [{}], checking snapshot status with specified repository and snapshot",
+ blockedNode);
SnapshotsStatusResponse response = client.admin().cluster().prepareSnapshotStatus("test-repo").execute().actionGet();
assertThat(response.getSnapshots().size(), equalTo(1));
SnapshotStatus snapshotStatus = response.getSnapshots().get(0);
diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
index 9dca64d5b83..2b634385b29 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
@@ -346,7 +346,8 @@ public class MockRepository extends FsRepository {
}
@Override
- public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
+ public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
+ throws IOException {
maybeIOExceptionOrBlock(blobName);
super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
if (RandomizedContext.current().getRandom().nextBoolean()) {
diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
index 0379012d2b8..8044655b44e 100644
--- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
+++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
@@ -21,6 +21,7 @@ package org.elasticsearch.script;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorable;
+import org.elasticsearch.index.query.IntervalFilterScript;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Doc;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Field;
import org.elasticsearch.index.similarity.ScriptedSimilarity.Query;
@@ -287,6 +288,9 @@ public class MockScriptEngine implements ScriptEngine {
} else if (context.instanceClazz.equals(ScriptedMetricAggContexts.ReduceScript.class)) {
ScriptedMetricAggContexts.ReduceScript.Factory factory = mockCompiled::createMetricAggReduceScript;
return context.factoryClazz.cast(factory);
+ } else if (context.instanceClazz.equals(IntervalFilterScript.class)) {
+ IntervalFilterScript.Factory factory = mockCompiled::createIntervalFilterScript;
+ return context.factoryClazz.cast(factory);
}
ContextCompiler compiler = contexts.get(context);
if (compiler != null) {
@@ -353,6 +357,15 @@ public class MockScriptEngine implements ScriptEngine {
public ScriptedMetricAggContexts.ReduceScript createMetricAggReduceScript(Map params, List