Bump 7.x branch to version 7.2.0
This commit adds the 7.2.0 version constant to the 7.x branch, and bumps BWC logic accordingly.
parent c86f797df8
commit 7f3ab4524f
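Most of the code changes below follow a single wire-format BWC pattern: a field that first appears on the wire in this release is read and written only when the peer's stream version is new enough, so the bump renames each gate from V_7_1_0 to V_7_2_0 (the features were gated on 7.1.0 while that was still the version of the 7.x branch). A minimal sketch of the pattern, assuming the standard 7.x StreamInput/StreamOutput API; the ExampleMessage class and its field are illustrative, not part of this commit:

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Hypothetical message with one field added to the wire format in 7.2.0.
class ExampleMessage {
    private String numericType;

    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
            numericType = in.readOptionalString(); // a 7.2.0+ peer sent the field
        } else {
            numericType = null;                    // older peer: fall back to a default
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
            out.writeOptionalString(numericType);  // only sent to peers that can parse it
        }
    }
}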
@@ -1,4 +1,4 @@
-elasticsearch = 7.1.0
+elasticsearch = 7.2.0
 lucene = 8.0.0
 
 bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691
@@ -1,8 +1,8 @@
-:version: 7.1.0
+:version: 7.2.0
 ////
 bare_version never includes -alpha or -beta
 ////
-:bare_version: 7.1.0
+:bare_version: 7.2.0
 :major-version: 7.x
 :prev-major-version: 6.x
 :lucene_version: 8.0.0
@@ -6,10 +6,12 @@
 This section summarizes the most important changes in each release. For the
 full list, see <<es-release-notes>> and <<breaking-changes>>.
 
+* <<release-highlights-7.2.0>>
 * <<release-highlights-7.1.0>>
 * <<release-highlights-7.0.0>>
 
 --
 
+include::highlights-7.2.0.asciidoc[]
 include::highlights-7.1.0.asciidoc[]
 include::highlights-7.0.0.asciidoc[]
@@ -23,8 +23,8 @@ setup:
 ---
 "Test use_field":
   - skip:
-      version: " - 7.9.99" # TODO change to 7.0.99 after backport
-      reason: "Implemented in 7.1"
+      version: " - 7.1.99"
+      reason: "Implemented in 7.2"
   - do:
       search:
         index: test
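The YAML test changes all widen skip ranges in the same way. By the REST test runner's convention (an assumption consistent with every hunk here), " - 7.1.99" means "skip on any version up to and including 7.1.99"; the synthetic .99 patch number covers the whole 7.1 line, so the test only runs against 7.2.0 and later. A hedged sketch of that predicate:

import org.elasticsearch.Version;

// Sketch of the assumed skip-range semantics: an empty lower bound means the
// range covers every version up to and including the upper bound.
final class SkipRange {
    static boolean skipped(Version nodeVersion) {
        // 7.1.99 is never a real release; any 7.1.x compares at or below it,
        // so only 7.2.0+ nodes actually run the test body.
        return nodeVersion.onOrBefore(Version.fromString("7.1.99"));
    }
}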
@@ -1,8 +1,8 @@
 setup:
   - skip:
       features: headers
-      version: " - 7.0.99"
-      reason: "dense_vector functions were introduced in 7.1.0"
+      version: " - 7.1.99"
+      reason: "dense_vector functions were introduced in 7.2.0"
 
   - do:
       indices.create:
@@ -1,8 +1,8 @@
 setup:
   - skip:
       features: headers
-      version: " - 7.0.99"
-      reason: "dense_vector functions were introduced in 7.1.0"
+      version: " - 7.1.99"
+      reason: "dense_vector functions were introduced in 7.2.0"
 
   - do:
       indices.create:
@@ -1,7 +1,7 @@
 setup:
   - skip:
-      version: " - 7.1.0"
-      reason: "added in 7.1.0"
+      version: " - 7.1.99"
+      reason: "added in 7.2.0"
 
   - do:
       indices.create:
@@ -1,7 +1,7 @@
 setup:
   - skip:
-      version: " - 7.1.0"
-      reason: "added in 7.1.0"
+      version: " - 7.1.99"
+      reason: "added in 7.2.0"
 
   - do:
       indices.create:
@@ -1,8 +1,8 @@
 setup:
   - skip:
       features: headers
-      version: " - 7.0.99"
-      reason: "sparse_vector functions were introduced in 7.1.0"
+      version: " - 7.1.99"
+      reason: "sparse_vector functions were introduced in 7.2.0"
 
   - do:
       indices.create:
@@ -1,8 +1,8 @@
 setup:
   - skip:
       features: headers
-      version: " - 7.0.99"
-      reason: "sparse_vector functions were introduced in 7.1.0"
+      version: " - 7.1.99"
+      reason: "sparse_vector functions were introduced in 7.2.0"
 
   - do:
       indices.create:
@@ -1058,7 +1058,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             closeIndex(index);
         }
 
-        if (getOldClusterVersion().onOrAfter(Version.V_7_1_0)) {
+        if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) {
             ensureGreenLongWait(index);
             assertClosedIndex(index, true);
         } else {
@@ -365,7 +365,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
         }
 
         final Version indexVersionCreated = indexVersionCreated(indexName);
-        if (indexVersionCreated.onOrAfter(Version.V_7_1_0)) {
+        if (indexVersionCreated.onOrAfter(Version.V_7_2_0)) {
             // index was created on a version that supports the replication of closed indices,
             // so we expect the index to be closed and replicated
             ensureGreen(indexName);

@@ -398,7 +398,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
             closeIndex(indexName);
         }
 
-        if (minimumNodeVersion.onOrAfter(Version.V_7_1_0)) {
+        if (minimumNodeVersion.onOrAfter(Version.V_7_2_0)) {
             // index is created on a version that supports the replication of closed indices,
             // so we expect the index to be closed and replicated
             ensureGreen(indexName);
@@ -50,10 +50,10 @@
           )
         $/
 ---
-"Test cat indices output for closed index (pre 7.1.0)":
+"Test cat indices output for closed index (pre 7.2.0)":
   - skip:
-      version: "7.1.0 - "
-      reason: "closed indices are replicated starting version 7.1.0"
+      version: "7.2.0 - "
+      reason: "closed indices are replicated starting version 7.2.0"
 
   - do:
       indices.create:

@@ -93,8 +93,8 @@
 ---
 "Test cat indices output for closed index":
   - skip:
-      version: " - 7.0.99"
-      reason: "closed indices are replicated starting version 7.1.0"
+      version: " - 7.1.99"
+      reason: "closed indices are replicated starting version 7.2.0"
 
   - do:
       indices.create:
@@ -79,8 +79,8 @@
 ---
 "Test cat recovery output for closed index":
   - skip:
-      version: " - 7.0.99"
-      reason: closed indices are replicated starting version 7.1.0
+      version: " - 7.1.99"
+      reason: closed indices are replicated starting version 7.2.0
 
   - do:
       indices.create:
@@ -1,8 +1,8 @@
 ---
 "Help":
   - skip:
-      version: " - 7.0.99"
-      reason: external refresh stats were added in 7.1.0
+      version: " - 7.1.99"
+      reason: external refresh stats were added in 7.2.0
   - do:
       cat.shards:
         help: true
@@ -50,8 +50,8 @@
 ---
 "Cluster shard allocation explanation test with a closed index":
   - skip:
-      version: " - 7.0.99"
-      reason: closed indices are replicated starting version 7.1.0
+      version: " - 7.1.99"
+      reason: closed indices are replicated starting version 7.2.0
 
   - do:
       indices.create:
@@ -133,10 +133,10 @@
   - is_true: indices.test_index.shards
 
 ---
-"cluster health with closed index (pre 7.1.0)":
+"cluster health with closed index (pre 7.2.0)":
   - skip:
-      version: "7.1.0 - "
-      reason: "closed indices are replicated starting version 7.1.0"
+      version: "7.2.0 - "
+      reason: "closed indices are replicated starting version 7.2.0"
 
   - do:
       indices.create:

@@ -209,8 +209,8 @@
 ---
 "cluster health with closed index":
   - skip:
-      version: " - 7.0.99"
-      reason: "closed indices are replicated starting version 7.1.0"
+      version: " - 7.1.99"
+      reason: "closed indices are replicated starting version 7.2.0"
 
   - do:
       indices.create:
@@ -31,8 +31,8 @@ setup:
 ---
 "cluster health with expand_wildcards":
   - skip:
-      version: " - 7.0.99"
-      reason: "indices options has been introduced in cluster health request starting version 7.1.0"
+      version: " - 7.1.99"
+      reason: "indices options has been introduced in cluster health request starting version 7.2.0"
 
   - do:
       cluster.health:
@@ -82,8 +82,8 @@
 "get cluster stats returns packaging types":
 
   - skip:
-      version: " - 7.0.99"
-      reason: "packaging types are added for v7.1.0"
+      version: " - 7.1.99"
+      reason: "packaging types are added for v7.2.0"
 
   - do:
       cluster.stats: {}
@@ -303,8 +303,8 @@ setup:
 ---
 "Field caps with include_unmapped":
   - skip:
-      version: " - 7.0.99"
-      reason: include_unmapped has been added in 7.1.0
+      version: " - 7.1.99"
+      reason: include_unmapped has been added in 7.2.0
 
   - do:
       field_caps:
@@ -56,8 +56,8 @@
 ---
 "Flush parameters validation":
   - skip:
-      version: " - 7.0.99"
-      reason: flush parameters validation is introduced in 7.1.0
+      version: " - 7.1.99"
+      reason: flush parameters validation is introduced in 7.2.0
   - do:
       indices.create:
         index: test
@@ -65,8 +65,9 @@
 ---
 "Close index with wait_for_active_shards set to all":
   - skip:
-      version: " - 7.0.99"
-      reason: "closed indices are replicated starting version 7.1.0"
+      version: " - 7.1.99"
+      reason: "closed indices are replicated starting version 7.2.0"
 
   - do:
       indices.create:
@@ -42,8 +42,8 @@
 ---
 "Indices recovery test for closed index":
   - skip:
-      version: " - 7.0.99"
-      reason: closed indices are replicated starting version 7.1.0
+      version: " - 7.1.99"
+      reason: closed indices are replicated starting version 7.2.0
 
   - do:
       indices.create:
@@ -16,7 +16,7 @@ setup:
 "Segment Stats":
 
   - skip:
-      version: " - 7.0.99"
+      version: " - 7.1.99"
       reason: forbid_closed_indices is not supported in earlier version
 
   - do:
@@ -445,8 +445,8 @@ setup:
 ---
 "Composite aggregation with unmapped field":
   - skip:
-      version: " - 7.0.99"
-      reason: starting in 7.1 the composite aggregation handles unmapped fields as keywords
+      version: " - 7.1.99"
+      reason: starting in 7.2 the composite aggregation handles unmapped fields as keywords
 
   - do:
       search:

@@ -526,8 +526,8 @@ setup:
 ---
 "Missing source":
   - skip:
-      version: " - 7.0.99"
-      reason: null/empty sources disallowed in 7.1
+      version: " - 7.1.99"
+      reason: null/empty sources disallowed in 7.2
 
   - do:
       catch: /Composite \[sources\] cannot be null or empty/

@@ -555,8 +555,8 @@ setup:
 ---
 "Duplicate sources":
   - skip:
-      version: " - 7.0.99"
-      reason: duplicate names disallowed in 7.1
+      version: " - 7.1.99"
+      reason: duplicate names disallowed in 7.2
 
   - do:
       catch: /Composite source names must be unique, found duplicates[:] \[keyword\]/
@@ -326,8 +326,8 @@ setup:
 ---
 "Test overlapping":
   - skip:
-      version: " - 7.9.99"
-      reason: "Implemented in 7.1"
+      version: " - 7.1.99"
+      reason: "Implemented in 7.2"
   - do:
       search:
         index: test

@@ -349,8 +349,8 @@ setup:
 ---
 "Test before":
   - skip:
-      version: " - 7.9.99"
-      reason: "Implemented in 7.1"
+      version: " - 7.1.99"
+      reason: "Implemented in 7.2"
   - do:
       search:
         index: test

@@ -369,8 +369,8 @@ setup:
 ---
 "Test after":
   - skip:
-      version: " - 7.9.99"
-      reason: "Implemented in 7.1"
+      version: " - 7.1.99"
+      reason: "Implemented in 7.2"
   - do:
       search:
         index: test
@@ -1,7 +1,7 @@
 setup:
   - skip:
-      version: " - 7.0.99"
-      reason: "Implemented in 7.1"
+      version: " - 7.1.99"
+      reason: "Implemented in 7.2"
 
   - do:
       indices.create:
@@ -1,7 +1,7 @@
 setup:
   - skip:
-      version: " - 7.1.0"
-      reason: "added in 7.1.0"
+      version: " - 7.1.99"
+      reason: "added in 7.2.0"
 
   - do:
       indices.create:
@@ -134,8 +134,9 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
     public static final int V_7_1_0_ID = 7010099;
     public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
-    public static final Version CURRENT = V_7_1_0;
+    public static final int V_7_2_0_ID = 7020099;
+    public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
+    public static final Version CURRENT = V_7_2_0;
 
     static {
         assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["

@@ -148,6 +149,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
 
     public static Version fromId(int id) {
         switch (id) {
+            case V_7_2_0_ID:
+                return V_7_2_0;
             case V_7_1_0_ID:
                 return V_7_1_0;
             case V_7_0_1_ID:
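The numeric IDs above appear to encode the version components positionally (an inference from the 7010099/7020099 pair, not stated by the commit): major * 1,000,000 + minor * 10,000 + revision * 100 + a build suffix, with 99 marking a release build. A quick check:

// Sketch verifying the apparent encoding of the version ID constants.
final class VersionIdCheck {
    static int id(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(id(7, 1, 0, 99)); // 7010099 == V_7_1_0_ID
        System.out.println(id(7, 2, 0, 99)); // 7020099 == V_7_2_0_ID
    }
}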
@@ -84,7 +84,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
         if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
             waitForNoInitializingShards = in.readBoolean();
         }
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             indicesOptions = IndicesOptions.readIndicesOptions(in);
         } else {
             indicesOptions = IndicesOptions.lenientExpandOpen();

@@ -121,7 +121,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
         if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
             out.writeBoolean(waitForNoInitializingShards);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             indicesOptions.writeIndicesOptions(out);
         }
     }
@@ -118,7 +118,7 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im
         super.readFrom(in);
         indices = in.readStringArray();
         indicesOptions = IndicesOptions.readIndicesOptions(in);
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             waitForActiveShards = ActiveShardCount.readFrom(in);
         } else {
             waitForActiveShards = ActiveShardCount.NONE;

@@ -130,7 +130,7 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im
         super.writeTo(out);
         out.writeStringArray(indices);
         indicesOptions.writeIndicesOptions(out);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             waitForActiveShards.writeTo(out);
         }
     }
@@ -37,7 +37,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse {
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             readShardsAcknowledged(in);
         }
     }

@@ -45,7 +45,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             writeShardsAcknowledged(out);
         }
     }
@@ -64,7 +64,7 @@ public class CommonStatsFlags implements Writeable, Cloneable {
         fieldDataFields = in.readStringArray();
         completionDataFields = in.readStringArray();
         includeSegmentFileSizes = in.readBoolean();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             includeUnloadedSegments = in.readBoolean();
         }
     }

@@ -82,7 +82,7 @@ public class CommonStatsFlags implements Writeable, Cloneable {
         out.writeStringArrayNullable(fieldDataFields);
         out.writeStringArrayNullable(completionDataFields);
         out.writeBoolean(includeSegmentFileSizes);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeBoolean(includeUnloadedSegments);
         }
     }
@@ -84,7 +84,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
         indices = in.readStringArray();
         indicesOptions = IndicesOptions.readIndicesOptions(in);
         mergeResults = in.readBoolean();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             includeUnmapped = in.readBoolean();
         } else {
             includeUnmapped = false;

@@ -98,7 +98,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
         out.writeStringArray(indices);
         indicesOptions.writeIndicesOptions(out);
         out.writeBoolean(mergeResults);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeBoolean(includeUnmapped);
         }
     }
@@ -108,7 +108,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             indices = in.readStringArray();
         } else {
             indices = Strings.EMPTY_ARRAY;

@@ -124,7 +124,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeStringArray(indices);
         }
         out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
@@ -719,7 +719,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         } else {
             settingsVersion = 1;
         }
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             aliasesVersion = in.readVLong();
         } else {
             aliasesVersion = 1;

@@ -755,7 +755,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
             out.writeVLong(settingsVersion);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeVLong(aliasesVersion);
         }
         out.writeByte(state.id);

@@ -803,7 +803,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         } else {
             builder.settingsVersion(1);
         }
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             builder.aliasesVersion(in.readVLong());
         } else {
             builder.aliasesVersion(1);

@@ -860,7 +860,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
             out.writeVLong(settingsVersion);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeVLong(aliasesVersion);
         }
         out.writeInt(routingNumShards);

@@ -1462,8 +1462,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
         if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_6_5_0)) {
             assert settingsVersion : "settings version should be present for indices created on or after 6.5.0";
         }
-        if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_7_1_0)) {
-            assert aliasesVersion : "aliases version should be present for indices created on or after 7.1.0";
+        if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_7_2_0)) {
+            assert aliasesVersion : "aliases version should be present for indices created on or after 7.2.0";
         }
         return builder.build();
     }
@@ -402,7 +402,7 @@ public class MetaDataIndexStateService {
 
         // Remove the index routing table of closed indices if the cluster is in a mixed version
         // that does not support the replication of closed indices
-        final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(Version.V_7_1_0);
+        final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(Version.V_7_2_0);
 
         final MetaData.Builder metadata = MetaData.builder(currentState.metaData());
         final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
@@ -130,7 +130,7 @@ public class ReadOnlyEngine extends Engine {
         // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
         // that guarantee that all operations have been flushed to Lucene.
         final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
-        if (indexVersionCreated.onOrAfter(Version.V_7_1_0) ||
+        if (indexVersionCreated.onOrAfter(Version.V_7_2_0) ||
             (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) {
             if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
                 throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo()
@@ -120,7 +120,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
         this.ordered = in.readBoolean();
         this.analyzer = in.readOptionalString();
         this.filter = in.readOptionalWriteable(IntervalFilter::new);
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             this.useField = in.readOptionalString();
         }
         else {

@@ -186,7 +186,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
         out.writeBoolean(ordered);
         out.writeOptionalString(analyzer);
         out.writeOptionalWriteable(filter);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeOptionalString(useField);
         }
     }
@@ -52,7 +52,7 @@ public class RefreshStats implements Streamable, Writeable, ToXContentFragment {
     public RefreshStats(StreamInput in) throws IOException {
         total = in.readVLong();
         totalTimeInMillis = in.readVLong();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             externalTotal = in.readVLong();
             externalTotalTimeInMillis = in.readVLong();
         }

@@ -63,7 +63,7 @@ public class RefreshStats implements Streamable, Writeable, ToXContentFragment {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(total);
         out.writeVLong(totalTimeInMillis);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeVLong(externalTotal);
             out.writeVLong(externalTotalTimeInMillis);
         }
@@ -52,7 +52,7 @@ public class RecoveryCleanFilesRequest extends TransportRequest {
         shardId = ShardId.readShardId(in);
         snapshotFiles = new Store.MetadataSnapshot(in);
         totalTranslogOps = in.readVInt();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             globalCheckpoint = in.readZLong();
         } else {
             globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;

@@ -66,7 +66,7 @@ public class RecoveryCleanFilesRequest extends TransportRequest {
         shardId.writeTo(out);
         snapshotFiles.writeTo(out);
         out.writeVInt(totalTranslogOps);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeZLong(globalCheckpoint);
         }
     }
@@ -289,7 +289,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
             state().getTranslog().totalOperations(totalTranslogOps);
             indexShard().openEngineAndSkipTranslogRecovery();
             assert indexShard.getGlobalCheckpoint() >= indexShard.seqNoStats().getMaxSeqNo() ||
-                indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_1_0)
+                indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0)
                 : "global checkpoint is not initialized [" + indexShard.seqNoStats() + "]";
             return null;
         });

@@ -400,7 +400,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
             store.ensureIndexHasHistoryUUID();
         }
         assert globalCheckpoint >= Long.parseLong(sourceMetaData.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO))
-            || indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_1_0) :
+            || indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0) :
             "invalid global checkpoint[" + globalCheckpoint + "] source_meta_data [" + sourceMetaData.getCommitUserData() + "]";
         final String translogUUID = Translog.createEmptyTranslog(
             indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm());
@@ -121,7 +121,7 @@ public class RecoveryTranslogOperationsRequest extends TransportRequest {
         } else {
             retentionLeases = RetentionLeases.EMPTY;
         }
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             mappingVersionOnPrimary = in.readVLong();
         } else {
             mappingVersionOnPrimary = Long.MAX_VALUE;

@@ -144,7 +144,7 @@ public class RecoveryTranslogOperationsRequest extends TransportRequest {
         if (out.getVersion().onOrAfter(Version.V_6_7_0)) {
             retentionLeases.writeTo(out);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeVLong(mappingVersionOnPrimary);
         }
     }
@@ -70,7 +70,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr
         super(in);
         count = in.readVLong();
         if (in.readBoolean()) {
-            if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+            if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
                 centroid = new GeoPoint(in.readDouble(), in.readDouble());
             } else {
                 final long hash = in.readLong();

@@ -87,7 +87,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr
         out.writeVLong(count);
         if (centroid != null) {
             out.writeBoolean(true);
-            if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+            if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
                 out.writeDouble(centroid.lat());
                 out.writeDouble(centroid.lon());
             } else {
@@ -286,7 +286,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
         if (hasAggs = in.readBoolean()) {
             aggregations = InternalAggregations.readAggregations(in);
         }
-        if (in.getVersion().before(Version.V_7_1_0)) {
+        if (in.getVersion().before(Version.V_7_2_0)) {
             List<SiblingPipelineAggregator> pipelineAggregators = in.readNamedWriteableList(PipelineAggregator.class).stream()
                 .map(a -> (SiblingPipelineAggregator) a).collect(Collectors.toList());
             if (hasAggs && pipelineAggregators.isEmpty() == false) {

@@ -339,7 +339,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
             out.writeBoolean(true);
             aggregations.writeTo(out);
         }
-        if (out.getVersion().before(Version.V_7_1_0)) {
+        if (out.getVersion().before(Version.V_7_2_0)) {
             //Earlier versions expect sibling pipeline aggs separately as they used to be set to QuerySearchResult directly,
             //while later versions expect them in InternalAggregations. Note that despite serializing sibling pipeline aggs as part of
             //InternalAggregations is supported since 6.7.0, the shards set sibling pipeline aggs to InternalAggregations only from 7.1 on.
@@ -130,7 +130,7 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
         if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
             nestedSort = in.readOptionalWriteable(NestedSortBuilder::new);
         }
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             numericType = in.readOptionalString();
         }
     }

@@ -147,7 +147,7 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
         if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
             out.writeOptionalWriteable(nestedSort);
         }
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeOptionalString(numericType);
         }
     }
@@ -57,9 +57,9 @@ public class ClusterHealthRequestTests extends ESTestCase {
 
     public void testBwcSerialization() throws Exception {
         for (int runs = 0; runs < randomIntBetween(5, 20); runs++) {
-            // Generate a random cluster health request in version < 7.1.0 and serializes it
+            // Generate a random cluster health request in version < 7.2.0 and serializes it
             final BytesStreamOutput out = new BytesStreamOutput();
-            out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_1_0)));
+            out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0)));
 
             final ClusterHealthRequest expected = randomRequest();
             {

@@ -112,9 +112,9 @@ public class ClusterHealthRequestTests extends ESTestCase {
             // Generate a random cluster health request in current version
             final ClusterHealthRequest expected = randomRequest();
 
-            // Serialize to node in version < 7.1.0
+            // Serialize to node in version < 7.2.0
             final BytesStreamOutput out = new BytesStreamOutput();
-            out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_1_0)));
+            out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0)));
             expected.writeTo(out);
 
             // Deserialize and check the cluster health request
@@ -54,7 +54,7 @@ public class CloseIndexRequestTests extends ESTestCase {
         {
             final CloseIndexRequest request = randomRequest();
             try (BytesStreamOutput out = new BytesStreamOutput()) {
-                out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
+                out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
                 request.writeTo(out);
 
                 try (StreamInput in = out.bytes().streamInput()) {

@@ -77,7 +77,7 @@ public class CloseIndexRequestTests extends ESTestCase {
 
             final CloseIndexRequest deserializedRequest = new CloseIndexRequest();
             try (StreamInput in = out.bytes().streamInput()) {
-                in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
+                in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
                 deserializedRequest.readFrom(in);
             }
             assertEquals(sample.getParentTask(), deserializedRequest.getParentTask());
@@ -48,7 +48,7 @@ public class CloseIndexResponseTests extends ESTestCase {
         {
             final CloseIndexResponse response = randomResponse();
             try (BytesStreamOutput out = new BytesStreamOutput()) {
-                out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
+                out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
                 response.writeTo(out);
 
                 final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse();

@@ -65,7 +65,7 @@ public class CloseIndexResponseTests extends ESTestCase {
 
             final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
             try (StreamInput in = out.bytes().streamInput()) {
-                in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
+                in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
                 deserializedResponse.readFrom(in);
             }
             assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
@@ -140,7 +140,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
             .add(new DiscoveryNode("old_node", buildNewFakeTransportAddress(), emptyMap(),
                 new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_0_0))
             .add(new DiscoveryNode("new_node", buildNewFakeTransportAddress(), emptyMap(),
-                new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_1_0)))
+                new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_2_0)))
             .build();
 
         state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results);
@@ -212,7 +212,7 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
         assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6));
         assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1));
 
-        if (Version.CURRENT.onOrAfter(Version.V_7_1_0)) {
+        if (Version.CURRENT.onOrAfter(Version.V_7_2_0)) {
             assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED);
             assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED);
         } else {
@@ -47,7 +47,7 @@ public class DataFrameTransform extends AbstractDiffable<DataFrameTransform> imp
 
     @Override
     public Version getMinimalSupportedVersion() {
-        return Version.V_7_1_0;
+        return Version.V_7_2_0;
     }
 
     @Override
@@ -43,7 +43,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
         realmsUsage = in.readMap();
         rolesStoreUsage = in.readMap();
         sslUsage = in.readMap();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             tokenServiceUsage = in.readMap();
             apiKeyServiceUsage = in.readMap();
         }

@@ -80,7 +80,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
         out.writeMap(realmsUsage);
         out.writeMap(rolesStoreUsage);
         out.writeMap(sslUsage);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeMap(tokenServiceUsage);
             out.writeMap(apiKeyServiceUsage);
         }
@@ -137,7 +137,7 @@ public class PutRoleMappingRequest extends ActionRequest
         this.name = in.readString();
         this.enabled = in.readBoolean();
         this.roles = in.readStringList();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             this.roleTemplates = in.readList(TemplateRoleName::new);
         }
         this.rules = ExpressionParser.readExpression(in);

@@ -151,7 +151,7 @@ public class PutRoleMappingRequest extends ActionRequest
         out.writeString(name);
         out.writeBoolean(enabled);
         out.writeStringCollection(roles);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeList(roleTemplates);
         }
         ExpressionParser.writeExpression(rules, out);
@@ -51,7 +51,7 @@ public class TokensInvalidationResult implements ToXContentObject, Writeable {
         this.invalidatedTokens = in.readStringList();
         this.previouslyInvalidatedTokens = in.readStringList();
         this.errors = in.readList(StreamInput::readException);
-        if (in.getVersion().before(Version.V_7_1_0)) {
+        if (in.getVersion().before(Version.V_7_2_0)) {
             in.readVInt();
         }
     }

@@ -97,7 +97,7 @@ public class TokensInvalidationResult implements ToXContentObject, Writeable {
         out.writeStringCollection(invalidatedTokens);
         out.writeStringCollection(previouslyInvalidatedTokens);
         out.writeCollection(errors, StreamOutput::writeException);
-        if (out.getVersion().before(Version.V_7_1_0)) {
+        if (out.getVersion().before(Version.V_7_2_0)) {
             out.writeVInt(5);
         }
     }
@@ -91,7 +91,7 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable {
         this.name = in.readString();
         this.enabled = in.readBoolean();
         this.roles = in.readStringList();
-        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
             this.roleTemplates = in.readList(TemplateRoleName::new);
         } else {
             this.roleTemplates = Collections.emptyList();

@@ -105,7 +105,7 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable {
         out.writeString(name);
         out.writeBoolean(enabled);
         out.writeStringCollection(roles);
-        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
+        if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
             out.writeList(roleTemplates);
         }
         ExpressionParser.writeExpression(expression, out);
@@ -139,7 +139,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
                                                 boolean isMemoryTrackerRecentlyRefreshed,
                                                 Logger logger) {
         // TODO: remove in 8.0.0
-        boolean allNodesHaveDynamicMaxWorkers = clusterState.getNodes().getMinNodeVersion().onOrAfter(Version.V_7_1_0);
+        boolean allNodesHaveDynamicMaxWorkers = clusterState.getNodes().getMinNodeVersion().onOrAfter(Version.V_7_2_0);
 
         // Try to allocate jobs according to memory usage, but if that's not possible (maybe due to a mixed version cluster or maybe
         // because of some weird OS problem) then fall back to the old mechanism of only considering numbers of assigned jobs
@@ -180,9 +180,9 @@ public final class TokenService {
     private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_";
     static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1;
     static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue();
-    static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_1_0;
-    static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_1_0;
-    static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_1_0;
+    static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_2_0;
+    static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_2_0;
+    static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_2_0;
     // UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars
     private static final int TOKEN_ID_LENGTH = 22;
     private static final Logger logger = LogManager.getLogger(TokenService.class);
@@ -288,7 +288,7 @@ public class ExpressionRoleMappingTests extends ESTestCase {
     public void testSerialization() throws Exception {
         final ExpressionRoleMapping original = randomRoleMapping(true);
 
-        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_1_0, null);
+        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_2_0, null);
         BytesStreamOutput output = new BytesStreamOutput();
         output.setVersion(version);
         original.writeTo(output);
@@ -884,8 +884,8 @@ setup:
 ---
 "Obsolete Timezone":
   - skip:
-      version: " - 7.0.99"
-      reason: "IANA TZ deprecations in 7.1"
+      version: " - 7.1.99"
+      reason: "IANA TZ deprecations in 7.2"
       features: "warnings"
   - do:
       indices.create:

@@ -1032,8 +1032,8 @@ setup:
 ---
 "Obsolete BWC Timezone":
   - skip:
-      version: " - 7.0.99"
-      reason: "IANA TZ deprecations in 7.1"
+      version: " - 7.1.99"
+      reason: "IANA TZ deprecations in 7.2"
   - do:
       indices.create:
         index: tz_rollup