Bump 7.x branch to version 7.2.0

This commit adds the 7.2.0 version constant to the 7.x branch, and bumps
BWC logic accordingly.
This commit is contained in:
Jason Tedor 2019-04-09 10:54:36 -04:00
parent c86f797df8
commit 7f3ab4524f
No known key found for this signature in database
GPG Key ID: FA89F05560F16BC5
60 changed files with 137 additions and 131 deletions

View File

@ -1,4 +1,4 @@
elasticsearch = 7.1.0
elasticsearch = 7.2.0
lucene = 8.0.0
bundled_jdk = 12.0.1+12@69cfe15208a647278a19ef0990eea691

View File

@ -1,8 +1,8 @@
:version: 7.1.0
:version: 7.2.0
////
bare_version never includes -alpha or -beta
////
:bare_version: 7.1.0
:bare_version: 7.2.0
:major-version: 7.x
:prev-major-version: 6.x
:lucene_version: 8.0.0

View File

@ -6,10 +6,12 @@
This section summarizes the most important changes in each release. For the
full list, see <<es-release-notes>> and <<breaking-changes>>.
* <<release-highlights-7.2.0>>
* <<release-highlights-7.1.0>>
* <<release-highlights-7.0.0>>
--
include::highlights-7.2.0.asciidoc[]
include::highlights-7.1.0.asciidoc[]
include::highlights-7.0.0.asciidoc[]

View File

@ -23,8 +23,8 @@ setup:
---
"Test use_field":
- skip:
version: " - 7.9.99" # TODO change to 7.0.99 after backport
reason: "Implemented in 7.1"
version: " - 7.1.99"
reason: "Implemented in 7.2"
- do:
search:
index: test

View File

@ -1,8 +1,8 @@
setup:
- skip:
features: headers
version: " - 7.0.99"
reason: "dense_vector functions were introduced in 7.1.0"
version: " - 7.1.99"
reason: "dense_vector functions were introduced in 7.2.0"
- do:
indices.create:

View File

@ -1,8 +1,8 @@
setup:
- skip:
features: headers
version: " - 7.0.99"
reason: "dense_vector functions were introduced in 7.1.0"
version: " - 7.1.99"
reason: "dense_vector functions were introduced in 7.2.0"
- do:
indices.create:

View File

@ -1,7 +1,7 @@
setup:
- skip:
version: " - 7.1.0"
reason: "added in 7.1.0"
version: " - 7.1.99"
reason: "added in 7.2.0"
- do:
indices.create:

View File

@ -1,7 +1,7 @@
setup:
- skip:
version: " - 7.1.0"
reason: "added in 7.1.0"
version: " - 7.1.99"
reason: "added in 7.2.0"
- do:
indices.create:

View File

@ -1,8 +1,8 @@
setup:
- skip:
features: headers
version: " - 7.0.99"
reason: "sparse_vector functions were introduced in 7.1.0"
version: " - 7.1.99"
reason: "sparse_vector functions were introduced in 7.2.0"
- do:
indices.create:

View File

@ -1,8 +1,8 @@
setup:
- skip:
features: headers
version: " - 7.0.99"
reason: "sparse_vector functions were introduced in 7.1.0"
version: " - 7.1.99"
reason: "sparse_vector functions were introduced in 7.2.0"
- do:
indices.create:

View File

@ -1058,7 +1058,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
closeIndex(index);
}
if (getOldClusterVersion().onOrAfter(Version.V_7_1_0)) {
if (getOldClusterVersion().onOrAfter(Version.V_7_2_0)) {
ensureGreenLongWait(index);
assertClosedIndex(index, true);
} else {

View File

@ -365,7 +365,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
}
final Version indexVersionCreated = indexVersionCreated(indexName);
if (indexVersionCreated.onOrAfter(Version.V_7_1_0)) {
if (indexVersionCreated.onOrAfter(Version.V_7_2_0)) {
// index was created on a version that supports the replication of closed indices,
// so we expect the index to be closed and replicated
ensureGreen(indexName);
@ -398,7 +398,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
closeIndex(indexName);
}
if (minimumNodeVersion.onOrAfter(Version.V_7_1_0)) {
if (minimumNodeVersion.onOrAfter(Version.V_7_2_0)) {
// index is created on a version that supports the replication of closed indices,
// so we expect the index to be closed and replicated
ensureGreen(indexName);

View File

@ -50,10 +50,10 @@
)
$/
---
"Test cat indices output for closed index (pre 7.1.0)":
"Test cat indices output for closed index (pre 7.2.0)":
- skip:
version: "7.1.0 - "
reason: "closed indices are replicated starting version 7.1.0"
version: "7.2.0 - "
reason: "closed indices are replicated starting version 7.2.0"
- do:
indices.create:
@ -93,8 +93,8 @@
---
"Test cat indices output for closed index":
- skip:
version: " - 7.0.99"
reason: "closed indices are replicated starting version 7.1.0"
version: " - 7.1.99"
reason: "closed indices are replicated starting version 7.2.0"
- do:
indices.create:

View File

@ -79,8 +79,8 @@
---
"Test cat recovery output for closed index":
- skip:
version: " - 7.0.99"
reason: closed indices are replicated starting version 7.1.0
version: " - 7.1.99"
reason: closed indices are replicated starting version 7.2.0
- do:
indices.create:

View File

@ -1,8 +1,8 @@
---
"Help":
- skip:
version: " - 7.0.99"
reason: external refresh stats were added in 7.1.0
version: " - 7.1.99"
reason: external refresh stats were added in 7.2.0
- do:
cat.shards:
help: true

View File

@ -50,8 +50,8 @@
---
"Cluster shard allocation explanation test with a closed index":
- skip:
version: " - 7.0.99"
reason: closed indices are replicated starting version 7.1.0
version: " - 7.1.99"
reason: closed indices are replicated starting version 7.2.0
- do:
indices.create:

View File

@ -133,10 +133,10 @@
- is_true: indices.test_index.shards
---
"cluster health with closed index (pre 7.1.0)":
"cluster health with closed index (pre 7.2.0)":
- skip:
version: "7.1.0 - "
reason: "closed indices are replicated starting version 7.1.0"
version: "7.2.0 - "
reason: "closed indices are replicated starting version 7.2.0"
- do:
indices.create:
@ -209,8 +209,8 @@
---
"cluster health with closed index":
- skip:
version: " - 7.0.99"
reason: "closed indices are replicated starting version 7.1.0"
version: " - 7.1.99"
reason: "closed indices are replicated starting version 7.2.0"
- do:
indices.create:

View File

@ -31,8 +31,8 @@ setup:
---
"cluster health with expand_wildcards":
- skip:
version: " - 7.0.99"
reason: "indices options has been introduced in cluster health request starting version 7.1.0"
version: " - 7.1.99"
reason: "indices options has been introduced in cluster health request starting version 7.2.0"
- do:
cluster.health:

View File

@ -82,8 +82,8 @@
"get cluster stats returns packaging types":
- skip:
version: " - 7.0.99"
reason: "packaging types are added for v7.1.0"
version: " - 7.1.99"
reason: "packaging types are added for v7.2.0"
- do:
cluster.stats: {}

View File

@ -303,8 +303,8 @@ setup:
---
"Field caps with include_unmapped":
- skip:
version: " - 7.0.99"
reason: include_unmapped has been added in 7.1.0
version: " - 7.1.99"
reason: include_unmapped has been added in 7.2.0
- do:
field_caps:

View File

@ -56,8 +56,8 @@
---
"Flush parameters validation":
- skip:
version: " - 7.0.99"
reason: flush parameters validation is introduced in 7.1.0
version: " - 7.1.99"
reason: flush parameters validation is introduced in 7.2.0
- do:
indices.create:
index: test

View File

@ -65,8 +65,9 @@
---
"Close index with wait_for_active_shards set to all":
- skip:
version: " - 7.0.99"
reason: "closed indices are replicated starting version 7.1.0"
version: " - 7.1.99"
reason: "closed indices are replicated starting version 7.2.0"
- do:
indices.create:

View File

@ -42,8 +42,8 @@
---
"Indices recovery test for closed index":
- skip:
version: " - 7.0.99"
reason: closed indices are replicated starting version 7.1.0
version: " - 7.1.99"
reason: closed indices are replicated starting version 7.2.0
- do:
indices.create:

View File

@ -16,7 +16,7 @@ setup:
"Segment Stats":
- skip:
version: " - 7.0.99"
version: " - 7.1.99"
reason: forbid_closed_indices is not supported in earlier version
- do:

View File

@ -445,8 +445,8 @@ setup:
---
"Composite aggregation with unmapped field":
- skip:
version: " - 7.0.99"
reason: starting in 7.1 the composite aggregation handles unmapped fields as keywords
version: " - 7.1.99"
reason: starting in 7.2 the composite aggregation handles unmapped fields as keywords
- do:
search:
@ -526,8 +526,8 @@ setup:
---
"Missing source":
- skip:
version: " - 7.0.99"
reason: null/empty sources disallowed in 7.1
version: " - 7.1.99"
reason: null/empty sources disallowed in 7.2
- do:
catch: /Composite \[sources\] cannot be null or empty/
@ -555,8 +555,8 @@ setup:
---
"Duplicate sources":
- skip:
version: " - 7.0.99"
reason: duplicate names disallowed in 7.1
version: " - 7.1.99"
reason: duplicate names disallowed in 7.2
- do:
catch: /Composite source names must be unique, found duplicates[:] \[keyword\]/

View File

@ -326,8 +326,8 @@ setup:
---
"Test overlapping":
- skip:
version: " - 7.9.99"
reason: "Implemented in 7.1"
version: " - 7.1.99"
reason: "Implemented in 7.2"
- do:
search:
index: test
@ -349,8 +349,8 @@ setup:
---
"Test before":
- skip:
version: " - 7.9.99"
reason: "Implemented in 7.1"
version: " - 7.1.99"
reason: "Implemented in 7.2"
- do:
search:
index: test
@ -369,8 +369,8 @@ setup:
---
"Test after":
- skip:
version: " - 7.9.99"
reason: "Implemented in 7.1"
version: " - 7.1.99"
reason: "Implemented in 7.2"
- do:
search:
index: test

View File

@ -1,7 +1,7 @@
setup:
- skip:
version: " - 7.0.99"
reason: "Implemented in 7.1"
version: " - 7.1.99"
reason: "Implemented in 7.2"
- do:
indices.create:

View File

@ -1,7 +1,7 @@
setup:
- skip:
version: " - 7.1.0"
reason: "added in 7.1.0"
version: " - 7.1.99"
reason: "added in 7.2.0"
- do:
indices.create:

View File

@ -134,8 +134,9 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static final Version V_7_0_1 = new Version(V_7_0_1_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final int V_7_1_0_ID = 7010099;
public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version CURRENT = V_7_1_0;
public static final int V_7_2_0_ID = 7020099;
public static final Version V_7_2_0 = new Version(V_7_2_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
public static final Version CURRENT = V_7_2_0;
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@ -148,6 +149,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
public static Version fromId(int id) {
switch (id) {
case V_7_2_0_ID:
return V_7_2_0;
case V_7_1_0_ID:
return V_7_1_0;
case V_7_0_1_ID:

View File

@ -84,7 +84,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
waitForNoInitializingShards = in.readBoolean();
}
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
indicesOptions = IndicesOptions.readIndicesOptions(in);
} else {
indicesOptions = IndicesOptions.lenientExpandOpen();
@ -121,7 +121,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
out.writeBoolean(waitForNoInitializingShards);
}
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
indicesOptions.writeIndicesOptions(out);
}
}

View File

@ -118,7 +118,7 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im
super.readFrom(in);
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
waitForActiveShards = ActiveShardCount.readFrom(in);
} else {
waitForActiveShards = ActiveShardCount.NONE;
@ -130,7 +130,7 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im
super.writeTo(out);
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
waitForActiveShards.writeTo(out);
}
}

View File

@ -37,7 +37,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
readShardsAcknowledged(in);
}
}
@ -45,7 +45,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
writeShardsAcknowledged(out);
}
}

View File

@ -64,7 +64,7 @@ public class CommonStatsFlags implements Writeable, Cloneable {
fieldDataFields = in.readStringArray();
completionDataFields = in.readStringArray();
includeSegmentFileSizes = in.readBoolean();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
includeUnloadedSegments = in.readBoolean();
}
}
@ -82,7 +82,7 @@ public class CommonStatsFlags implements Writeable, Cloneable {
out.writeStringArrayNullable(fieldDataFields);
out.writeStringArrayNullable(completionDataFields);
out.writeBoolean(includeSegmentFileSizes);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeBoolean(includeUnloadedSegments);
}
}

View File

@ -84,7 +84,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
mergeResults = in.readBoolean();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
includeUnmapped = in.readBoolean();
} else {
includeUnmapped = false;
@ -98,7 +98,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
out.writeStringArray(indices);
indicesOptions.writeIndicesOptions(out);
out.writeBoolean(mergeResults);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeBoolean(includeUnmapped);
}
}

View File

@ -108,7 +108,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
indices = in.readStringArray();
} else {
indices = Strings.EMPTY_ARRAY;
@ -124,7 +124,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeStringArray(indices);
}
out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);

View File

@ -719,7 +719,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
} else {
settingsVersion = 1;
}
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
aliasesVersion = in.readVLong();
} else {
aliasesVersion = 1;
@ -755,7 +755,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
out.writeVLong(settingsVersion);
}
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeVLong(aliasesVersion);
}
out.writeByte(state.id);
@ -803,7 +803,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
} else {
builder.settingsVersion(1);
}
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
builder.aliasesVersion(in.readVLong());
} else {
builder.aliasesVersion(1);
@ -860,7 +860,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
if (out.getVersion().onOrAfter(Version.V_6_5_0)) {
out.writeVLong(settingsVersion);
}
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeVLong(aliasesVersion);
}
out.writeInt(routingNumShards);
@ -1462,8 +1462,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_6_5_0)) {
assert settingsVersion : "settings version should be present for indices created on or after 6.5.0";
}
if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_7_1_0)) {
assert aliasesVersion : "aliases version should be present for indices created on or after 7.1.0";
if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(Version.V_7_2_0)) {
assert aliasesVersion : "aliases version should be present for indices created on or after 7.2.0";
}
return builder.build();
}

View File

@ -402,7 +402,7 @@ public class MetaDataIndexStateService {
// Remove the index routing table of closed indices if the cluster is in a mixed version
// that does not support the replication of closed indices
final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(Version.V_7_1_0);
final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(Version.V_7_2_0);
final MetaData.Builder metadata = MetaData.builder(currentState.metaData());
final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());

View File

@ -130,7 +130,7 @@ public class ReadOnlyEngine extends Engine {
// created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction
// that guarantee that all operations have been flushed to Lucene.
final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_7_1_0) ||
if (indexVersionCreated.onOrAfter(Version.V_7_2_0) ||
(seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO && indexVersionCreated.onOrAfter(Version.V_6_7_0))) {
if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) {
throw new IllegalStateException("Maximum sequence number [" + seqNoStats.getMaxSeqNo()

View File

@ -120,7 +120,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
this.ordered = in.readBoolean();
this.analyzer = in.readOptionalString();
this.filter = in.readOptionalWriteable(IntervalFilter::new);
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
this.useField = in.readOptionalString();
}
else {
@ -186,7 +186,7 @@ public abstract class IntervalsSourceProvider implements NamedWriteable, ToXCont
out.writeBoolean(ordered);
out.writeOptionalString(analyzer);
out.writeOptionalWriteable(filter);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeOptionalString(useField);
}
}

View File

@ -52,7 +52,7 @@ public class RefreshStats implements Streamable, Writeable, ToXContentFragment {
public RefreshStats(StreamInput in) throws IOException {
total = in.readVLong();
totalTimeInMillis = in.readVLong();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
externalTotal = in.readVLong();
externalTotalTimeInMillis = in.readVLong();
}
@ -63,7 +63,7 @@ public class RefreshStats implements Streamable, Writeable, ToXContentFragment {
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(total);
out.writeVLong(totalTimeInMillis);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeVLong(externalTotal);
out.writeVLong(externalTotalTimeInMillis);
}

View File

@ -52,7 +52,7 @@ public class RecoveryCleanFilesRequest extends TransportRequest {
shardId = ShardId.readShardId(in);
snapshotFiles = new Store.MetadataSnapshot(in);
totalTranslogOps = in.readVInt();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
globalCheckpoint = in.readZLong();
} else {
globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;
@ -66,7 +66,7 @@ public class RecoveryCleanFilesRequest extends TransportRequest {
shardId.writeTo(out);
snapshotFiles.writeTo(out);
out.writeVInt(totalTranslogOps);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeZLong(globalCheckpoint);
}
}

View File

@ -289,7 +289,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
state().getTranslog().totalOperations(totalTranslogOps);
indexShard().openEngineAndSkipTranslogRecovery();
assert indexShard.getGlobalCheckpoint() >= indexShard.seqNoStats().getMaxSeqNo() ||
indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_1_0)
indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0)
: "global checkpoint is not initialized [" + indexShard.seqNoStats() + "]";
return null;
});
@ -400,7 +400,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
store.ensureIndexHasHistoryUUID();
}
assert globalCheckpoint >= Long.parseLong(sourceMetaData.getCommitUserData().get(SequenceNumbers.MAX_SEQ_NO))
|| indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_1_0) :
|| indexShard.indexSettings().getIndexVersionCreated().before(Version.V_7_2_0) :
"invalid global checkpoint[" + globalCheckpoint + "] source_meta_data [" + sourceMetaData.getCommitUserData() + "]";
final String translogUUID = Translog.createEmptyTranslog(
indexShard.shardPath().resolveTranslog(), globalCheckpoint, shardId, indexShard.getPendingPrimaryTerm());

View File

@ -121,7 +121,7 @@ public class RecoveryTranslogOperationsRequest extends TransportRequest {
} else {
retentionLeases = RetentionLeases.EMPTY;
}
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
mappingVersionOnPrimary = in.readVLong();
} else {
mappingVersionOnPrimary = Long.MAX_VALUE;
@ -144,7 +144,7 @@ public class RecoveryTranslogOperationsRequest extends TransportRequest {
if (out.getVersion().onOrAfter(Version.V_6_7_0)) {
retentionLeases.writeTo(out);
}
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeVLong(mappingVersionOnPrimary);
}
}

View File

@ -70,7 +70,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr
super(in);
count = in.readVLong();
if (in.readBoolean()) {
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
centroid = new GeoPoint(in.readDouble(), in.readDouble());
} else {
final long hash = in.readLong();
@ -87,7 +87,7 @@ public class InternalGeoCentroid extends InternalAggregation implements GeoCentr
out.writeVLong(count);
if (centroid != null) {
out.writeBoolean(true);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeDouble(centroid.lat());
out.writeDouble(centroid.lon());
} else {

View File

@ -286,7 +286,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
if (hasAggs = in.readBoolean()) {
aggregations = InternalAggregations.readAggregations(in);
}
if (in.getVersion().before(Version.V_7_1_0)) {
if (in.getVersion().before(Version.V_7_2_0)) {
List<SiblingPipelineAggregator> pipelineAggregators = in.readNamedWriteableList(PipelineAggregator.class).stream()
.map(a -> (SiblingPipelineAggregator) a).collect(Collectors.toList());
if (hasAggs && pipelineAggregators.isEmpty() == false) {
@ -339,7 +339,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
out.writeBoolean(true);
aggregations.writeTo(out);
}
if (out.getVersion().before(Version.V_7_1_0)) {
if (out.getVersion().before(Version.V_7_2_0)) {
//Earlier versions expect sibling pipeline aggs separately as they used to be set to QuerySearchResult directly,
//while later versions expect them in InternalAggregations. Note that despite serializing sibling pipeline aggs as part of
//InternalAggregations is supported since 6.7.0, the shards set sibling pipeline aggs to InternalAggregations only from 7.1 on.

View File

@ -130,7 +130,7 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
nestedSort = in.readOptionalWriteable(NestedSortBuilder::new);
}
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
numericType = in.readOptionalString();
}
}
@ -147,7 +147,7 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
out.writeOptionalWriteable(nestedSort);
}
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeOptionalString(numericType);
}
}

View File

@ -57,9 +57,9 @@ public class ClusterHealthRequestTests extends ESTestCase {
public void testBwcSerialization() throws Exception {
for (int runs = 0; runs < randomIntBetween(5, 20); runs++) {
// Generate a random cluster health request in version < 7.1.0 and serializes it
// Generate a random cluster health request in version < 7.2.0 and serializes it
final BytesStreamOutput out = new BytesStreamOutput();
out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_1_0)));
out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0)));
final ClusterHealthRequest expected = randomRequest();
{
@ -112,9 +112,9 @@ public class ClusterHealthRequestTests extends ESTestCase {
// Generate a random cluster health request in current version
final ClusterHealthRequest expected = randomRequest();
// Serialize to node in version < 7.1.0
// Serialize to node in version < 7.2.0
final BytesStreamOutput out = new BytesStreamOutput();
out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_1_0)));
out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_2_0)));
expected.writeTo(out);
// Deserialize and check the cluster health request

View File

@ -54,7 +54,7 @@ public class CloseIndexRequestTests extends ESTestCase {
{
final CloseIndexRequest request = randomRequest();
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
request.writeTo(out);
try (StreamInput in = out.bytes().streamInput()) {
@ -77,7 +77,7 @@ public class CloseIndexRequestTests extends ESTestCase {
final CloseIndexRequest deserializedRequest = new CloseIndexRequest();
try (StreamInput in = out.bytes().streamInput()) {
in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
deserializedRequest.readFrom(in);
}
assertEquals(sample.getParentTask(), deserializedRequest.getParentTask());

View File

@ -48,7 +48,7 @@ public class CloseIndexResponseTests extends ESTestCase {
{
final CloseIndexResponse response = randomResponse();
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
response.writeTo(out);
final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse();
@ -65,7 +65,7 @@ public class CloseIndexResponseTests extends ESTestCase {
final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
try (StreamInput in = out.bytes().streamInput()) {
in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_2_0)));
deserializedResponse.readFrom(in);
}
assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));

View File

@ -140,7 +140,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
.add(new DiscoveryNode("old_node", buildNewFakeTransportAddress(), emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_0_0))
.add(new DiscoveryNode("new_node", buildNewFakeTransportAddress(), emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_1_0)))
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_2_0)))
.build();
state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results);

View File

@ -212,7 +212,7 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6));
assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1));
if (Version.CURRENT.onOrAfter(Version.V_7_1_0)) {
if (Version.CURRENT.onOrAfter(Version.V_7_2_0)) {
assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED);
assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED);
} else {

View File

@ -47,7 +47,7 @@ public class DataFrameTransform extends AbstractDiffable<DataFrameTransform> imp
@Override
public Version getMinimalSupportedVersion() {
return Version.V_7_1_0;
return Version.V_7_2_0;
}
@Override

View File

@ -43,7 +43,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
realmsUsage = in.readMap();
rolesStoreUsage = in.readMap();
sslUsage = in.readMap();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
tokenServiceUsage = in.readMap();
apiKeyServiceUsage = in.readMap();
}
@ -80,7 +80,7 @@ public class SecurityFeatureSetUsage extends XPackFeatureSet.Usage {
out.writeMap(realmsUsage);
out.writeMap(rolesStoreUsage);
out.writeMap(sslUsage);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeMap(tokenServiceUsage);
out.writeMap(apiKeyServiceUsage);
}

View File

@ -137,7 +137,7 @@ public class PutRoleMappingRequest extends ActionRequest
this.name = in.readString();
this.enabled = in.readBoolean();
this.roles = in.readStringList();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
this.roleTemplates = in.readList(TemplateRoleName::new);
}
this.rules = ExpressionParser.readExpression(in);
@ -151,7 +151,7 @@ public class PutRoleMappingRequest extends ActionRequest
out.writeString(name);
out.writeBoolean(enabled);
out.writeStringCollection(roles);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeList(roleTemplates);
}
ExpressionParser.writeExpression(rules, out);

View File

@ -51,7 +51,7 @@ public class TokensInvalidationResult implements ToXContentObject, Writeable {
this.invalidatedTokens = in.readStringList();
this.previouslyInvalidatedTokens = in.readStringList();
this.errors = in.readList(StreamInput::readException);
if (in.getVersion().before(Version.V_7_1_0)) {
if (in.getVersion().before(Version.V_7_2_0)) {
in.readVInt();
}
}
@ -97,7 +97,7 @@ public class TokensInvalidationResult implements ToXContentObject, Writeable {
out.writeStringCollection(invalidatedTokens);
out.writeStringCollection(previouslyInvalidatedTokens);
out.writeCollection(errors, StreamOutput::writeException);
if (out.getVersion().before(Version.V_7_1_0)) {
if (out.getVersion().before(Version.V_7_2_0)) {
out.writeVInt(5);
}
}

View File

@ -91,7 +91,7 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable {
this.name = in.readString();
this.enabled = in.readBoolean();
this.roles = in.readStringList();
if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
if (in.getVersion().onOrAfter(Version.V_7_2_0)) {
this.roleTemplates = in.readList(TemplateRoleName::new);
} else {
this.roleTemplates = Collections.emptyList();
@ -105,7 +105,7 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable {
out.writeString(name);
out.writeBoolean(enabled);
out.writeStringCollection(roles);
if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
if (out.getVersion().onOrAfter(Version.V_7_2_0)) {
out.writeList(roleTemplates);
}
ExpressionParser.writeExpression(expression, out);

View File

@ -139,7 +139,7 @@ public class TransportOpenJobAction extends TransportMasterNodeAction<OpenJobAct
boolean isMemoryTrackerRecentlyRefreshed,
Logger logger) {
// TODO: remove in 8.0.0
boolean allNodesHaveDynamicMaxWorkers = clusterState.getNodes().getMinNodeVersion().onOrAfter(Version.V_7_1_0);
boolean allNodesHaveDynamicMaxWorkers = clusterState.getNodes().getMinNodeVersion().onOrAfter(Version.V_7_2_0);
// Try to allocate jobs according to memory usage, but if that's not possible (maybe due to a mixed version cluster or maybe
// because of some weird OS problem) then fall back to the old mechanism of only considering numbers of assigned jobs

View File

@ -180,9 +180,9 @@ public final class TokenService {
private static final String TOKEN_DOC_ID_PREFIX = TOKEN_DOC_TYPE + "_";
static final int MINIMUM_BYTES = VERSION_BYTES + SALT_BYTES + IV_BYTES + 1;
static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue();
static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_1_0;
static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_1_0;
static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_1_0;
static final Version VERSION_TOKENS_INDEX_INTRODUCED = Version.V_7_2_0;
static final Version VERSION_ACCESS_TOKENS_AS_UUIDS = Version.V_7_2_0;
static final Version VERSION_MULTIPLE_CONCURRENT_REFRESHES = Version.V_7_2_0;
// UUIDs are 16 bytes encoded base64 without padding, therefore the length is (16 / 3) * 4 + ((16 % 3) * 8 + 5) / 6 chars
private static final int TOKEN_ID_LENGTH = 22;
private static final Logger logger = LogManager.getLogger(TokenService.class);

View File

@ -288,7 +288,7 @@ public class ExpressionRoleMappingTests extends ESTestCase {
public void testSerialization() throws Exception {
final ExpressionRoleMapping original = randomRoleMapping(true);
final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_1_0, null);
final Version version = VersionUtils.randomVersionBetween(random(), Version.V_7_2_0, null);
BytesStreamOutput output = new BytesStreamOutput();
output.setVersion(version);
original.writeTo(output);

View File

@ -884,8 +884,8 @@ setup:
---
"Obsolete Timezone":
- skip:
version: " - 7.0.99"
reason: "IANA TZ deprecations in 7.1"
version: " - 7.1.99"
reason: "IANA TZ deprecations in 7.2"
features: "warnings"
- do:
indices.create:
@ -1032,8 +1032,8 @@ setup:
---
"Obsolete BWC Timezone":
- skip:
version: " - 7.0.99"
reason: "IANA TZ deprecations in 7.1"
version: " - 7.1.99"
reason: "IANA TZ deprecations in 7.2"
- do:
indices.create:
index: tz_rollup