Remove the need for _UNRELEASED suffix in versions (#24798)
Removes the need for the `_UNRELEASED` suffix on versions by detecting if a version should be unreleased or not based on the versions around it. This should make it simpler to automate the task of adding a new version label.
This commit is contained in:
parent
23fb36cc87
commit
5da8ce8318
59
build.gradle
59
build.gradle
|
@ -62,7 +62,12 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) {
|
|||
}
|
||||
}
|
||||
|
||||
// introspect all versions of ES that may be tested against for backwards compatibility
|
||||
/* Introspect all versions of ES that may be tested against for backwards
|
||||
* compatibility. It is *super* important that this logic is the same as the
|
||||
* logic in VersionUtils.java, modulo alphas, betas, and rcs which are ignored
|
||||
* in gradle because they don't have any backwards compatibility guarantees
|
||||
* but are not ignored in VersionUtils.java because the tests expect them not
|
||||
* to be. */
|
||||
Version currentVersion = Version.fromString(VersionProperties.elasticsearch.minus('-SNAPSHOT'))
|
||||
int prevMajor = currentVersion.major - 1
|
||||
File versionFile = file('core/src/main/java/org/elasticsearch/Version.java')
|
||||
|
@ -72,13 +77,14 @@ List<Version> versions = []
|
|||
int prevMinorIndex = -1 // index in the versions list of the last minor from the prev major
|
||||
int lastPrevMinor = -1 // the minor version number from the prev major we most recently seen
|
||||
for (String line : versionLines) {
|
||||
Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_UNRELEASED)? .*/
|
||||
/* Note that this skips alphas and betas which is fine because they aren't
|
||||
* compatible with anything. */
|
||||
Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+) .*/
|
||||
if (match.matches()) {
|
||||
int major = Integer.parseInt(match.group(1))
|
||||
int minor = Integer.parseInt(match.group(2))
|
||||
int bugfix = Integer.parseInt(match.group(3))
|
||||
boolean unreleased = match.group(4) != null
|
||||
Version foundVersion = new Version(major, minor, bugfix, false, unreleased)
|
||||
Version foundVersion = new Version(major, minor, bugfix, false)
|
||||
if (currentVersion != foundVersion) {
|
||||
versions.add(foundVersion)
|
||||
}
|
||||
|
@ -98,8 +104,11 @@ if (currentVersion.bugfix == 0) {
|
|||
// unreleased version of closest branch. So for those cases, the version includes -SNAPSHOT,
|
||||
// and the bwc distribution will checkout and build that version.
|
||||
Version last = versions[-1]
|
||||
versions[-1] = new Version(last.major, last.minor, last.bugfix,
|
||||
true, last.unreleased)
|
||||
versions[-1] = new Version(last.major, last.minor, last.bugfix, true)
|
||||
if (last.bugfix == 0) {
|
||||
versions[-2] = new Version(
|
||||
versions[-2].major, versions[-2].minor, versions[-2].bugfix, true)
|
||||
}
|
||||
}
|
||||
|
||||
// injecting groovy property variables into all projects
|
||||
|
@ -114,6 +123,44 @@ allprojects {
|
|||
}
|
||||
}
|
||||
|
||||
task('verifyVersions') {
|
||||
description 'Verifies that all released versions that are indexed compatible are listed in Version.java.'
|
||||
group 'Verification'
|
||||
enabled = false == gradle.startParameter.isOffline()
|
||||
doLast {
|
||||
// Read the list from maven central
|
||||
Node xml
|
||||
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
|
||||
xml = new XmlParser().parse(s)
|
||||
}
|
||||
Set<String> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ })
|
||||
|
||||
// Limit the known versions to those that should be index compatible
|
||||
knownVersions = knownVersions.findAll { Integer.parseInt(it.split('\\.')[0]) >= prevMajor }
|
||||
|
||||
/* Limit the listed versions to those that have been marked as released.
|
||||
* Versions not marked as released don't get the same testing and we want
|
||||
* to make sure that we flip all unreleased versions to released as soon
|
||||
* as possible after release. */
|
||||
Set<String> actualVersions = new TreeSet<>(
|
||||
indexCompatVersions
|
||||
.findAll { false == it.snapshot }
|
||||
.collect { it.toString() })
|
||||
|
||||
// TODO this is almost certainly going to fail on 5.4 when we release 5.5.0
|
||||
|
||||
// Finally, compare!
|
||||
if (!knownVersions.equals(actualVersions)) {
|
||||
throw new GradleException("out-of-date versions\nActual :" +
|
||||
actualVersions + "\nExpected:" + knownVersions +
|
||||
"; update Version.java")
|
||||
}
|
||||
}
|
||||
}
|
||||
task('precommit') {
|
||||
dependsOn(verifyVersions)
|
||||
}
|
||||
|
||||
subprojects {
|
||||
project.afterEvaluate {
|
||||
// include license and notice in jars
|
||||
|
|
|
@ -29,20 +29,14 @@ public class Version {
|
|||
final int bugfix
|
||||
final int id
|
||||
final boolean snapshot
|
||||
/**
|
||||
* Is the version listed as {@code _UNRELEASED} in Version.java.
|
||||
*/
|
||||
final boolean unreleased
|
||||
|
||||
public Version(int major, int minor, int bugfix, boolean snapshot,
|
||||
boolean unreleased) {
|
||||
public Version(int major, int minor, int bugfix, boolean snapshot) {
|
||||
this.major = major
|
||||
this.minor = minor
|
||||
this.bugfix = bugfix
|
||||
this.snapshot = snapshot
|
||||
this.id = major * 100000 + minor * 1000 + bugfix * 10 +
|
||||
(snapshot ? 1 : 0)
|
||||
this.unreleased = unreleased
|
||||
}
|
||||
|
||||
public static Version fromString(String s) {
|
||||
|
@ -54,7 +48,7 @@ public class Version {
|
|||
bugfix = bugfix.split('-')[0]
|
||||
}
|
||||
return new Version(parts[0] as int, parts[1] as int, bugfix as int,
|
||||
snapshot, false)
|
||||
snapshot)
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -277,43 +277,3 @@ if (isEclipse == false || project.path == ":core-tests") {
|
|||
check.dependsOn integTest
|
||||
integTest.mustRunAfter test
|
||||
}
|
||||
|
||||
task('verifyVersions') {
|
||||
description 'Verifies that all released versions that are indexed compatible are listed in Version.java.'
|
||||
group 'Verification'
|
||||
enabled = false == gradle.startParameter.isOffline()
|
||||
doLast {
|
||||
// Read the list from maven central
|
||||
Node xml
|
||||
new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
|
||||
xml = new XmlParser().parse(s)
|
||||
}
|
||||
Set<String> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ })
|
||||
|
||||
// Limit the known versions to those that should be wire compatible
|
||||
String currentVersion = versions.elasticsearch.minus('-SNAPSHOT')
|
||||
int prevMajor = Integer.parseInt(currentVersion.split('\\.')[0]) - 1
|
||||
if (prevMajor == 4) {
|
||||
// 4 didn't exist, it was 2.
|
||||
prevMajor = 2;
|
||||
}
|
||||
knownVersions = knownVersions.findAll { Integer.parseInt(it.split('\\.')[0]) >= prevMajor }
|
||||
|
||||
/* Limit the listed versions to those that have been marked as released.
|
||||
* Versions not marked as released don't get the same testing and we want
|
||||
* to make sure that we flip all unreleased versions to released as soon
|
||||
* as possible after release. */
|
||||
Set<String> actualVersions = new TreeSet<>(
|
||||
indexCompatVersions
|
||||
.findAll { false == it.unreleased }
|
||||
.collect { it.toString() })
|
||||
|
||||
// Finally, compare!
|
||||
if (!knownVersions.equals(actualVersions)) {
|
||||
throw new GradleException("out-of-date versions\nActual :" +
|
||||
actualVersions + "\nExpected:" + knownVersions +
|
||||
"; update Version.java")
|
||||
}
|
||||
}
|
||||
}
|
||||
check.dependsOn(verifyVersions)
|
||||
|
|
|
@ -74,15 +74,17 @@ public class Version implements Comparable<Version> {
|
|||
public static final Version V_5_3_2 = new Version(V_5_3_2_ID, org.apache.lucene.util.Version.LUCENE_6_4_2);
|
||||
public static final int V_5_4_0_ID = 5040099;
|
||||
public static final Version V_5_4_0 = new Version(V_5_4_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
|
||||
public static final int V_5_5_0_ID_UNRELEASED = 5050099;
|
||||
public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_1);
|
||||
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
|
||||
public static final Version V_6_0_0_alpha1_UNRELEASED =
|
||||
new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final int V_6_0_0_alpha2_ID_UNRELEASED = 6000002;
|
||||
public static final Version V_6_0_0_alpha2_UNRELEASED =
|
||||
new Version(V_6_0_0_alpha2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final Version CURRENT = V_6_0_0_alpha2_UNRELEASED;
|
||||
public static final int V_5_4_1_ID = 5040199;
|
||||
public static final Version V_5_4_1 = new Version(V_5_4_1_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
|
||||
public static final int V_5_5_0_ID = 5050099;
|
||||
public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_5_0);
|
||||
public static final int V_6_0_0_alpha1_ID = 6000001;
|
||||
public static final Version V_6_0_0_alpha1 =
|
||||
new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final int V_6_0_0_alpha2_ID = 6000002;
|
||||
public static final Version V_6_0_0_alpha2 =
|
||||
new Version(V_6_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
|
||||
public static final Version CURRENT = V_6_0_0_alpha2;
|
||||
|
||||
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
|
||||
|
||||
|
@ -97,12 +99,14 @@ public class Version implements Comparable<Version> {
|
|||
|
||||
public static Version fromId(int id) {
|
||||
switch (id) {
|
||||
case V_6_0_0_alpha2_ID_UNRELEASED:
|
||||
return V_6_0_0_alpha2_UNRELEASED;
|
||||
case V_6_0_0_alpha1_ID_UNRELEASED:
|
||||
return V_6_0_0_alpha1_UNRELEASED;
|
||||
case V_5_5_0_ID_UNRELEASED:
|
||||
return V_5_5_0_UNRELEASED;
|
||||
case V_6_0_0_alpha2_ID:
|
||||
return V_6_0_0_alpha2;
|
||||
case V_6_0_0_alpha1_ID:
|
||||
return V_6_0_0_alpha1;
|
||||
case V_5_5_0_ID:
|
||||
return V_5_5_0;
|
||||
case V_5_4_1_ID:
|
||||
return V_5_4_1;
|
||||
case V_5_4_0_ID:
|
||||
return V_5_4_0;
|
||||
case V_5_3_2_ID:
|
||||
|
|
|
@ -261,7 +261,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
|
|||
type = in.readString();
|
||||
id = in.readString();
|
||||
version = in.readZLong();
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
seqNo = in.readZLong();
|
||||
primaryTerm = in.readVLong();
|
||||
} else {
|
||||
|
@ -279,7 +279,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
|
|||
out.writeString(type);
|
||||
out.writeString(id);
|
||||
out.writeZLong(version);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeZLong(seqNo);
|
||||
out.writeVLong(primaryTerm);
|
||||
}
|
||||
|
|
|
@ -79,7 +79,7 @@ public class ClusterStateResponse extends ActionResponse {
|
|||
super.readFrom(in);
|
||||
clusterName = new ClusterName(in);
|
||||
clusterState = ClusterState.readFrom(in, null);
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
totalCompressedSize = new ByteSizeValue(in);
|
||||
} else {
|
||||
// in a mixed cluster, if a pre 6.0 node processes the get cluster state
|
||||
|
@ -95,7 +95,7 @@ public class ClusterStateResponse extends ActionResponse {
|
|||
super.writeTo(out);
|
||||
clusterName.writeTo(out);
|
||||
clusterState.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
totalCompressedSize.writeTo(out);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -492,7 +492,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
|
|||
for (int i = 0; i < size; i++) {
|
||||
final String type = in.readString();
|
||||
String source = in.readString();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO change to 5.3.0 after backport
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO change to 5.3.0 after backport
|
||||
// we do not know the content type that comes from earlier versions so we autodetect and convert
|
||||
source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source));
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.indices.shards;
|
|||
|
||||
import com.carrotsearch.hppc.cursors.IntObjectCursor;
|
||||
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
|
@ -34,7 +35,6 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.shard.ShardStateMetaData;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
|
@ -165,7 +165,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
|
|||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
node = new DiscoveryNode(in);
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// legacy version
|
||||
in.readLong();
|
||||
}
|
||||
|
@ -179,7 +179,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
node.writeTo(out);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// legacy version
|
||||
out.writeLong(-1L);
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ public class ShardStats implements Streamable, Writeable, ToXContent {
|
|||
statePath = in.readString();
|
||||
dataPath = in.readString();
|
||||
isCustomDataPath = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
seqNoStats = in.readOptionalWriteable(SeqNoStats::new);
|
||||
}
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ public class ShardStats implements Streamable, Writeable, ToXContent {
|
|||
out.writeString(statePath);
|
||||
out.writeString(dataPath);
|
||||
out.writeBoolean(isCustomDataPath);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeOptionalWriteable(seqNoStats);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -475,7 +475,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
cause = in.readString();
|
||||
name = in.readString();
|
||||
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
indexPatterns = in.readList(StreamInput::readString);
|
||||
} else {
|
||||
indexPatterns = Collections.singletonList(in.readString());
|
||||
|
@ -512,7 +512,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
|
|||
super.writeTo(out);
|
||||
out.writeString(cause);
|
||||
out.writeString(name);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeStringList(indexPatterns);
|
||||
} else {
|
||||
out.writeString(indexPatterns.size() > 0 ? indexPatterns.get(0) : "");
|
||||
|
|
|
@ -78,7 +78,7 @@ public class BulkItemRequest implements Streamable {
|
|||
if (in.readBoolean()) {
|
||||
primaryResponse = BulkItemResponse.readBulkItem(in);
|
||||
}
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
|
||||
boolean ignoreOnReplica = in.readBoolean();
|
||||
if (ignoreOnReplica == false && primaryResponse != null) {
|
||||
assert primaryResponse.isFailed() == false : "expected no failure on the primary response";
|
||||
|
@ -89,7 +89,7 @@ public class BulkItemRequest implements Streamable {
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(id);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
|
||||
// old nodes expect updated version and version type on the request
|
||||
if (primaryResponse != null) {
|
||||
request.version(primaryResponse.getVersion());
|
||||
|
@ -102,7 +102,7 @@ public class BulkItemRequest implements Streamable {
|
|||
DocWriteRequest.writeDocumentRequest(out, request);
|
||||
}
|
||||
out.writeOptionalStreamable(primaryResponse);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // TODO remove once backported
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) { // TODO remove once backported
|
||||
if (primaryResponse != null) {
|
||||
out.writeBoolean(primaryResponse.isFailed()
|
||||
|| primaryResponse.getResponse().getResult() == DocWriteResponse.Result.NOOP);
|
||||
|
|
|
@ -211,7 +211,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
|
|||
id = in.readOptionalString();
|
||||
cause = in.readException();
|
||||
status = ExceptionsHelper.status(cause);
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
seqNo = in.readZLong();
|
||||
} else {
|
||||
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||
|
@ -224,7 +224,7 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
|
|||
out.writeString(getType());
|
||||
out.writeOptionalString(getId());
|
||||
out.writeException(getCause());
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeZLong(getSeqNo());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -78,7 +78,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
|
|||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
fields = in.readStringArray();
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
indices = in.readStringArray();
|
||||
indicesOptions = IndicesOptions.readIndicesOptions(in);
|
||||
mergeResults = in.readBoolean();
|
||||
|
@ -91,7 +91,7 @@ public final class FieldCapabilitiesRequest extends ActionRequest implements Ind
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeStringArray(fields);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
out.writeStringArray(indices);
|
||||
indicesOptions.writeIndicesOptions(out);
|
||||
out.writeBoolean(mergeResults);
|
||||
|
|
|
@ -86,7 +86,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
|
|||
super.readFrom(in);
|
||||
this.responseMap =
|
||||
in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField);
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
indexResponses = in.readList(FieldCapabilitiesIndexResponse::new);
|
||||
} else {
|
||||
indexResponses = Collections.emptyList();
|
||||
|
@ -101,7 +101,7 @@ public class FieldCapabilitiesResponse extends ActionResponse implements ToXCont
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
out.writeList(indexResponses);
|
||||
}
|
||||
|
||||
|
|
|
@ -523,7 +523,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
|
|||
id = in.readOptionalString();
|
||||
routing = in.readOptionalString();
|
||||
parent = in.readOptionalString();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
in.readOptionalString(); // timestamp
|
||||
in.readOptionalWriteable(TimeValue::new); // ttl
|
||||
}
|
||||
|
@ -548,7 +548,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
|
|||
out.writeOptionalString(id);
|
||||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(parent);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// Serialize a fake timestamp. 5.x expect this value to be set by the #process method so we can't use null.
|
||||
// On the other hand, indices created on 5.x do not index the timestamp field. Therefore passing a 0 (or any value) for
|
||||
// the transport layer OK as it will be ignored.
|
||||
|
|
|
@ -1011,7 +1011,7 @@ public abstract class TransportReplicationAction<
|
|||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
super.readFrom(in);
|
||||
localCheckpoint = in.readZLong();
|
||||
allocationId = in.readString();
|
||||
|
@ -1022,7 +1022,7 @@ public abstract class TransportReplicationAction<
|
|||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
super.writeTo(out);
|
||||
out.writeZLong(localCheckpoint);
|
||||
out.writeString(allocationId);
|
||||
|
@ -1191,7 +1191,7 @@ public abstract class TransportReplicationAction<
|
|||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
globalCheckpoint = in.readZLong();
|
||||
}
|
||||
}
|
||||
|
@ -1199,7 +1199,7 @@ public abstract class TransportReplicationAction<
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeZLong(globalCheckpoint);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -138,7 +138,7 @@ public class ClusterBlock implements Streamable, ToXContent {
|
|||
retryable = in.readBoolean();
|
||||
disableStatePersistence = in.readBoolean();
|
||||
status = RestStatus.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
allowReleaseResources = in.readBoolean();
|
||||
} else {
|
||||
allowReleaseResources = false;
|
||||
|
@ -156,7 +156,7 @@ public class ClusterBlock implements Streamable, ToXContent {
|
|||
out.writeBoolean(retryable);
|
||||
out.writeBoolean(disableStatePersistence);
|
||||
RestStatus.writeTo(out, status);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
out.writeBoolean(allowReleaseResources);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -210,7 +210,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
|
|||
public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException {
|
||||
Builder builder = new Builder(in.readString());
|
||||
builder.order(in.readInt());
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
builder.patterns(in.readList(StreamInput::readString));
|
||||
} else {
|
||||
builder.patterns(Collections.singletonList(in.readString()));
|
||||
|
@ -245,7 +245,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
out.writeInt(order);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeStringList(patterns);
|
||||
} else {
|
||||
out.writeString(patterns.size() > 0 ? patterns.get(0) : "");
|
||||
|
|
|
@ -196,7 +196,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
|
|||
source().writeTo(out);
|
||||
// routing
|
||||
out.writeBoolean(routing().required());
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// timestamp
|
||||
out.writeBoolean(false); // enabled
|
||||
out.writeString(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format());
|
||||
|
@ -233,7 +233,7 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
|
|||
source = CompressedXContent.readCompressedString(in);
|
||||
// routing
|
||||
routing = new Routing(in.readBoolean());
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// timestamp
|
||||
boolean enabled = in.readBoolean();
|
||||
if (enabled) {
|
||||
|
|
|
@ -179,7 +179,7 @@ public class OperationRouting extends AbstractComponent {
|
|||
}
|
||||
// if not, then use it as the index
|
||||
int routingHash = Murmur3HashFunction.hash(preference);
|
||||
if (nodes.getMinNodeVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (nodes.getMinNodeVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
// The AllocationService lists shards in a fixed order based on nodes
|
||||
// so earlier versions of this class would have a tendency to
|
||||
// select the same node across different shardIds.
|
||||
|
|
|
@ -344,7 +344,7 @@ public final class Settings implements ToXContent {
|
|||
final String setting,
|
||||
final Boolean defaultValue,
|
||||
final DeprecationLogger deprecationLogger) {
|
||||
if (indexVersion.before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (indexVersion.before(Version.V_6_0_0_alpha1)) {
|
||||
//Only emit a warning if the setting's value is not a proper boolean
|
||||
final String value = get(setting, "false");
|
||||
if (Booleans.isBoolean(value) == false) {
|
||||
|
|
|
@ -78,7 +78,7 @@ public final class TransportAddress implements Writeable {
|
|||
* {@link Version#V_5_0_2} as the hostString was not serialized
|
||||
*/
|
||||
public TransportAddress(StreamInput in, @Nullable String hostString) throws IOException {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) { // bwc layer for 5.x where we had more than one transport address
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // bwc layer for 5.x where we had more than one transport address
|
||||
final short i = in.readShort();
|
||||
if(i != 1) { // we fail hard to ensure nobody tries to use some custom transport address impl even if that is difficult to add
|
||||
throw new AssertionError("illegal transport ID from node of version: " + in.getVersion() + " got: " + i + " expected: 1");
|
||||
|
@ -101,7 +101,7 @@ public final class TransportAddress implements Writeable {
|
|||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
out.writeShort((short)1); // this maps to InetSocketTransportAddress in 5.x
|
||||
}
|
||||
byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
|
||||
|
|
|
@ -290,7 +290,7 @@ public class TransportNodesListGatewayStartedShards extends
|
|||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// legacy version
|
||||
in.readLong();
|
||||
}
|
||||
|
@ -304,7 +304,7 @@ public class TransportNodesListGatewayStartedShards extends
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// legacy version
|
||||
out.writeLong(-1L);
|
||||
}
|
||||
|
|
|
@ -120,13 +120,13 @@ public final class IndexSortConfig {
|
|||
.map((name) -> new FieldSortSpec(name))
|
||||
.toArray(FieldSortSpec[]::new);
|
||||
|
||||
if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (sortSpecs.length > 0 && indexSettings.getIndexVersionCreated().before(Version.V_6_0_0_alpha1)) {
|
||||
/**
|
||||
* This index might be assigned to a node where the index sorting feature is not available
|
||||
* (ie. versions prior to {@link Version.V_6_0_0_alpha1_UNRELEASED}) so we must fail here rather than later.
|
||||
*/
|
||||
throw new IllegalArgumentException("unsupported index.version.created:" + indexSettings.getIndexVersionCreated() +
|
||||
", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1_UNRELEASED);
|
||||
", can't set index.sort on versions prior to " + Version.V_6_0_0_alpha1);
|
||||
}
|
||||
|
||||
if (INDEX_SORT_ORDER_SETTING.exists(settings)) {
|
||||
|
|
|
@ -550,14 +550,14 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
||||
// legacy support
|
||||
assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no.;" +
|
||||
" index version: " + engineConfig.getIndexSettings().getIndexVersionCreated() + ", seqNo: " + seqNo;
|
||||
} else if (origin == Operation.Origin.PRIMARY) {
|
||||
// sequence number should not be set when operation origin is primary
|
||||
assert seqNo == SequenceNumbersService.UNASSIGNED_SEQ_NO : "primary ops should never have an assigned seq no.; seqNo: " + seqNo;
|
||||
} else if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
} else if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
// sequence number should be set when operation origin is not primary
|
||||
assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin;
|
||||
}
|
||||
|
@ -565,7 +565,7 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
private boolean assertSequenceNumberBeforeIndexing(final Engine.Operation.Origin origin, final long seqNo) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED) ||
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1) ||
|
||||
origin == Operation.Origin.PRIMARY) {
|
||||
// sequence number should be set when operation origin is primary or when all shards are on new nodes
|
||||
assert seqNo >= 0 : "ops should have an assigned seq no.; origin: " + origin;
|
||||
|
@ -679,7 +679,7 @@ public class InternalEngine extends Engine {
|
|||
} else {
|
||||
// This can happen if the primary is still on an old node and send traffic without seq# or we recover from translog
|
||||
// created by an old version.
|
||||
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) :
|
||||
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) :
|
||||
"index is newly created but op has no sequence numbers. op: " + index;
|
||||
opVsLucene = compareOpToLuceneDocBasedOnVersions(index);
|
||||
}
|
||||
|
@ -963,7 +963,7 @@ public class InternalEngine extends Engine {
|
|||
if (delete.seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||
opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
|
||||
} else {
|
||||
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1_UNRELEASED) :
|
||||
assert config().getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) :
|
||||
"index is newly created but op has no sequence numbers. op: " + delete;
|
||||
opVsLucene = compareOpToLuceneDocBasedOnVersions(delete);
|
||||
}
|
||||
|
|
|
@ -168,7 +168,7 @@ public class Segment implements Streamable {
|
|||
// verbose mode
|
||||
ramTree = readRamTree(in);
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
segmentSort = readSegmentSort(in);
|
||||
} else {
|
||||
segmentSort = null;
|
||||
|
@ -193,7 +193,7 @@ public class Segment implements Streamable {
|
|||
if (verbose) {
|
||||
writeRamTree(out, ramTree);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
writeSegmentSort(out, segmentSort);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -104,7 +104,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
|
|||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException {
|
||||
if (node.isEmpty() == false &&
|
||||
parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
throw new IllegalArgumentException("[_all] is disabled in 6.0. As a replacement, you can use an [copy_to] " +
|
||||
"on mapping fields to create your own catch all field.");
|
||||
}
|
||||
|
|
|
@ -233,7 +233,7 @@ public class BooleanFieldMapper extends FieldMapper {
|
|||
value = fieldType().nullValue();
|
||||
}
|
||||
} else {
|
||||
if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (indexCreatedVersion.onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
value = context.parser().booleanValue();
|
||||
} else {
|
||||
value = context.parser().booleanValueLenient();
|
||||
|
|
|
@ -205,7 +205,7 @@ public class DynamicTemplate implements ToXContent {
|
|||
try {
|
||||
xcontentFieldType = XContentFieldType.fromString(matchMappingType);
|
||||
} catch (IllegalArgumentException e) {
|
||||
if (indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (indexVersionCreated.onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
throw e;
|
||||
} else {
|
||||
DEPRECATION_LOGGER.deprecated("match_mapping_type [" + matchMappingType + "] is invalid and will be ignored: "
|
||||
|
|
|
@ -102,7 +102,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
Function<Settings, String> defValue = settings -> {
|
||||
boolean singleType = true;
|
||||
if (settings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) != null) {
|
||||
singleType = Version.indexCreated(settings).onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED);
|
||||
singleType = Version.indexCreated(settings).onOrAfter(Version.V_6_0_0_alpha1);
|
||||
}
|
||||
return Boolean.valueOf(singleType).toString();
|
||||
};
|
||||
|
|
|
@ -55,7 +55,7 @@ public class TypeParsers {
|
|||
//TODO 22298: Remove this method and have all call-sites use <code>XContentMapValues.nodeBooleanValue(node)</code> directly.
|
||||
public static boolean nodeBooleanValue(String fieldName, String propertyName, Object node,
|
||||
Mapper.TypeParser.ParserContext parserContext) {
|
||||
if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
return XContentMapValues.nodeBooleanValue(node, fieldName + "." + propertyName);
|
||||
} else {
|
||||
return nodeBooleanValueLenient(fieldName, propertyName, node);
|
||||
|
@ -247,7 +247,7 @@ public class TypeParsers {
|
|||
if (parserContext.isWithinMultiField()) {
|
||||
throw new MapperParsingException("include_in_all in multi fields is not allowed. Found the include_in_all in field ["
|
||||
+ name + "] which is within a multi field.");
|
||||
} else if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
} else if (parserContext.indexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
throw new MapperParsingException("[include_in_all] is not allowed for indices created on or after version 6.0.0 as " +
|
||||
"[_all] is deprecated. As a replacement, you can use an [copy_to] on mapping fields to create your " +
|
||||
"own catch all field.");
|
||||
|
|
|
@ -88,7 +88,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
|
|||
shouldClauses.addAll(readQueries(in));
|
||||
filterClauses.addAll(readQueries(in));
|
||||
adjustPureNegative = in.readBoolean();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
in.readBoolean(); // disable_coord
|
||||
}
|
||||
minimumShouldMatch = in.readOptionalString();
|
||||
|
@ -101,7 +101,7 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
|
|||
writeQueries(out, shouldClauses);
|
||||
writeQueries(out, filterClauses);
|
||||
out.writeBoolean(adjustPureNegative);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
out.writeBoolean(true); // disable_coord
|
||||
}
|
||||
out.writeOptionalString(minimumShouldMatch);
|
||||
|
|
|
@ -111,7 +111,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
|
|||
analyzer = in.readOptionalString();
|
||||
lowFreqMinimumShouldMatch = in.readOptionalString();
|
||||
highFreqMinimumShouldMatch = in.readOptionalString();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
in.readBoolean(); // disable_coord
|
||||
}
|
||||
cutoffFrequency = in.readFloat();
|
||||
|
@ -126,7 +126,7 @@ public class CommonTermsQueryBuilder extends AbstractQueryBuilder<CommonTermsQue
|
|||
out.writeOptionalString(analyzer);
|
||||
out.writeOptionalString(lowFreqMinimumShouldMatch);
|
||||
out.writeOptionalString(highFreqMinimumShouldMatch);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
out.writeBoolean(true); // disable_coord
|
||||
}
|
||||
out.writeFloat(cutoffFrequency);
|
||||
|
|
|
@ -126,7 +126,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
|
|||
*/
|
||||
public InnerHitBuilder(StreamInput in) throws IOException {
|
||||
name = in.readOptionalString();
|
||||
if (in.getVersion().before(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_5_0)) {
|
||||
in.readOptionalString();
|
||||
in.readOptionalString();
|
||||
}
|
||||
|
@ -156,7 +156,7 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
|
|||
}
|
||||
}
|
||||
highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new);
|
||||
if (in.getVersion().before(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_5_5_0)) {
|
||||
/**
|
||||
* this is needed for BWC with nodes pre 5.5
|
||||
*/
|
||||
|
@ -168,8 +168,8 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
|
|||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
if (out.getVersion().before(Version.V_5_5_0_UNRELEASED)) {
|
||||
throw new IOException("Invalid output version, must >= " + Version.V_5_5_0_UNRELEASED.toString());
|
||||
if (out.getVersion().before(Version.V_5_5_0)) {
|
||||
throw new IOException("Invalid output version, must >= " + Version.V_5_5_0.toString());
|
||||
}
|
||||
out.writeOptionalString(name);
|
||||
out.writeBoolean(ignoreUnmapped);
|
||||
|
@ -207,8 +207,8 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
|
|||
* Should only be used to send nested inner hits to nodes pre 5.5.
|
||||
*/
|
||||
protected void writeToNestedBWC(StreamOutput out, QueryBuilder query, String nestedPath) throws IOException {
|
||||
assert out.getVersion().before(Version.V_5_5_0_UNRELEASED) :
|
||||
"invalid output version, must be < " + Version.V_5_5_0_UNRELEASED.toString();
|
||||
assert out.getVersion().before(Version.V_5_5_0) :
|
||||
"invalid output version, must be < " + Version.V_5_5_0.toString();
|
||||
writeToBWC(out, query, nestedPath, null);
|
||||
}
|
||||
|
||||
|
@ -217,8 +217,8 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
|
|||
* Should only be used to send collapsing inner hits to nodes pre 5.5.
|
||||
*/
|
||||
public void writeToCollapseBWC(StreamOutput out) throws IOException {
|
||||
assert out.getVersion().before(Version.V_5_5_0_UNRELEASED) :
|
||||
"invalid output version, must be < " + Version.V_5_5_0_UNRELEASED.toString();
|
||||
assert out.getVersion().before(Version.V_5_5_0) :
|
||||
"invalid output version, must be < " + Version.V_5_5_0.toString();
|
||||
writeToBWC(out, new MatchAllQueryBuilder(), null, null);
|
||||
}
|
||||
|
||||
|
@ -227,8 +227,8 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
|
|||
* Should only be used to send hasParent or hasChild inner hits to nodes pre 5.5.
|
||||
*/
|
||||
public void writeToParentChildBWC(StreamOutput out, QueryBuilder query, String parentChildPath) throws IOException {
|
||||
assert(out.getVersion().before(Version.V_5_5_0_UNRELEASED)) :
|
||||
"invalid output version, must be < " + Version.V_5_5_0_UNRELEASED.toString();
|
||||
assert(out.getVersion().before(Version.V_5_5_0)) :
|
||||
"invalid output version, must be < " + Version.V_5_5_0.toString();
|
||||
writeToBWC(out, query, null, parentChildPath);
|
||||
}
|
||||
|
||||
|
|
|
@ -103,7 +103,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
|
|||
out.writeString(path);
|
||||
out.writeVInt(scoreMode.ordinal());
|
||||
out.writeNamedWriteable(query);
|
||||
if (out.getVersion().before(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_5_0)) {
|
||||
final boolean hasInnerHit = innerHitBuilder != null;
|
||||
out.writeBoolean(hasInnerHit);
|
||||
if (hasInnerHit) {
|
||||
|
|
|
@ -86,7 +86,7 @@ public class GlobalCheckpointSyncAction extends TransportReplicationAction<
|
|||
final ConcreteReplicaRequest<Request> replicaRequest,
|
||||
final DiscoveryNode node,
|
||||
final ActionListener<ReplicationOperation.ReplicaResponse> listener) {
|
||||
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
super.sendReplicaRequest(replicaRequest, node, listener);
|
||||
} else {
|
||||
listener.onResponse(new ReplicaResponse(replicaRequest.getTargetAllocationID(), SequenceNumbersService.UNASSIGNED_SEQ_NO));
|
||||
|
|
|
@ -1092,7 +1092,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
|
||||
private boolean assertMaxUnsafeAutoIdInCommit() throws IOException {
|
||||
final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
|
||||
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_5_0_UNRELEASED) &&
|
||||
if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_5_0) &&
|
||||
// TODO: LOCAL_SHARDS need to transfer this information
|
||||
recoveryState().getRecoverySource().getType() != RecoverySource.Type.LOCAL_SHARDS) {
|
||||
// as of 5.5.0, the engine stores the maxUnsafeAutoIdTimestamp in the commit point.
|
||||
|
|
|
@ -68,7 +68,7 @@ public class StoreStats implements Streamable, ToXContent {
|
|||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
sizeInBytes = in.readVLong();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
in.readVLong(); // throttleTimeInNanos
|
||||
}
|
||||
}
|
||||
|
@ -76,7 +76,7 @@ public class StoreStats implements Streamable, ToXContent {
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(sizeInBytes);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
out.writeVLong(0L); // throttleTimeInNanos
|
||||
}
|
||||
}
|
||||
|
|
|
@ -591,7 +591,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
|
|||
final DiscoveryNodes nodes) {
|
||||
return shardRoutings
|
||||
.stream()
|
||||
.filter(sr -> nodes.get(sr.currentNodeId()).getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED))
|
||||
.filter(sr -> nodes.get(sr.currentNodeId()).getVersion().onOrAfter(Version.V_6_0_0_alpha1))
|
||||
.map(ShardRouting::allocationId)
|
||||
.map(AllocationId::getId)
|
||||
.collect(Collectors.toSet());
|
||||
|
|
|
@ -60,7 +60,7 @@ public class RecoveryFinalizeRecoveryRequest extends TransportRequest {
|
|||
super.readFrom(in);
|
||||
recoveryId = in.readLong();
|
||||
shardId = ShardId.readShardId(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
globalCheckpoint = in.readZLong();
|
||||
} else {
|
||||
globalCheckpoint = SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||
|
@ -72,7 +72,7 @@ public class RecoveryFinalizeRecoveryRequest extends TransportRequest {
|
|||
super.writeTo(out);
|
||||
out.writeLong(recoveryId);
|
||||
shardId.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeZLong(globalCheckpoint);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -61,7 +61,7 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
|
|||
recoveryId = in.readLong();
|
||||
shardId = ShardId.readShardId(in);
|
||||
totalTranslogOps = in.readVInt();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
in.readLong(); // maxUnsafeAutoIdTimestamp
|
||||
}
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
|
|||
out.writeLong(recoveryId);
|
||||
shardId.writeTo(out);
|
||||
out.writeVInt(totalTranslogOps);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp
|
||||
}
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ public class RecoveryTranslogOperationsResponse extends TransportResponse {
|
|||
@Override
|
||||
public void writeTo(final StreamOutput out) throws IOException {
|
||||
// before 6.0.0 we responded with an empty response so we have to maintain that
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeZLong(localCheckpoint);
|
||||
}
|
||||
}
|
||||
|
@ -52,7 +52,7 @@ public class RecoveryTranslogOperationsResponse extends TransportResponse {
|
|||
@Override
|
||||
public void readFrom(final StreamInput in) throws IOException {
|
||||
// before 6.0.0 we received an empty response so we have to maintain that
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
localCheckpoint = in.readZLong();
|
||||
}
|
||||
else {
|
||||
|
|
|
@ -119,7 +119,7 @@ public class StartRecoveryRequest extends TransportRequest {
|
|||
targetNode = new DiscoveryNode(in);
|
||||
metadataSnapshot = new Store.MetadataSnapshot(in);
|
||||
primaryRelocation = in.readBoolean();
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
startingSeqNo = in.readLong();
|
||||
} else {
|
||||
startingSeqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
|
||||
|
@ -136,7 +136,7 @@ public class StartRecoveryRequest extends TransportRequest {
|
|||
targetNode.writeTo(out);
|
||||
metadataSnapshot.writeTo(out);
|
||||
out.writeBoolean(primaryRelocation);
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeLong(startingSeqNo);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -70,7 +70,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
|
|||
total = in.readLong();
|
||||
free = in.readLong();
|
||||
available = in.readLong();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
in.readOptionalBoolean();
|
||||
}
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
|
|||
out.writeLong(total);
|
||||
out.writeLong(free);
|
||||
out.writeLong(available);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
out.writeOptionalBoolean(null);
|
||||
}
|
||||
}
|
||||
|
@ -455,7 +455,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
|
|||
paths[i] = new Path(in);
|
||||
}
|
||||
this.total = total();
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
this.leastDiskEstimate = in.readOptionalWriteable(DiskUsage::new);
|
||||
this.mostDiskEstimate = in.readOptionalWriteable(DiskUsage::new);
|
||||
} else {
|
||||
|
@ -472,7 +472,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContent {
|
|||
for (Path path : paths) {
|
||||
path.writeTo(out);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
out.writeOptionalWriteable(this.leastDiskEstimate);
|
||||
out.writeOptionalWriteable(this.mostDiskEstimate);
|
||||
}
|
||||
|
|
|
@ -430,7 +430,7 @@ public class InternalOrder extends BucketOrder {
|
|||
* @throws IOException on error reading from the stream.
|
||||
*/
|
||||
public static BucketOrder readHistogramOrder(StreamInput in, boolean bwcOrderFlag) throws IOException {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
|
||||
return Streams.readOrder(in);
|
||||
} else { // backwards compat logic
|
||||
if (bwcOrderFlag == false || in.readBoolean()) {
|
||||
|
@ -486,7 +486,7 @@ public class InternalOrder extends BucketOrder {
|
|||
* @throws IOException on error writing to the stream.
|
||||
*/
|
||||
public static void writeHistogramOrder(BucketOrder order, StreamOutput out, boolean bwcOrderFlag) throws IOException {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
|
||||
order.writeTo(out);
|
||||
} else { // backwards compat logic
|
||||
if(bwcOrderFlag) { // need to add flag that determines if order exists
|
||||
|
|
|
@ -95,7 +95,7 @@ public class CollapseBuilder implements Writeable, ToXContentObject {
|
|||
public CollapseBuilder(StreamInput in) throws IOException {
|
||||
this.field = in.readString();
|
||||
this.maxConcurrentGroupRequests = in.readVInt();
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
this.innerHits = in.readList(InnerHitBuilder::new);
|
||||
} else {
|
||||
InnerHitBuilder innerHitBuilder = in.readOptionalWriteable(InnerHitBuilder::new);
|
||||
|
@ -111,7 +111,7 @@ public class CollapseBuilder implements Writeable, ToXContentObject {
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(field);
|
||||
out.writeVInt(maxConcurrentGroupRequests);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
out.writeList(innerHits);
|
||||
} else {
|
||||
boolean hasInnerHit = innerHits.isEmpty() == false;
|
||||
|
|
|
@ -70,7 +70,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
|
|||
private static final String SUCCESSFUL_SHARDS = "successful_shards";
|
||||
|
||||
private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0;
|
||||
public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0_UNRELEASED;
|
||||
public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0;
|
||||
|
||||
private static final Comparator<SnapshotInfo> COMPARATOR =
|
||||
Comparator.comparing(SnapshotInfo::startTime).thenComparing(SnapshotInfo::snapshotId);
|
||||
|
|
|
@ -612,7 +612,7 @@ public class ThreadPool extends AbstractComponent implements Closeable {
|
|||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
if (type == ThreadPoolType.FIXED_AUTO_QUEUE_SIZE &&
|
||||
out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
out.getVersion().before(Version.V_6_0_0_alpha1)) {
|
||||
// 5.x doesn't know about the "fixed_auto_queue_size" thread pool type, just write fixed.
|
||||
out.writeString(ThreadPoolType.FIXED.getType());
|
||||
} else {
|
||||
|
|
|
@ -34,7 +34,7 @@ import java.util.Map;
|
|||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.Version.V_5_3_0;
|
||||
import static org.elasticsearch.Version.V_6_0_0_alpha2_UNRELEASED;
|
||||
import static org.elasticsearch.Version.V_6_0_0_alpha2;
|
||||
import static org.elasticsearch.test.VersionUtils.randomVersion;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
|
@ -46,30 +46,30 @@ import static org.hamcrest.Matchers.sameInstance;
|
|||
public class VersionTests extends ESTestCase {
|
||||
|
||||
public void testVersionComparison() throws Exception {
|
||||
assertThat(V_5_3_0.before(V_6_0_0_alpha2_UNRELEASED), is(true));
|
||||
assertThat(V_5_3_0.before(V_6_0_0_alpha2), is(true));
|
||||
assertThat(V_5_3_0.before(V_5_3_0), is(false));
|
||||
assertThat(V_6_0_0_alpha2_UNRELEASED.before(V_5_3_0), is(false));
|
||||
assertThat(V_6_0_0_alpha2.before(V_5_3_0), is(false));
|
||||
|
||||
assertThat(V_5_3_0.onOrBefore(V_6_0_0_alpha2_UNRELEASED), is(true));
|
||||
assertThat(V_5_3_0.onOrBefore(V_6_0_0_alpha2), is(true));
|
||||
assertThat(V_5_3_0.onOrBefore(V_5_3_0), is(true));
|
||||
assertThat(V_6_0_0_alpha2_UNRELEASED.onOrBefore(V_5_3_0), is(false));
|
||||
assertThat(V_6_0_0_alpha2.onOrBefore(V_5_3_0), is(false));
|
||||
|
||||
assertThat(V_5_3_0.after(V_6_0_0_alpha2_UNRELEASED), is(false));
|
||||
assertThat(V_5_3_0.after(V_6_0_0_alpha2), is(false));
|
||||
assertThat(V_5_3_0.after(V_5_3_0), is(false));
|
||||
assertThat(V_6_0_0_alpha2_UNRELEASED.after(V_5_3_0), is(true));
|
||||
assertThat(V_6_0_0_alpha2.after(V_5_3_0), is(true));
|
||||
|
||||
assertThat(V_5_3_0.onOrAfter(V_6_0_0_alpha2_UNRELEASED), is(false));
|
||||
assertThat(V_5_3_0.onOrAfter(V_6_0_0_alpha2), is(false));
|
||||
assertThat(V_5_3_0.onOrAfter(V_5_3_0), is(true));
|
||||
assertThat(V_6_0_0_alpha2_UNRELEASED.onOrAfter(V_5_3_0), is(true));
|
||||
assertThat(V_6_0_0_alpha2.onOrAfter(V_5_3_0), is(true));
|
||||
|
||||
assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1")));
|
||||
assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2")));
|
||||
assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24")));
|
||||
assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0")));
|
||||
|
||||
assertThat(V_5_3_0, is(lessThan(V_6_0_0_alpha2_UNRELEASED)));
|
||||
assertThat(V_5_3_0, is(lessThan(V_6_0_0_alpha2)));
|
||||
assertThat(V_5_3_0.compareTo(V_5_3_0), is(0));
|
||||
assertThat(V_6_0_0_alpha2_UNRELEASED, is(greaterThan(V_5_3_0)));
|
||||
assertThat(V_6_0_0_alpha2, is(greaterThan(V_5_3_0)));
|
||||
}
|
||||
|
||||
public void testMin() {
|
||||
|
@ -97,7 +97,7 @@ public class VersionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testMinimumIndexCompatibilityVersion() {
|
||||
assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha2_UNRELEASED.minimumIndexCompatibilityVersion());
|
||||
assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha2.minimumIndexCompatibilityVersion());
|
||||
assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion());
|
||||
assertEquals(Version.fromId(2000099),
|
||||
Version.V_5_1_1.minimumIndexCompatibilityVersion());
|
||||
|
@ -157,7 +157,7 @@ public class VersionTests extends ESTestCase {
|
|||
public void testIndexCreatedVersion() {
|
||||
// an actual index has a IndexMetaData.SETTING_INDEX_UUID
|
||||
final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2,
|
||||
Version.V_5_2_0, Version.V_6_0_0_alpha2_UNRELEASED);
|
||||
Version.V_5_2_0, Version.V_6_0_0_alpha2);
|
||||
assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
|
||||
}
|
||||
|
||||
|
@ -170,11 +170,11 @@ public class VersionTests extends ESTestCase {
|
|||
assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major));
|
||||
// from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is
|
||||
// released since we need to bump the supported minor in Version#minimumCompatibilityVersion()
|
||||
Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha2_UNRELEASED);
|
||||
assertEquals(lastVersion.major, Version.V_6_0_0_alpha2_UNRELEASED.minimumCompatibilityVersion().major);
|
||||
Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1);
|
||||
assertEquals(lastVersion.major, Version.V_6_0_0_alpha2.minimumCompatibilityVersion().major);
|
||||
assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
|
||||
lastVersion.minor, Version.V_6_0_0_alpha2_UNRELEASED.minimumCompatibilityVersion().minor);
|
||||
assertEquals(0, Version.V_6_0_0_alpha2_UNRELEASED.minimumCompatibilityVersion().revision);
|
||||
lastVersion.minor, Version.V_6_0_0_alpha2.minimumCompatibilityVersion().minor);
|
||||
assertEquals(0, Version.V_6_0_0_alpha2.minimumCompatibilityVersion().revision);
|
||||
}
|
||||
|
||||
public void testToString() {
|
||||
|
@ -325,8 +325,8 @@ public class VersionTests extends ESTestCase {
|
|||
|
||||
public void testIsCompatible() {
|
||||
assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
|
||||
assertTrue(isCompatible(Version.V_5_5_0_UNRELEASED, Version.V_6_0_0_alpha2_UNRELEASED));
|
||||
assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2_UNRELEASED));
|
||||
assertTrue(isCompatible(Version.V_5_5_0, Version.V_6_0_0_alpha2));
|
||||
assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2));
|
||||
assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
|
||||
assertTrue(isCompatible(Version.fromString("6.0.0"), Version.fromString("7.0.0")));
|
||||
if (Version.CURRENT.isRelease()) {
|
||||
|
|
|
@ -155,7 +155,7 @@ public class SettingsTests extends ESTestCase {
|
|||
// time to say goodbye?
|
||||
assertTrue(
|
||||
"It's time to implement #22298. Please delete this test and Settings#getAsBooleanLenientForPreEs6Indices().",
|
||||
Version.CURRENT.minimumCompatibilityVersion().before(Version.V_6_0_0_alpha1_UNRELEASED));
|
||||
Version.CURRENT.minimumCompatibilityVersion().before(Version.V_6_0_0_alpha1));
|
||||
|
||||
|
||||
String falsy = randomFrom("false", "off", "no", "0");
|
||||
|
|
|
@ -2578,7 +2578,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
for (int i = 0; i < numExtraDocs; i++) {
|
||||
ParsedDocument doc = testParsedDocument("extra" + Integer.toString(i), null, testDocument(), new BytesArray("{}"), null);
|
||||
Term uid;
|
||||
if (indexMetaData.getCreationVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (indexMetaData.getCreationVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
uid = new Term(IdFieldMapper.NAME, doc.id());
|
||||
} else {
|
||||
uid = new Term(UidFieldMapper.NAME, Uid.createUid(doc.type(), doc.id()));
|
||||
|
|
|
@ -56,7 +56,7 @@ public class DynamicTemplateTests extends ESTestCase {
|
|||
templateDef2.put("mapping", Collections.singletonMap("store", true));
|
||||
// if a wrong match type is specified, we ignore the template
|
||||
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
|
||||
() -> DynamicTemplate.parse("my_template", templateDef2, Version.V_6_0_0_alpha1_UNRELEASED));
|
||||
() -> DynamicTemplate.parse("my_template", templateDef2, Version.V_6_0_0_alpha1));
|
||||
assertEquals("No field type matched on [text], possible values are [object, string, long, double, boolean, date, binary]",
|
||||
e.getMessage());
|
||||
}
|
||||
|
|
|
@ -71,7 +71,7 @@ public class StartRecoveryRequestTests extends ESTestCase {
|
|||
assertThat(outRequest.metadataSnapshot().asMap(), equalTo(inRequest.metadataSnapshot().asMap()));
|
||||
assertThat(outRequest.isPrimaryRelocation(), equalTo(inRequest.isPrimaryRelocation()));
|
||||
assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId()));
|
||||
if (targetNodeVersion.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (targetNodeVersion.onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
assertThat(outRequest.startingSeqNo(), equalTo(inRequest.startingSeqNo()));
|
||||
} else {
|
||||
assertThat(SequenceNumbersService.UNASSIGNED_SEQ_NO, equalTo(inRequest.startingSeqNo()));
|
||||
|
|
|
@ -110,7 +110,7 @@ public class InternalOrderTests extends AbstractSerializingTestCase<BucketOrder>
|
|||
for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) {
|
||||
BucketOrder order = createTestInstance();
|
||||
Version bwcVersion = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(),
|
||||
VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha2_UNRELEASED));
|
||||
VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha2));
|
||||
boolean bwcOrderFlag = randomBoolean();
|
||||
try (BytesStreamOutput out = new BytesStreamOutput()) {
|
||||
out.setVersion(bwcVersion);
|
||||
|
|
|
@ -112,7 +112,7 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi
|
|||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (in.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
maxConcurrentSearchRequests = in.readVInt();
|
||||
}
|
||||
requests = in.readStreamableList(SearchTemplateRequest::new);
|
||||
|
@ -121,7 +121,7 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi
|
|||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().onOrAfter(Version.V_5_5_0)) {
|
||||
out.writeVInt(maxConcurrentSearchRequests);
|
||||
}
|
||||
out.writeStreamableList(requests);
|
||||
|
|
|
@ -126,7 +126,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
|
|||
out.writeInt(maxChildren);
|
||||
out.writeVInt(scoreMode.ordinal());
|
||||
out.writeNamedWriteable(query);
|
||||
if (out.getVersion().before(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_5_0)) {
|
||||
final boolean hasInnerHit = innerHitBuilder != null;
|
||||
out.writeBoolean(hasInnerHit);
|
||||
if (hasInnerHit) {
|
||||
|
|
|
@ -121,7 +121,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
|
|||
out.writeString(type);
|
||||
out.writeBoolean(score);
|
||||
out.writeNamedWriteable(query);
|
||||
if (out.getVersion().before(Version.V_5_5_0_UNRELEASED)) {
|
||||
if (out.getVersion().before(Version.V_5_5_0)) {
|
||||
final boolean hasInnerHit = innerHitBuilder != null;
|
||||
out.writeBoolean(hasInnerHit);
|
||||
if (hasInnerHit) {
|
||||
|
|
|
@ -52,7 +52,7 @@ public class FullClusterRestartIT extends ESRestTestCase {
|
|||
|
||||
private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
|
||||
private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
|
||||
private final boolean supportsLenientBooleans = oldClusterVersion.onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED);
|
||||
private final boolean supportsLenientBooleans = oldClusterVersion.onOrAfter(Version.V_6_0_0_alpha1);
|
||||
|
||||
@Override
|
||||
protected boolean preserveIndicesUponCompletion() {
|
||||
|
|
|
@ -207,7 +207,7 @@ public class IndexingIT extends ESRestTestCase {
|
|||
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
|
||||
.put("index.routing.allocation.include._name", bwcNames);
|
||||
|
||||
final boolean checkGlobalCheckpoints = nodes.getMaster().getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED);
|
||||
final boolean checkGlobalCheckpoints = nodes.getMaster().getVersion().onOrAfter(Version.V_6_0_0_alpha1);
|
||||
logger.info("master version is [{}], global checkpoints will be [{}]", nodes.getMaster().getVersion(),
|
||||
checkGlobalCheckpoints ? "checked" : "not be checked");
|
||||
final String index = "test";
|
||||
|
@ -287,7 +287,7 @@ public class IndexingIT extends ESRestTestCase {
|
|||
final long expectedGlobalCkp;
|
||||
final long expectMaxSeqNo;
|
||||
logger.info("primary resolved to node {}", primaryShard.getNode());
|
||||
if (primaryShard.getNode().getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (primaryShard.getNode().getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
expectMaxSeqNo = numDocs - 1;
|
||||
expectedGlobalCkp = numDocs - 1;
|
||||
} else {
|
||||
|
@ -295,7 +295,7 @@ public class IndexingIT extends ESRestTestCase {
|
|||
expectMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
|
||||
}
|
||||
for (Shard shard : shards) {
|
||||
if (shard.getNode().getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (shard.getNode().getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
final SeqNoStats seqNoStats = shard.getSeqNoStats();
|
||||
logger.info("stats for {}, primary [{}]: [{}]", shard.getNode(), shard.isPrimary(), seqNoStats);
|
||||
assertThat("max_seq no on " + shard.getNode() + " is wrong", seqNoStats.getMaxSeqNo(), equalTo(expectMaxSeqNo));
|
||||
|
@ -324,7 +324,7 @@ public class IndexingIT extends ESRestTestCase {
|
|||
final Boolean primary = ObjectPath.evaluate(shard, "routing.primary");
|
||||
final Node node = nodes.getSafe(nodeId);
|
||||
final SeqNoStats seqNoStats;
|
||||
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
|
||||
Integer maxSeqNo = ObjectPath.evaluate(shard, "seq_no.max_seq_no");
|
||||
Integer localCheckpoint = ObjectPath.evaluate(shard, "seq_no.local_checkpoint");
|
||||
Integer globalCheckpoint = ObjectPath.evaluate(shard, "seq_no.global_checkpoint");
|
||||
|
|
|
@ -20,60 +20,101 @@
|
|||
package org.elasticsearch.test;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.lang.reflect.Modifier;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.unmodifiableList;
|
||||
|
||||
/** Utilities for selecting versions in tests */
|
||||
public class VersionUtils {
|
||||
/**
|
||||
* Sort versions that have backwards compatibility guarantees from
|
||||
* those that don't. Doesn't actually check whether or not the versions
|
||||
* are released, instead it relies on gradle to have already checked
|
||||
* this which it does in {@code :core:verifyVersions}. So long as the
|
||||
* rules here match up with the rules in gradle then this should
|
||||
* produce sensible results.
|
||||
* @return a tuple containing versions with backwards compatibility
|
||||
* guarantees in v1 and versions without the guranteees in v2
|
||||
*/
|
||||
static Tuple<List<Version>, List<Version>> resolveReleasedVersions(Version current, Class<?> versionClass) {
|
||||
Field[] fields = versionClass.getFields();
|
||||
List<Version> versions = new ArrayList<>(fields.length);
|
||||
for (final Field field : fields) {
|
||||
final int mod = field.getModifiers();
|
||||
if (false == Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
|
||||
continue;
|
||||
}
|
||||
if (field.getType() != Version.class) {
|
||||
continue;
|
||||
}
|
||||
assert field.getName().matches("(V(_\\d+)+(_(alpha|beta|rc)\\d+)?|CURRENT)") : field.getName();
|
||||
if ("CURRENT".equals(field.getName())) {
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
versions.add(((Version) field.get(null)));
|
||||
} catch (final IllegalAccessException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
}
|
||||
Collections.sort(versions);
|
||||
assert versions.get(versions.size() - 1).equals(current) : "The highest version must be the current one "
|
||||
+ "but was [" + versions.get(versions.size() - 1) + "] and current was [" + current + "]";
|
||||
|
||||
if (current.revision != 0) {
|
||||
/* If we are in a stable branch there should be no unreleased version constants
|
||||
* because we don't expect to release any new versions in older branches. If there
|
||||
* are extra constants then gradle will yell about it. */
|
||||
return new Tuple<>(unmodifiableList(versions), emptyList());
|
||||
}
|
||||
|
||||
/* If we are on a patch release then we know that at least the version before the
|
||||
* current one is unreleased. If it is released then gradle would be complaining. */
|
||||
int unreleasedIndex = versions.size() - 2;
|
||||
while (true) {
|
||||
if (unreleasedIndex < 0) {
|
||||
throw new IllegalArgumentException("Couldn't find first non-alpha release");
|
||||
}
|
||||
/* Technically we don't support backwards compatiblity for alphas, betas,
|
||||
* and rcs. But the testing infrastructure requires that we act as though we
|
||||
* do. This is a difference between the gradle and Java logic but should be
|
||||
* fairly safe as it is errs on us being more compatible rather than less....
|
||||
* Anyway, the upshot is that we never declare alphas as unreleased, no
|
||||
* matter where they are in the list. */
|
||||
if (versions.get(unreleasedIndex).isRelease()) {
|
||||
break;
|
||||
}
|
||||
unreleasedIndex--;
|
||||
}
|
||||
|
||||
Version unreleased = versions.remove(unreleasedIndex);
|
||||
if (unreleased.revision == 0) {
|
||||
/* If the last unreleased version is itself a patch release then gradle enforces
|
||||
* that there is yet another unreleased version before that. */
|
||||
unreleasedIndex--;
|
||||
Version earlierUnreleased = versions.remove(unreleasedIndex);
|
||||
return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(earlierUnreleased, unreleased)));
|
||||
}
|
||||
return new Tuple<>(unmodifiableList(versions), singletonList(unreleased));
|
||||
}
|
||||
|
||||
private static final List<Version> RELEASED_VERSIONS;
|
||||
private static final List<Version> UNRELEASED_VERSIONS;
|
||||
|
||||
static {
|
||||
final Field[] declaredFields = Version.class.getFields();
|
||||
final Set<Integer> releasedIdsSet = new HashSet<>();
|
||||
final Set<Integer> unreleasedIdsSet = new HashSet<>();
|
||||
for (final Field field : declaredFields) {
|
||||
final int mod = field.getModifiers();
|
||||
if (Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
|
||||
if (field.getType() == Version.class) {
|
||||
final int id;
|
||||
try {
|
||||
id = ((Version) field.get(null)).id;
|
||||
} catch (final IllegalAccessException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
assert field.getName().matches("(V(_\\d+)+(_(alpha|beta|rc)\\d+)?(_UNRELEASED)?|CURRENT)") : field.getName();
|
||||
// note that below we remove CURRENT and add it to released; we do it this way because there are two constants that
|
||||
// correspond to CURRENT, CURRENT itself and the actual version that CURRENT points to
|
||||
if (field.getName().equals("CURRENT") || field.getName().endsWith("UNRELEASED")) {
|
||||
unreleasedIdsSet.add(id);
|
||||
} else {
|
||||
releasedIdsSet.add(id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// treat CURRENT as released for BWC testing
|
||||
unreleasedIdsSet.remove(Version.CURRENT.id);
|
||||
releasedIdsSet.add(Version.CURRENT.id);
|
||||
|
||||
// unreleasedIdsSet and releasedIdsSet should be disjoint
|
||||
assert unreleasedIdsSet.stream().filter(releasedIdsSet::contains).collect(Collectors.toSet()).isEmpty();
|
||||
|
||||
RELEASED_VERSIONS =
|
||||
Collections.unmodifiableList(releasedIdsSet.stream().sorted().map(Version::fromId).collect(Collectors.toList()));
|
||||
UNRELEASED_VERSIONS =
|
||||
Collections.unmodifiableList(unreleasedIdsSet.stream().sorted().map(Version::fromId).collect(Collectors.toList()));
|
||||
Tuple<List<Version>, List<Version>> versions = resolveReleasedVersions(Version.CURRENT, Version.class);
|
||||
RELEASED_VERSIONS = versions.v1();
|
||||
UNRELEASED_VERSIONS = versions.v2();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -0,0 +1,162 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.test;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Collections.singletonList;
|
||||
|
||||
public class VersionUtilsTests extends ESTestCase {
|
||||
|
||||
public void testAllVersionsSorted() {
|
||||
List<Version> allVersions = VersionUtils.allReleasedVersions();
|
||||
for (int i = 0, j = 1; j < allVersions.size(); ++i, ++j) {
|
||||
assertTrue(allVersions.get(i).before(allVersions.get(j)));
|
||||
}
|
||||
}
|
||||
|
||||
public void testRandomVersionBetween() {
|
||||
// full range
|
||||
Version got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), Version.CURRENT);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
got = VersionUtils.randomVersionBetween(random(), null, Version.CURRENT);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), null);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
|
||||
// sub range
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
|
||||
Version.V_6_0_0_alpha2);
|
||||
assertTrue(got.onOrAfter(Version.V_5_0_0));
|
||||
assertTrue(got.onOrBefore(Version.V_6_0_0_alpha2));
|
||||
|
||||
// unbounded lower
|
||||
got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_alpha2);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.V_6_0_0_alpha2));
|
||||
got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0));
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0)));
|
||||
|
||||
// unbounded upper
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, null);
|
||||
assertTrue(got.onOrAfter(Version.V_5_0_0));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
|
||||
// range of one
|
||||
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getFirstVersion());
|
||||
assertEquals(got, VersionUtils.getFirstVersion());
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
|
||||
assertEquals(got, Version.CURRENT);
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha2,
|
||||
Version.V_6_0_0_alpha2);
|
||||
assertEquals(got, Version.V_6_0_0_alpha2);
|
||||
|
||||
// implicit range of one
|
||||
got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());
|
||||
assertEquals(got, VersionUtils.getFirstVersion());
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, null);
|
||||
assertEquals(got, Version.CURRENT);
|
||||
}
|
||||
|
||||
static class TestReleaseBranch {
|
||||
public static final Version V_5_3_0 = Version.fromString("5.3.0");
|
||||
public static final Version V_5_3_1 = Version.fromString("5.3.1");
|
||||
public static final Version V_5_3_2 = Version.fromString("5.3.2");
|
||||
public static final Version V_5_4_0 = Version.fromString("5.4.0");
|
||||
public static final Version V_5_4_1 = Version.fromString("5.4.1");
|
||||
public static final Version CURRENT = V_5_4_1;
|
||||
}
|
||||
public void testResolveReleasedVersionsForReleaseBranch() {
|
||||
Tuple<List<Version>, List<Version>> t = VersionUtils.resolveReleasedVersions(TestReleaseBranch.CURRENT, TestReleaseBranch.class);
|
||||
List<Version> released = t.v1();
|
||||
List<Version> unreleased = t.v2();
|
||||
assertEquals(Arrays.asList(TestReleaseBranch.V_5_3_0, TestReleaseBranch.V_5_3_1, TestReleaseBranch.V_5_3_2,
|
||||
TestReleaseBranch.V_5_4_0, TestReleaseBranch.V_5_4_1), released);
|
||||
assertEquals(emptyList(), unreleased);
|
||||
}
|
||||
|
||||
static class TestStableBranch {
|
||||
public static final Version V_5_3_0 = Version.fromString("5.3.0");
|
||||
public static final Version V_5_3_1 = Version.fromString("5.3.1");
|
||||
public static final Version V_5_3_2 = Version.fromString("5.3.2");
|
||||
public static final Version V_5_4_0 = Version.fromString("5.4.0");
|
||||
public static final Version CURRENT = V_5_4_0;
|
||||
}
|
||||
public void testResolveReleasedVersionsForUnreleasedStableBranch() {
|
||||
Tuple<List<Version>, List<Version>> t = VersionUtils.resolveReleasedVersions(TestStableBranch.CURRENT,
|
||||
TestStableBranch.class);
|
||||
List<Version> released = t.v1();
|
||||
List<Version> unreleased = t.v2();
|
||||
assertEquals(
|
||||
Arrays.asList(TestStableBranch.V_5_3_0, TestStableBranch.V_5_3_1, TestStableBranch.V_5_4_0),
|
||||
released);
|
||||
assertEquals(singletonList(TestStableBranch.V_5_3_2), unreleased);
|
||||
}
|
||||
|
||||
static class TestStableBranchBehindStableBranch {
|
||||
public static final Version V_5_3_0 = Version.fromString("5.3.0");
|
||||
public static final Version V_5_3_1 = Version.fromString("5.3.1");
|
||||
public static final Version V_5_3_2 = Version.fromString("5.3.2");
|
||||
public static final Version V_5_4_0 = Version.fromString("5.4.0");
|
||||
public static final Version V_5_5_0 = Version.fromString("5.5.0");
|
||||
public static final Version CURRENT = V_5_5_0;
|
||||
}
|
||||
public void testResolveReleasedVersionsForStableBtranchBehindStableBranch() {
|
||||
Tuple<List<Version>, List<Version>> t = VersionUtils.resolveReleasedVersions(TestStableBranchBehindStableBranch.CURRENT,
|
||||
TestStableBranchBehindStableBranch.class);
|
||||
List<Version> released = t.v1();
|
||||
List<Version> unreleased = t.v2();
|
||||
assertEquals(Arrays.asList(TestStableBranchBehindStableBranch.V_5_3_0, TestStableBranchBehindStableBranch.V_5_3_1,
|
||||
TestStableBranchBehindStableBranch.V_5_5_0), released);
|
||||
assertEquals(Arrays.asList(TestStableBranchBehindStableBranch.V_5_3_2, Version.V_5_4_0), unreleased);
|
||||
}
|
||||
|
||||
static class TestUnstableBranch {
|
||||
public static final Version V_5_3_0 = Version.fromString("5.3.0");
|
||||
public static final Version V_5_3_1 = Version.fromString("5.3.1");
|
||||
public static final Version V_5_3_2 = Version.fromString("5.3.2");
|
||||
public static final Version V_5_4_0 = Version.fromString("5.4.0");
|
||||
public static final Version V_6_0_0_alpha1 = Version.fromString("6.0.0-alpha1");
|
||||
public static final Version V_6_0_0_alpha2 = Version.fromString("6.0.0-alpha2");
|
||||
public static final Version CURRENT = V_6_0_0_alpha2;
|
||||
}
|
||||
public void testResolveReleasedVersionsForUnstableBranch() {
|
||||
Tuple<List<Version>, List<Version>> t = VersionUtils.resolveReleasedVersions(TestUnstableBranch.CURRENT,
|
||||
TestUnstableBranch.class);
|
||||
List<Version> released = t.v1();
|
||||
List<Version> unreleased = t.v2();
|
||||
assertEquals(Arrays.asList(TestUnstableBranch.V_5_3_0, TestUnstableBranch.V_5_3_1,
|
||||
TestUnstableBranch.V_6_0_0_alpha1, TestUnstableBranch.V_6_0_0_alpha2), released);
|
||||
assertEquals(Arrays.asList(TestUnstableBranch.V_5_3_2, TestUnstableBranch.V_5_4_0), unreleased);
|
||||
}
|
||||
|
||||
// TODO add a test that compares gradle and VersionUtils.java in a followup
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.test.test;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public class VersionUtilsTests extends ESTestCase {
|
||||
|
||||
public void testAllVersionsSorted() {
|
||||
List<Version> allVersions = VersionUtils.allReleasedVersions();
|
||||
for (int i = 0, j = 1; j < allVersions.size(); ++i, ++j) {
|
||||
assertTrue(allVersions.get(i).before(allVersions.get(j)));
|
||||
}
|
||||
}
|
||||
|
||||
public void testRandomVersionBetween() {
|
||||
// full range
|
||||
Version got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), Version.CURRENT);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
got = VersionUtils.randomVersionBetween(random(), null, Version.CURRENT);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), null);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
|
||||
// sub range
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
|
||||
Version.V_6_0_0_alpha2_UNRELEASED);
|
||||
assertTrue(got.onOrAfter(Version.V_5_0_0));
|
||||
assertTrue(got.onOrBefore(Version.V_6_0_0_alpha2_UNRELEASED));
|
||||
|
||||
// unbounded lower
|
||||
got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_alpha2_UNRELEASED);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(Version.V_6_0_0_alpha2_UNRELEASED));
|
||||
got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0));
|
||||
assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
|
||||
assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0)));
|
||||
|
||||
// unbounded upper
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, null);
|
||||
assertTrue(got.onOrAfter(Version.V_5_0_0));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null);
|
||||
assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion()));
|
||||
assertTrue(got.onOrBefore(Version.CURRENT));
|
||||
|
||||
// range of one
|
||||
got = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getFirstVersion());
|
||||
assertEquals(got, VersionUtils.getFirstVersion());
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
|
||||
assertEquals(got, Version.CURRENT);
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha2_UNRELEASED,
|
||||
Version.V_6_0_0_alpha2_UNRELEASED);
|
||||
assertEquals(got, Version.V_6_0_0_alpha2_UNRELEASED);
|
||||
|
||||
// implicit range of one
|
||||
got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());
|
||||
assertEquals(got, VersionUtils.getFirstVersion());
|
||||
got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, null);
|
||||
assertEquals(got, Version.CURRENT);
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue