Update version to 7.0.0-alpha1 (#25876)

This commit updates the version for master to 7.0.0-alpha1. It also adds
the 6.1 version constant and fixes many tests, marking some as AwaitsFix.

Closes #25893
Closes #25870
Ryan Ernst 2017-08-01 15:47:48 -04:00 committed by GitHub
parent e9669b3762
commit 072281d5aa
27 changed files with 114 additions and 277 deletions


@@ -79,18 +79,20 @@ int lastPrevMinor = -1 // the minor version number from the prev major we most r
for (String line : versionLines) {
/* Note that this skips alphas and betas which is fine because they aren't
* compatible with anything. */
Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+) .*/
Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_beta\d+|_rc\d+)? .*/
if (match.matches()) {
int major = Integer.parseInt(match.group(1))
int minor = Integer.parseInt(match.group(2))
int bugfix = Integer.parseInt(match.group(3))
Version foundVersion = new Version(major, minor, bugfix, false)
if (currentVersion != foundVersion) {
if (currentVersion != foundVersion
&& (major == prevMajor || major == currentVersion.major)
&& (versions.isEmpty() || versions.last() != foundVersion)) {
versions.add(foundVersion)
}
if (major == prevMajor && minor > lastPrevMinor) {
prevMinorIndex = versions.size() - 1
lastPrevMinor = minor
if (major == prevMajor && minor > lastPrevMinor) {
prevMinorIndex = versions.size() - 1
lastPrevMinor = minor
}
}
}
}
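
With the optional suffix group, beta and RC constants (like V_6_0_0_beta1) now match the pattern while alphas still fall through; that is also why the new versions.last() != foundVersion check is needed, since several suffixed lines can collapse to the same (major, minor, bugfix) version. A standalone sketch of the updated pattern's behavior, using plain java.util.regex (Groovy's =~ operator wraps the same engine):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VersionLineRegexDemo {
    // Same pattern as the build script above, escaped for a Java string literal.
    private static final Pattern VERSION_LINE = Pattern.compile(
            "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_beta\\d+|_rc\\d+)? .*");

    public static void main(String[] args) {
        String[] lines = {
                "    public static final Version V_6_1_0 = null;",        // matches
                "    public static final Version V_6_0_0_beta1 = null;",  // now matches via the suffix group
                "    public static final Version V_6_0_0_alpha2 = null;", // still skipped: no _alpha branch
        };
        for (String line : lines) {
            Matcher m = VERSION_LINE.matcher(line);
            System.out.println(line.trim() + " -> " + m.matches());
        }
    }
}
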
@@ -242,9 +244,11 @@ subprojects {
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
if (indexCompatVersions.size() > 1) {
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
}
} else {
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
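
The new size() guard matters right after a major version bump: the index-compatible list can then hold a single entry, and Groovy's negative index indexCompatVersions[-2] (the second-to-last element) throws on a one-element list. A minimal Java sketch of the same guard, with an assumed single-entry list:

import java.util.Arrays;
import java.util.List;

public class BwcSubstitutionGuard {
    public static void main(String[] args) {
        // Assumed example: right after the 7.0.0 bump only one index-compatible version exists.
        List<String> indexCompatVersions = Arrays.asList("6.0.0-beta1");
        String last = indexCompatVersions.get(indexCompatVersions.size() - 1); // Groovy's [-1]
        System.out.println("bwc-stable-snapshot -> " + last);
        if (indexCompatVersions.size() > 1) { // mirrors the guard added above
            String secondToLast = indexCompatVersions.get(indexCompatVersions.size() - 2); // Groovy's [-2]
            System.out.println("bwc-release-snapshot -> " + secondToLast);
        }
    }
}
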


@@ -1,5 +1,5 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-beta1
elasticsearch = 7.0.0-alpha1
lucene = 7.0.0-snapshot-00142c9
# optional dependencies


@@ -99,7 +99,13 @@ public class Version implements Comparable<Version> {
public static final int V_6_0_0_beta1_ID = 6000026;
public static final Version V_6_0_0_beta1 =
new Version(V_6_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_6_0_0_beta1;
public static final int V_6_1_0_ID = 6010099;
public static final Version V_6_1_0 =
new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final int V_7_0_0_alpha1_ID = 7000001;
public static final Version V_7_0_0_alpha1 =
new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_7_0_0_alpha1;
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
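
The numeric IDs follow a positional scheme that the new constants make easy to read off: id = major * 1,000,000 + minor * 10,000 + revision * 100 + build, where 99 marks a GA release and pre-releases get low build numbers (alpha1 is 01 in V_7_0_0_alpha1_ID; compare build 26 in V_6_0_0_beta1_ID). A sketch of the arithmetic; the helper is illustrative, not an Elasticsearch API:

public class VersionIdScheme {
    // Encoding implied by the constants above: major, minor, revision, build.
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        System.out.println(versionId(6, 1, 0, 99)); // 6010099 == V_6_1_0_ID (99 = GA release)
        System.out.println(versionId(7, 0, 0, 1));  // 7000001 == V_7_0_0_alpha1_ID (01 = alpha1)
    }
}
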
@@ -114,6 +120,10 @@ public class Version implements Comparable<Version> {
public static Version fromId(int id) {
switch (id) {
case V_7_0_0_alpha1_ID:
return V_7_0_0_alpha1;
case V_6_1_0_ID:
return V_6_1_0;
case V_6_0_0_beta1_ID:
return V_6_0_0_beta1;
case V_6_0_0_alpha2_ID:
@@ -311,12 +321,12 @@ public class Version implements Comparable<Version> {
public Version minimumCompatibilityVersion() {
final int bwcMajor;
final int bwcMinor;
// TODO: remove this entirely, making it static for each version
if (major == 6) { // we only specialize for current major here
bwcMajor = Version.V_5_6_0.major;
bwcMinor = Version.V_5_6_0.minor;
} else if (major > 6) { // all the future versions are compatible with first minor...
bwcMajor = major -1;
bwcMinor = 0;
} else if (major == 7) { // we only specialize for current major here
return V_6_1_0;
} else {
bwcMajor = major;
bwcMinor = 0;
@@ -333,6 +343,8 @@ public class Version implements Comparable<Version> {
final int bwcMajor;
if (major == 5) {
bwcMajor = 2; // we jumped from 2 to 5
} else if (major == 7) {
return V_6_0_0_beta1;
} else {
bwcMajor = major - 1;
}
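
Taken together, the two methods pin different floors for a 7.x node: minimumCompatibilityVersion (wire compatibility) reaches back to 6.1.0, the last 6.x minor at the time, while minimumIndexCompatibilityVersion reaches back to 6.0.0-beta1 for reading on-disk indices. Expressed as illustrative assertions against the constants added above:

// Illustrative only; assumes the Version constants defined earlier in this diff.
assert Version.V_7_0_0_alpha1.minimumCompatibilityVersion().equals(Version.V_6_1_0);
assert Version.V_7_0_0_alpha1.minimumIndexCompatibilityVersion().equals(Version.V_6_0_0_beta1);
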


@@ -415,8 +415,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
int shards = in.readVInt();
for (int j = 0; j < shards; j++) {
ShardId shardId = ShardId.readShardId(in);
// TODO: Change this to an appropriate version when it's backported
if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
builder.put(shardId, new ShardSnapshotStatus(in));
} else {
String nodeId = in.readOptionalString();
@@ -459,8 +458,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
out.writeVInt(entry.shards().size());
for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : entry.shards()) {
shardEntry.key.writeTo(out);
// TODO: Change this to an appropriate version when it's backported
if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
shardEntry.value.writeTo(out);
} else {
out.writeOptionalString(shardEntry.value.nodeId());
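
Both hunks move the same gate: reader and writer must branch on an identical version constant, and that constant has to be the first version that actually ships the new encoding, or mixed-version clusters mis-frame the stream. The paired idiom, reduced to a sketch (names other than ShardSnapshotStatus are hypothetical):

// Writer: peers on 7.0.0-alpha1 or later get the full object, older peers the legacy field.
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
    status.writeTo(out);
} else {
    out.writeOptionalString(status.nodeId());
}

// Reader: mirrors the writer for the negotiated stream version.
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
    status = new ShardSnapshotStatus(in);
} else {
    String nodeId = in.readOptionalString();
    // ... rebuild status from the legacy fields
}
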


@@ -141,7 +141,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
// been started yet. However, we don't really need real analyzers at this stage - so we can fake it
IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings);
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
final NamedAnalyzer fakeDefault = new NamedAnalyzer("fake_default", AnalyzerScope.INDEX, new Analyzer() {
final NamedAnalyzer fakeDefault = new NamedAnalyzer("default", AnalyzerScope.INDEX, new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
throw new UnsupportedOperationException("shouldn't be here");


@@ -221,14 +221,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
this.ephemeralId = in.readString().intern();
this.hostName = in.readString().intern();
this.hostAddress = in.readString().intern();
if (in.getVersion().after(Version.V_5_0_2)) {
this.address = new TransportAddress(in);
} else {
// we need to do this to preserve the host information during pinging and joining of a master. Since the version of the
// DiscoveryNode is set to Version#minimumCompatibilityVersion(), the host information gets lost as we do not serialize the
// hostString for the address
this.address = new TransportAddress(in, hostName);
}
this.address = new TransportAddress(in);
int size = in.readVInt();
this.attributes = new HashMap<>(size);
for (int i = 0; i < size; i++) {


@@ -19,8 +19,6 @@
package org.elasticsearch.common.transport;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -70,46 +68,21 @@ public final class TransportAddress implements Writeable {
* Read from a stream.
*/
public TransportAddress(StreamInput in) throws IOException {
this(in, null);
}
/**
* Read from a stream and use the {@code hostString} when creating the InetAddress if the input comes from a version on or prior
* {@link Version#V_5_0_2} as the hostString was not serialized
*/
public TransportAddress(StreamInput in, @Nullable String hostString) throws IOException {
if (in.getVersion().before(Version.V_6_0_0_alpha1)) { // bwc layer for 5.x where we had more than one transport address
final short i = in.readShort();
if(i != 1) { // we fail hard to ensure nobody tries to use some custom transport address impl even if that is difficult to add
throw new AssertionError("illegal transport ID from node of version: " + in.getVersion() + " got: " + i + " expected: 1");
}
}
final int len = in.readByte();
final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a);
final InetAddress inetAddress;
if (in.getVersion().after(Version.V_5_0_2)) {
String host = in.readString(); // the host string was serialized so we can ignore the passed in version
inetAddress = InetAddress.getByAddress(host, a);
} else {
// prior to this version, we did not serialize the host string so we used the passed in version
inetAddress = InetAddress.getByAddress(hostString, a);
}
String host = in.readString(); // the host string was serialized so we can ignore the passed in version
final InetAddress inetAddress = InetAddress.getByAddress(host, a);
int port = in.readInt();
this.address = new InetSocketAddress(inetAddress, port);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
if (out.getVersion().before(Version.V_6_0_0_alpha1)) {
out.writeShort((short)1); // this maps to InetSocketTransportAddress in 5.x
}
byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length);
if (out.getVersion().after(Version.V_5_0_2)) {
out.writeString(address.getHostString());
}
out.writeString(address.getHostString());
// don't serialize scope ids over the network!!!!
// these only make sense with respect to the local machine, and will only formulate
// the address incorrectly remotely.
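
With the 5.x fallbacks gone, a TransportAddress always serializes as: one length byte, the raw InetAddress bytes, the host string, and a four-byte port. A rough mirror in plain java.io to show the shape; StreamOutput encodes strings differently, so this is not byte-compatible with the real codec:

import java.io.DataOutputStream;
import java.io.IOException;
import java.net.InetSocketAddress;

public class TransportAddressShape {
    // Shape-only sketch of the simplified format above.
    static void write(DataOutputStream out, InetSocketAddress address) throws IOException {
        byte[] bytes = address.getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
        out.writeByte(bytes.length);                      // 1 length byte
        out.write(bytes, 0, bytes.length);                // raw address bytes
        out.writeUTF(address.getHostString());            // host string, now always present
        out.writeInt(address.getPort());                  // 4-byte port
    }
}
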


@@ -496,14 +496,8 @@ public final class AnalysisRegistry implements Closeable {
throw new IllegalArgumentException("no default analyzer configured");
}
if (analyzers.containsKey("default_index")) {
final Version createdVersion = indexSettings.getIndexVersionCreated();
if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) {
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
} else {
deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index.getName());
}
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
}
NamedAnalyzer defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer;
NamedAnalyzer defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer;
NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer;
@@ -512,7 +506,7 @@ public final class AnalysisRegistry implements Closeable {
throw new IllegalArgumentException("analyzer name must not start with '_'. got \"" + analyzer.getKey() + "\"");
}
}
return new IndexAnalyzers(indexSettings, defaultIndexAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer,
return new IndexAnalyzers(indexSettings, defaultAnalyzer, defaultSearchAnalyzer, defaultSearchQuoteAnalyzer,
unmodifiableMap(analyzers), unmodifiableMap(normalizers));
}


@@ -46,6 +46,9 @@ public final class IndexAnalyzers extends AbstractIndexComponent implements Clos
NamedAnalyzer defaultSearchQuoteAnalyzer, Map<String, NamedAnalyzer> analyzers,
Map<String, NamedAnalyzer> normalizers) {
super(indexSettings);
if (defaultIndexAnalyzer.name().equals("default") == false) {
throw new IllegalStateException("default analyzer must have the name [default] but was: [" + defaultIndexAnalyzer.name() + "]");
}
this.defaultIndexAnalyzer = defaultIndexAnalyzer;
this.defaultSearchAnalyzer = defaultSearchAnalyzer;
this.defaultSearchQuoteAnalyzer = defaultSearchQuoteAnalyzer;


@@ -63,7 +63,7 @@ grant codeBase "${codebase.mocksocket-1.2.jar}" {
permission java.net.SocketPermission "*", "accept,connect";
};
grant codeBase "${codebase.elasticsearch-rest-client-6.0.0-beta1-SNAPSHOT.jar}" {
grant codeBase "${codebase.elasticsearch-rest-client-7.0.0-alpha1-SNAPSHOT.jar}" {
// rest makes socket connections for rest tests
permission java.net.SocketPermission "*", "connect";
// rest client uses system properties which gets the default proxy
@@ -72,7 +72,7 @@ grant codeBase "${codebase.elasticsearch-rest-client-6.0.0-beta1-SNAPSHOT.jar}"
// IDEs need this because they do not play nicely with removing artifacts on projects,
// so we keep it in here for IDE test support
grant codeBase "${codebase.elasticsearch-rest-client-6.0.0-beta1-SNAPSHOT-deps.jar}" {
grant codeBase "${codebase.elasticsearch-rest-client-7.0.0-alpha1-SNAPSHOT-deps.jar}" {
// rest makes socket connections for rest tests
permission java.net.SocketPermission "*", "connect";
};


@@ -337,12 +337,8 @@ public class VersionTests extends ESTestCase {
assertTrue(isCompatible(Version.V_5_6_0, Version.V_6_0_0_alpha2));
assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2));
assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
assertTrue(isCompatible(Version.fromString("6.0.0"), Version.fromString("7.0.0")));
if (Version.CURRENT.isRelease()) {
assertTrue(isCompatible(Version.CURRENT, Version.fromString("7.0.0")));
} else {
assertFalse(isCompatible(Version.CURRENT, Version.fromString("7.0.0")));
}
assertTrue(isCompatible(Version.fromString("6.1.0"), Version.fromString("7.0.0")));
assertFalse(isCompatible(Version.fromString("6.0.0-alpha1"), Version.fromString("7.0.0")));
assertFalse("only compatible with the latest minor",
isCompatible(VersionUtils.getPreviousMinorVersion(), Version.fromString("7.0.0")));
assertFalse(isCompatible(Version.V_5_0_0, Version.fromString("6.0.0")));


@@ -91,16 +91,18 @@ public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("2.4.0"))
.build());
// norelease : having a hardcoded version message requires modifying this test when creating new major version. fix this...
String message = expectThrows(IllegalStateException.class, () -> service.upgradeIndexMetaData(metaData,
Version.CURRENT.minimumIndexCompatibilityVersion())).getMessage();
assertEquals(message, "The index [[foo/BOOM]] was created with version [2.4.0] but the minimum compatible version is [5.0.0]." +
" It should be re-indexed in Elasticsearch 5.x before upgrading to " + Version.CURRENT.toString() + ".");
assertEquals(message, "The index [[foo/BOOM]] was created with version [2.4.0] " +
"but the minimum compatible version is [6.0.0-beta1]." +
" It should be re-indexed in Elasticsearch 6.x before upgrading to " + Version.CURRENT.toString() + ".");
IndexMetaData goodMeta = newIndexMeta("foo", Settings.builder()
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_5_0_0_beta1)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromString("5.1.0"))
.build());
service.upgradeIndexMetaData(goodMeta, Version.V_5_0_0.minimumIndexCompatibilityVersion());
service.upgradeIndexMetaData(goodMeta, Version.V_6_0_0_beta1.minimumIndexCompatibilityVersion());
}
public void testPluginUpgrade() {


@@ -58,28 +58,4 @@ public class DiscoveryNodeTests extends ESTestCase {
assertEquals(transportAddress.getAddress(), serialized.getAddress().getAddress());
assertEquals(transportAddress.getPort(), serialized.getAddress().getPort());
}
public void testDiscoveryNodeSerializationToOldVersion() throws Exception {
InetAddress inetAddress = InetAddress.getByAddress("name1", new byte[] { (byte) 192, (byte) 168, (byte) 0, (byte) 1});
TransportAddress transportAddress = new TransportAddress(inetAddress, randomIntBetween(0, 65535));
DiscoveryNode node = new DiscoveryNode("name1", "id1", transportAddress, emptyMap(), emptySet(), Version.CURRENT);
BytesStreamOutput streamOutput = new BytesStreamOutput();
streamOutput.setVersion(Version.V_5_0_0);
node.writeTo(streamOutput);
StreamInput in = StreamInput.wrap(streamOutput.bytes().toBytesRef().bytes);
in.setVersion(Version.V_5_0_0);
DiscoveryNode serialized = new DiscoveryNode(in);
assertEquals(transportAddress.address().getHostString(), serialized.getHostName());
assertEquals(transportAddress.address().getHostString(), serialized.getAddress().address().getHostString());
assertEquals(transportAddress.getAddress(), serialized.getHostAddress());
assertEquals(transportAddress.getAddress(), serialized.getAddress().getAddress());
assertEquals(transportAddress.getPort(), serialized.getAddress().getPort());
assertFalse("if the minimum index compatibility version moves past 5.0.3, remove the special casing in DiscoverNode(StreamInput)" +
" and the TransportAddress(StreamInput, String) constructor",
Version.CURRENT.minimumIndexCompatibilityVersion().after(Version.V_5_0_2));
// serialization can happen from an old cluster-state in a full cluster restart
// hence we need to maintain this until we drop index bwc
}
}


@@ -41,10 +41,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.VersionUtils;
import java.util.Collections;
import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
@@ -129,36 +129,29 @@ public class ClusterSerializationTests extends ESAllocationTestCase {
// serialize with current version
BytesStreamOutput outStream = new BytesStreamOutput();
Version version = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumIndexCompatibilityVersion(), Version.CURRENT);
outStream.setVersion(version);
diffs.writeTo(outStream);
StreamInput inStream = outStream.bytes().streamInput();
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
inStream.setVersion(version);
Diff<ClusterState> serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE);
assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue());
// serialize with old version
outStream = new BytesStreamOutput();
outStream.setVersion(Version.CURRENT.minimumIndexCompatibilityVersion());
diffs.writeTo(outStream);
inStream = outStream.bytes().streamInput();
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
inStream.setVersion(outStream.getVersion());
serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE);
assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue());
// remove the custom and try serializing again with old version
// remove the custom and try serializing again
clusterState = ClusterState.builder(clusterState).removeCustom(SnapshotDeletionsInProgress.TYPE).incrementVersion().build();
outStream = new BytesStreamOutput();
outStream.setVersion(version);
diffs.writeTo(outStream);
inStream = outStream.bytes().streamInput();
inStream = new NamedWriteableAwareStreamInput(inStream, new NamedWriteableRegistry(ClusterModule.getNamedWriteables()));
inStream.setVersion(version);
serializedDiffs = ClusterState.readDiffFrom(inStream, clusterState.nodes().getLocalNode());
stateAfterDiffs = serializedDiffs.apply(stateAfterDiffs);
assertThat(stateAfterDiffs.custom(RestoreInProgress.TYPE), includeRestore ? notNullValue() : nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), nullValue());
assertThat(stateAfterDiffs.custom(SnapshotDeletionsInProgress.TYPE), notNullValue());
}
}


@@ -803,14 +803,6 @@ public class BytesStreamsTests extends ESTestCase {
Exception e = expectThrows(IllegalStateException.class, () -> output.writeVLong(value));
assertEquals("Negative longs unsupported, use writeLong or writeZLong for negative numbers [" + value + "]", e.getMessage());
}
assertTrue("If we're not compatible with 5.1.1 we can drop the assertion below",
Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_5_1_1));
/* Read -1 as serialized by a version of Elasticsearch that supported writing negative numbers with writeVLong. Note that this
* should be the same test as the first case (when value is negative) but we've kept some bytes so no matter what we do to
* writeVLong in the future we can be sure we can read bytes as written by Elasticsearch before 5.1.2 */
StreamInput in = new BytesArray(Base64.getDecoder().decode("////////////AQAAAAAAAA==")).streamInput();
assertEquals(-1, in.readVLong());
}
public enum TestEnum {


@@ -156,7 +156,8 @@ public class SettingsTests extends ESTestCase {
@SuppressWarnings("deprecation") //#getAsBooleanLenientForPreEs6Indices is the test subject
public void testLenientBooleanForPreEs6Index() throws IOException {
// time to say goodbye?
assertTrue(
// norelease: do what the assumption tells us
assumeTrue(
"It's time to implement #22298. Please delete this test and Settings#getAsBooleanLenientForPreEs6Indices().",
Version.CURRENT.minimumCompatibilityVersion().before(Version.V_6_0_0_alpha1));
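
The switch from assertTrue to assumeTrue changes what happens now that the condition is false (with CURRENT at 7.0.0-alpha1 the minimum compatibility version is 6.1.0, which is not before 6.0.0-alpha1): a failed JUnit assumption marks the test skipped instead of failing the build, so the reminder survives without blocking the release. A minimal sketch of that behavior:

import org.junit.Assume;
import org.junit.Test;

public class AssumeSkipsDemo {
    @Test
    public void reminderIsSkippedNotFailed() {
        // A false assumption aborts the test as "skipped"; assertTrue here would fail it.
        Assume.assumeTrue("It's time to implement #22298.", false);
        throw new AssertionError("never reached - the assumption aborted the test");
    }
}
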


@@ -254,11 +254,6 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
.field("enabled", true)
.endObject().endObject().bytes());
CompressedXContent disabledAll = new CompressedXContent(XContentFactory.jsonBuilder().startObject()
.startObject("_all")
.field("enabled", false)
.endObject().endObject().bytes());
Exception e = expectThrows(MapperParsingException.class,
() -> indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, enabledAll,
MergeReason.MAPPING_UPDATE, random().nextBoolean()));


@@ -2321,9 +2321,7 @@ public class TranslogTests extends ESTestCase {
public void testTranslogOpSerialization() throws Exception {
BytesReference B_1 = new BytesArray(new byte[]{1});
SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers";
long randomSeqNum = randomBoolean() ? SequenceNumbersService.UNASSIGNED_SEQ_NO : randomNonNegativeLong();
long primaryTerm = randomSeqNum == SequenceNumbersService.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 16);
long randomSeqNum = randomNonNegativeLong();
long randomPrimaryTerm = randomBoolean() ? 0 : randomNonNegativeLong();
seqID.seqNo.setLongValue(randomSeqNum);
seqID.seqNoDocValue.setLongValue(randomSeqNum);


@@ -22,7 +22,7 @@ grant {
permission java.net.SocketPermission "*", "connect";
};
grant codeBase "${codebase.elasticsearch-rest-client-6.0.0-beta1-SNAPSHOT.jar}" {
grant codeBase "${codebase.elasticsearch-rest-client-7.0.0-alpha1-SNAPSHOT.jar}" {
// rest client uses system properties which gets the default proxy
permission java.net.NetPermission "getProxySelector";
};


@@ -50,6 +50,7 @@ import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
/**
@@ -62,7 +63,7 @@ import static org.hamcrest.Matchers.greaterThan;
public class FullClusterRestartIT extends ESRestTestCase {
private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster"));
private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
private final boolean supportsLenientBooleans = oldClusterVersion.onOrAfter(Version.V_6_0_0_alpha1);
private final boolean supportsLenientBooleans = oldClusterVersion.before(Version.V_6_0_0_alpha1);
private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0");
private String index;
@@ -545,7 +546,11 @@ public class FullClusterRestartIT extends ESRestTestCase {
int totalBytes = (Integer) indexUpgradeStatus.get("size_in_bytes");
assertThat(totalBytes, greaterThan(0));
int toUpgradeBytes = (Integer) indexUpgradeStatus.get("size_to_upgrade_in_bytes");
assertThat(toUpgradeBytes, greaterThan(0));
if (oldClusterVersion.luceneVersion.equals(Version.CURRENT.luceneVersion)) {
assertThat(toUpgradeBytes, equalTo(0));
} else {
assertThat(toUpgradeBytes, greaterThan(0));
}
Response r = client().performRequest("POST", "/" + index + "/_flush");
assertEquals(200, r.getStatusLine().getStatusCode());
@@ -887,7 +892,7 @@
// Check that the template was restored successfully
map = toMap(client().performRequest("GET", "/_template/test_template"));
expected = new HashMap<>();
if (runningAgainstOldCluster) {
if (runningAgainstOldCluster && oldClusterVersion.before(Version.V_6_0_0_beta1)) {
expected.put("template", "evil_*");
} else {
expected.put("index_patterns", singletonList("evil_*"));


@@ -213,19 +213,20 @@ public class IndexingIT extends ESRestTestCase {
int numDocs = 0;
final int numberOfInitialDocs = 1 + randomInt(5);
logger.info("indexing [{}] docs initially", numberOfInitialDocs);
numDocs += indexDocs(index, 0, numberOfInitialDocs);
assertSeqNoOnShards(index, nodes, 0, newNodeClient);
numDocs += indexDocs(index, numDocs, numberOfInitialDocs);
assertOK(client().performRequest("POST", index + "/_refresh")); // this forces a global checkpoint sync
assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
logger.info("allowing shards on all nodes");
updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name"));
ensureGreen();
assertOK(client().performRequest("POST", index + "/_refresh"));
for (final String bwcName : bwcNamesList) {
assertCount(index, "_only_nodes:" + bwcName, numDocs);
}
final int numberOfDocsAfterAllowingShardsOnAllNodes = 1 + randomInt(5);
logger.info("indexing [{}] docs after allowing shards on all nodes", numberOfDocsAfterAllowingShardsOnAllNodes);
numDocs += indexDocs(index, numDocs, numberOfDocsAfterAllowingShardsOnAllNodes);
assertSeqNoOnShards(index, nodes, 0, newNodeClient);
assertOK(client().performRequest("POST", index + "/_refresh")); // this forces a global checkpoint sync
assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get();
logger.info("moving primary to new node by excluding {}", primary.getNode().getNodeName());
updateIndexSetting(index, Settings.builder().put("index.routing.allocation.exclude._name", primary.getNode().getNodeName()));
@@ -236,7 +237,7 @@
numDocsOnNewPrimary += indexDocs(index, numDocs, numberOfDocsAfterMovingPrimary);
numDocs += numberOfDocsAfterMovingPrimary;
assertOK(client().performRequest("POST", index + "/_refresh")); // this forces a global checkpoint sync
assertSeqNoOnShards(index, nodes, numDocsOnNewPrimary, newNodeClient);
assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
/*
* Dropping the number of replicas to zero, and then increasing it to one triggers a recovery thus exercising any BWC-logic in
* the recovery code.
@@ -254,7 +255,7 @@
// the number of documents on the primary and on the recovered replica should match the number of indexed documents
assertCount(index, "_primary", numDocs);
assertCount(index, "_replica", numDocs);
assertSeqNoOnShards(index, nodes, numDocsOnNewPrimary, newNodeClient);
assertSeqNoOnShards(index, nodes, numDocs, newNodeClient);
}
}
@@ -280,28 +281,17 @@
List<Shard> shards = buildShards(index, nodes, client);
Shard primaryShard = shards.stream().filter(Shard::isPrimary).findFirst().get();
assertNotNull("failed to find primary shard", primaryShard);
final long expectedGlobalCkp;
final long expectMaxSeqNo;
final long expectedGlobalCkp = numDocs - 1;
final long expectMaxSeqNo = numDocs - 1;
logger.info("primary resolved to node {}", primaryShard.getNode());
if (primaryShard.getNode().getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
expectMaxSeqNo = numDocs - 1;
expectedGlobalCkp = numDocs - 1;
} else {
expectedGlobalCkp = SequenceNumbersService.UNASSIGNED_SEQ_NO;
expectMaxSeqNo = SequenceNumbersService.NO_OPS_PERFORMED;
}
for (Shard shard : shards) {
if (shard.getNode().getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
final SeqNoStats seqNoStats = shard.getSeqNoStats();
logger.info("stats for {}, primary [{}]: [{}]", shard.getNode(), shard.isPrimary(), seqNoStats);
assertThat("max_seq no on " + shard.getNode() + " is wrong", seqNoStats.getMaxSeqNo(), equalTo(expectMaxSeqNo));
assertThat("localCheckpoint no on " + shard.getNode() + " is wrong",
final SeqNoStats seqNoStats = shard.getSeqNoStats();
logger.info("stats for {}, primary [{}]: [{}]", shard.getNode(), shard.isPrimary(), seqNoStats);
assertThat("max_seq no on " + shard.getNode() + " is wrong", seqNoStats.getMaxSeqNo(), equalTo(expectMaxSeqNo));
assertThat("localCheckpoint no on " + shard.getNode() + " is wrong",
seqNoStats.getLocalCheckpoint(), equalTo(expectMaxSeqNo));
assertThat("globalCheckpoint no on " + shard.getNode() + " is wrong",
seqNoStats.getGlobalCheckpoint(), equalTo(expectedGlobalCkp));
} else {
logger.info("skipping seq no test on {}", shard.getNode());
}
assertThat("globalCheckpoint no on " + shard.getNode() + " is wrong",
seqNoStats.getGlobalCheckpoint(), equalTo(expectedGlobalCkp));
}
} catch (IOException e) {
throw new AssertionError("unexpected io exception", e);
@@ -318,14 +308,10 @@
final Boolean primary = ObjectPath.evaluate(shard, "routing.primary");
final Node node = nodes.getSafe(nodeId);
final SeqNoStats seqNoStats;
if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
Integer maxSeqNo = ObjectPath.evaluate(shard, "seq_no.max_seq_no");
Integer localCheckpoint = ObjectPath.evaluate(shard, "seq_no.local_checkpoint");
Integer globalCheckpoint = ObjectPath.evaluate(shard, "seq_no.global_checkpoint");
seqNoStats = new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint);
} else {
seqNoStats = null;
}
Integer maxSeqNo = ObjectPath.evaluate(shard, "seq_no.max_seq_no");
Integer localCheckpoint = ObjectPath.evaluate(shard, "seq_no.local_checkpoint");
Integer globalCheckpoint = ObjectPath.evaluate(shard, "seq_no.global_checkpoint");
seqNoStats = new SeqNoStats(maxSeqNo, localCheckpoint, globalCheckpoint);
shards.add(new Shard(node, primary, seqNoStats));
}
return shards;


@@ -1,46 +0,0 @@
setup:
- do:
indices.create:
index: test
body:
mappings:
type_2: {}
type_3:
_parent:
type: type_2
---
"Parent/child inner hits":
- skip:
version: " - 5.4.99"
reason: mapping.single_type was added in 5.5
- do:
index:
index: test
type: type_2
id: 1
body: {"foo": "bar"}
- do:
index:
index: test
type: type_3
id: 1
parent: 1
body: {"bar": "baz"}
- do:
indices.refresh: {}
- do:
search:
body: { "query" : { "has_child" : { "type" : "type_3", "query" : { "match_all" : {} }, "inner_hits" : {} } } }
- match: { hits.total: 1 }
- match: { hits.hits.0._index: "test" }
- match: { hits.hits.0._type: "type_2" }
- match: { hits.hits.0._id: "1" }
- is_false: hits.hits.0.inner_hits.type_3.hits.hits.0._index
- match: { hits.hits.0.inner_hits.type_3.hits.hits.0._type: "type_3" }
- match: { hits.hits.0.inner_hits.type_3.hits.hits.0._id: "1" }
- is_false: hits.hits.0.inner_hits.type_3.hits.hits.0._nested


@@ -21,21 +21,21 @@
bulk:
refresh: true
body:
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v1_mixed", "f2": 5}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v2_mixed", "f2": 6}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v3_mixed", "f2": 7}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v4_mixed", "f2": 8}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v5_mixed", "f2": 9}'
- do:
index:
index: test_index
type: test_type
type: doc
id: d10
body: {"f1": "v6_mixed", "f2": 10}
@@ -52,7 +52,7 @@
- do:
delete:
index: test_index
type: test_type
type: doc
id: d10
- do:


@@ -7,29 +7,6 @@
settings:
index:
number_of_replicas: 0
- do:
indices.create:
index: multi_type_index
body:
settings:
index.number_of_replicas: 0
index.mapping.single_type: false
- do:
bulk:
refresh: true
body:
- '{"index": {"_index": "multi_type_index", "_type": "type1"}}'
- '{"f1": "v1_old", "f2": 0}'
- '{"index": {"_index": "multi_type_index", "_type": "type2"}}'
- '{"f1": "v1_old", "f2": 0}'
- do:
search:
index: multi_type_index
- match: { hits.total: 2 }
- do:
indices.create:
index: index_with_replicas # dummy index to ensure we can recover indices with replicas just fine


@@ -17,32 +17,19 @@
- match: { hits.total: 5 } # just check we recovered fine
- do:
search:
index: multi_type_index
- match: { hits.total: 2 } # just check we recovered fine
- do:
indices.get_settings:
index: multi_type_index
- match: { multi_type_index.settings.index.mapping.single_type: "false"}
- do:
bulk:
refresh: true
body:
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v1_upgraded", "f2": 10}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v2_upgraded", "f2": 11}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v3_upgraded", "f2": 12}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v4_upgraded", "f2": 13}'
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
- '{"index": {"_index": "test_index", "_type": "doc"}}'
- '{"f1": "v5_upgraded", "f2": 14}'
- do:


@@ -2422,7 +2422,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
int addressLen = serviceB.boundAddress().publishAddress().address().getAddress().getAddress().length;
// if we are bound to an IPv6 address the response address is serialized with the exception so it will be different depending
// on the stack. The ephemeral port will always be in the same range
assertEquals(185 + addressLen, stats.getRxSize().getBytes());
assertEquals(183 + addressLen, stats.getRxSize().getBytes());
assertEquals(91, stats.getTxSize().getBytes());
} finally {
serviceC.close();


@@ -23,13 +23,19 @@ import org.elasticsearch.common.collect.Tuple;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import static java.util.Collections.singletonList;
import static java.util.stream.Collectors.toCollection;
import static java.util.stream.Collectors.toList;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
/**
* Tests VersionUtils. Note: this test should remain unchanged across major versions;
* it uses the hardcoded versions on purpose.
*/
public class VersionUtilsTests extends ESTestCase {
public void testAllVersionsSorted() {
@@ -174,37 +180,29 @@
public void testGradleVersionsMatchVersionUtils() {
// First check the index compatible versions
VersionsFromProperty indexCompatible = new VersionsFromProperty("tests.gradle_index_compat_versions");
List<Version> released = VersionUtils.allReleasedVersions().stream()
/* We skip alphas, betas, and the like in gradle because they don't have
* backwards compatibility guarantees even though they are technically
* released. */
.filter(Version::isRelease)
.filter(v -> v.isRelease() && (v.major == Version.CURRENT.major || v.major == Version.CURRENT.major - 1))
.collect(toList());
List<String> releasedIndexCompatible = released.stream()
.map(Object::toString)
.collect(toList());
assertEquals(releasedIndexCompatible, indexCompatible.released);
List<String> unreleasedIndexCompatible = VersionUtils.allUnreleasedVersions().stream()
List<String> unreleasedIndexCompatible = new ArrayList<>(VersionUtils.allUnreleasedVersions().stream()
/* Gradle skips the current version because being backwards compatible
* with yourself is implied. Java lists the version because it is useful. */
.filter(v -> v != Version.CURRENT)
.map(Object::toString)
.collect(toList());
.map(v -> v.major + "." + v.minor + "." + v.revision)
.collect(toCollection(LinkedHashSet::new)));
assertEquals(unreleasedIndexCompatible, indexCompatible.unreleased);
// Now the wire compatible versions
VersionsFromProperty wireCompatible = new VersionsFromProperty("tests.gradle_wire_compat_versions");
// Big horrible hack:
// This *should* be:
// Version minimumCompatibleVersion = Version.CURRENT.minimumCompatibilityVersion();
// But instead it is:
Version minimumCompatibleVersion = Version.V_5_6_0;
// Because things blow up all over the place if the minimum compatible version isn't released.
// We'll fix this very, very soon. But for now, this hack.
// end big horrible hack
Version minimumCompatibleVersion = Version.CURRENT.minimumCompatibilityVersion();
List<String> releasedWireCompatible = released.stream()
.filter(v -> v.onOrAfter(minimumCompatibleVersion))
.map(Object::toString)