Bump Elasticsearch version to 5.0.0-SNAPSHOT

This commit bumps the Elasticsearch version to 5.0.0-SNAPSHOT in line
with the alignment of versions across the stack.

Closes #16862
Jason Tedor 2016-02-29 10:21:39 -05:00
parent 72ed01c304
commit aa8ee74c6c
26 changed files with 44 additions and 93 deletions


@@ -1,4 +1,4 @@
-elasticsearch = 3.0.0
+elasticsearch = 5.0.0
 lucene = 5.5.0
 # optional dependencies


@@ -264,9 +264,9 @@ public class Version {
     public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
     public static final int V_2_3_0_ID = 2030099;
     public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
-    public static final int V_3_0_0_ID = 3000099;
-    public static final Version V_3_0_0 = new Version(V_3_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
-    public static final Version CURRENT = V_3_0_0;
+    public static final int V_5_0_0_ID = 5000099;
+    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final Version CURRENT = V_5_0_0;
     static {
         assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -279,8 +279,8 @@ public class Version {
     public static Version fromId(int id) {
         switch (id) {
-            case V_5_0_0_ID:
-                return V_3_0_0;
+            case V_5_0_0_ID:
+                return V_5_0_0;
             case V_2_3_0_ID:
                 return V_2_3_0;
             case V_2_2_0_ID:
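
Aside: the numeric version IDs above are positional rather than arbitrary. From the constants shown (2030099 for 2.3.0, 5000099 for 5.0.0), the encoding is major * 1,000,000 + minor * 10,000 + revision * 100 + build, with 99 apparently reserved for release builds so that pre-releases sort below them. A minimal sketch of that scheme; the class and method names here are illustrative, not Elasticsearch code:

    final class VersionIdSketch {
        // id layout MMNNRRBB: major, minor, revision, build (99 = release build)
        static int encode(int major, int minor, int revision, int build) {
            return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
        }

        public static void main(String[] args) {
            System.out.println(encode(2, 3, 0, 99)); // 2030099, matches V_2_3_0_ID
            System.out.println(encode(5, 0, 0, 99)); // 5000099, matches V_5_0_0_ID
        }
    }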


@@ -913,11 +913,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
                     }
                 }
             } else if ("warmers".equals(currentFieldName)) {
-                // TODO: do this in 4.0:
+                // TODO: do this in 6.0:
                 // throw new IllegalArgumentException("Warmers are not supported anymore - are you upgrading from 1.x?");
-                // ignore: warmers have been removed in 3.0 and are
+                // ignore: warmers have been removed in 5.0 and are
                 // simply ignored when upgrading from 2.x
-                assert Version.CURRENT.major <= 3;
+                assert Version.CURRENT.major <= 5;
                 parser.skipChildren();
             } else {
                 // check if its a custom index metadata
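
The warmers branch above is the upgrade-leniency pattern this commit preserves: metadata written by 2.x may still contain a section that 5.0 removed, so the parser skips it silently, and the assert pins that leniency to the current major so it cannot linger into 6.0 unnoticed. A freestanding sketch of the same pattern, using Jackson directly rather than Elasticsearch's XContentParser (which wraps a similar streaming API); the JSON and field handling are illustrative only:

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonParser;
    import com.fasterxml.jackson.core.JsonToken;

    public class SkipRemovedSectionSketch {
        static final int CURRENT_MAJOR = 5;

        public static void main(String[] args) throws Exception {
            String json = "{\"settings\":{},\"warmers\":{\"w1\":{}},\"mappings\":{}}";
            JsonParser parser = new JsonFactory().createParser(json);
            parser.nextToken(); // enter the top-level object
            while (parser.nextToken() == JsonToken.FIELD_NAME) {
                String field = parser.getCurrentName();
                parser.nextToken(); // advance to the field's value
                if ("warmers".equals(field)) {
                    assert CURRENT_MAJOR <= 5; // leniency must be dropped in 6.0
                    parser.skipChildren();     // silently discard the removed section
                } else {
                    parser.skipChildren();     // a real reader would parse these fields
                }
            }
        }
    }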


@@ -266,7 +266,7 @@ public final class ShardRouting implements Streamable, ToXContent {
             return false;
         }
-        if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
+        if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_5_0_0)) {
             // when no shards with this id have ever been active for this index
             return false;
         }


@@ -113,7 +113,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
         final boolean enoughAllocationsFound;
         if (lastActiveAllocationIds.isEmpty()) {
-            assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_3_0_0) : "trying to allocate a primary with an empty allocation id set, but index is new";
+            assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0) : "trying to allocate a primary with an empty allocation id set, but index is new";
             // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
             // fall back to old version-based allocation mode
             // Note that once the shard has been active, lastActiveAllocationIds will be non-empty
@@ -123,7 +123,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
             } else {
                 enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
             }
-            logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_3_0_0, nodeShardsResult.allocationsFound, shard);
+            logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0, nodeShardsResult.allocationsFound, shard);
         } else {
             assert lastActiveAllocationIds.isEmpty() == false;
             // use allocation ids to select nodes
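
Both hunks above guard the same invariant: only indices created on or after 5.0.0 are guaranteed to carry allocation IDs, so an empty ID set is legal only for older indices, which fall back to the old version-based primary allocation. A compressed sketch of that decision, with simplified stand-ins for the Elasticsearch types:

    import java.util.Set;

    class PrimaryAllocationSketch {
        static final int V_5_0_0_ID = 5000099;

        static boolean useAllocationIds(int indexCreatedVersionId, Set<String> lastActiveAllocationIds) {
            if (lastActiveAllocationIds.isEmpty()) {
                // legal only for indices created before 5.0.0
                assert indexCreatedVersionId < V_5_0_0_ID : "new index with an empty allocation id set";
                return false; // fall back to comparing on-disk shard store versions
            }
            return true; // pick nodes whose shard copies hold an in-sync allocation id
        }

        public static void main(String[] args) {
            System.out.println(useAllocationIds(2030099, Set.of()));      // false: pre-5.0 index
            System.out.println(useAllocationIds(5000099, Set.of("abc"))); // true
        }
    }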


@@ -127,7 +127,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable
         }
         if (analyzers.containsKey("default_index")) {
             final Version createdVersion = indexSettings.getIndexVersionCreated();
-            if (createdVersion.onOrAfter(Version.V_3_0_0)) {
+            if (createdVersion.onOrAfter(Version.V_5_0_0)) {
                 throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index().getName() + "]");
             } else {
                 deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index().getName());


@@ -230,7 +230,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
     }
     protected boolean defaultDocValues(Version indexCreated) {
-        if (indexCreated.onOrAfter(Version.V_3_0_0)) {
+        if (indexCreated.onOrAfter(Version.V_5_0_0)) {
             // add doc values by default to keyword (boolean, numerics, etc.) fields
             return fieldType.tokenized() == false;
         } else {


@@ -252,7 +252,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
                 context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost);
             }
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             Byte objValue = fieldType().nullValue();


@@ -478,7 +478,7 @@ public class DateFieldMapper extends NumberFieldMapper {
         } else if (token == XContentParser.Token.VALUE_NUMBER) {
             dateAsString = parser.text();
         } else if (token == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             String currentFieldName = null;
             while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                 if (token == XContentParser.Token.FIELD_NAME) {


@@ -244,7 +244,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
                 context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost);
             }
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             Double objValue = fieldType().nullValue();


@@ -256,7 +256,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
                 context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost);
             }
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             Float objValue = fieldType().nullValue();


@@ -261,7 +261,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
                 context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost);
             }
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             Integer objValue = fieldType().nullValue();


@@ -249,7 +249,7 @@ public class LongFieldMapper extends NumberFieldMapper {
                 context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost);
             }
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             Long objValue = fieldType().nullValue();


@@ -257,7 +257,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
                 context.allEntries().addText(fieldType().name(), fieldType().nullValueAsString(), boost);
             }
         } else if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             Short objValue = fieldType().nullValue();


@@ -334,7 +334,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
             return new ValueAndBoost(nullValue, defaultBoost);
         }
         if (parser.currentToken() == XContentParser.Token.START_OBJECT
-                && Version.indexCreated(context.indexSettings()).before(Version.V_3_0_0)) {
+                && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
             XContentParser.Token token;
             String currentFieldName = null;
             String value = nullValue;
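
The eight mapper hunks above (byte, date, double, float, integer, long, short, string) all gate the same legacy leniency: before 5.0 a scalar field could be supplied as an object carrying a value and a boost, e.g. {"age": {"value": 7, "boost": 2.0}}, and the mappers unwrapped it. A hedged sketch of that unwrapping using plain maps instead of the XContentParser stream; on 5.0+ indices the object form simply stops being unwrapped (the real mappers then reject it downstream):

    import java.util.Map;

    class LegacyValueAndBoostSketch {
        static final int V_5_0_0_ID = 5000099;

        record ValueAndBoost(String value, float boost) {}

        static ValueAndBoost parse(Object fieldValue, int indexCreatedVersionId) {
            if (fieldValue instanceof Map<?, ?> object && indexCreatedVersionId < V_5_0_0_ID) {
                // pre-5.0 leniency: unwrap {"value": ..., "boost": ...}
                Object value = object.get("value");
                Object boost = object.get("boost");
                return new ValueAndBoost(value == null ? null : value.toString(),
                        boost == null ? 1.0f : Float.parseFloat(boost.toString()));
            }
            // 5.0+: no unwrapping; plain scalars are used as-is everywhere
            return new ValueAndBoost(fieldValue == null ? null : fieldValue.toString(), 1.0f);
        }

        public static void main(String[] args) {
            System.out.println(parse(Map.of("value", "7", "boost", "2.0"), 2030099)); // unwrapped
            System.out.println(parse("7", 5000099));                                  // plain scalar
        }
    }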


@@ -64,7 +64,7 @@ public class TypeParsers {
     public static final String INDEX_OPTIONS_OFFSETS = "offsets";
     private static boolean nodeBooleanValue(Object node, Mapper.TypeParser.ParserContext parserContext) {
-        if (parserContext.indexVersionCreated().onOrAfter(Version.V_3_0_0)) {
+        if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
             return XContentMapValues.nodeBooleanValue(node);
         } else {
             return XContentMapValues.lenientNodeBooleanValue(node);
@@ -353,7 +353,7 @@
     }
     public static boolean parseIndex(String fieldName, String index, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
-        if (parserContext.indexVersionCreated().onOrAfter(Version.V_3_0_0)) {
+        if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
             switch (index) {
                 case "true":
                     return true;
@@ -379,7 +379,7 @@
     }
     public static boolean parseStore(String fieldName, String store, Mapper.TypeParser.ParserContext parserContext) throws MapperParsingException {
-        if (parserContext.indexVersionCreated().onOrAfter(Version.V_3_0_0)) {
+        if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
             return XContentMapValues.nodeBooleanValue(store);
         } else {
             if ("no".equals(store)) {
@@ -406,7 +406,7 @@
     }
     private static SimilarityProvider resolveSimilarity(Mapper.TypeParser.ParserContext parserContext, String name, String value) {
-        if (parserContext.indexVersionCreated().before(Version.V_3_0_0) && "default".equals(value)) {
+        if (parserContext.indexVersionCreated().before(Version.V_5_0_0) && "default".equals(value)) {
             // "default" similarity has been renamed into "classic" in 3.x.
             value = SimilarityService.DEFAULT_SIMILARITY;
         }
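
All four TypeParsers hunks switch between a strict and a lenient reading of mapping values depending on the index-created version. A rough sketch of the boolean case, assuming the usual lenient rule that everything except false-like values counts as true; the helper name is illustrative, not the XContentMapValues API:

    class BooleanLeniencySketch {
        static final int V_5_0_0_ID = 5000099;

        static boolean nodeBooleanValue(Object node, int indexCreatedVersionId) {
            String s = String.valueOf(node);
            if (indexCreatedVersionId >= V_5_0_0_ID) {
                // strict: only a Boolean or the exact strings "true"/"false"
                if (node instanceof Boolean b) {
                    return b;
                }
                if (s.equals("true")) return true;
                if (s.equals("false")) return false;
                throw new IllegalArgumentException("Can't parse boolean value [" + s + "]");
            }
            // lenient pre-5.0 rule
            return !(s.equals("false") || s.equals("off") || s.equals("no") || s.equals("0"));
        }
    }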


@@ -124,8 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
                 if (fieldName.equals("enabled")) {
                     builder.enabled(lenientNodeBooleanValue(fieldNode));
                     iterator.remove();
-                } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) {
-                    // ignore on old indices, reject on and after 3.0
+                } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_5_0_0)) {
+                    // ignore on old indices, reject on and after 5.0
                     iterator.remove();
                 } else if (fieldName.equals("includes")) {
                     List<Object> values = (List<Object>) fieldNode;


@@ -243,7 +243,7 @@ public class PercolatorService extends AbstractComponent implements Releasable {
     // moved the core percolation logic to a pck protected method to make testing easier:
     static PercolateShardResponse doPercolate(PercolateContext context, PercolatorQueriesRegistry queriesRegistry, AggregationPhase aggregationPhase, @Nullable BucketCollector aggregatorCollector, HighlightPhase highlightPhase) throws IOException {
         PercolatorQuery.Builder builder = new PercolatorQuery.Builder(context.docSearcher(), queriesRegistry.getPercolateQueries(), context.percolatorTypeFilter());
-        if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_3_0_0)) {
+        if (queriesRegistry.indexSettings().getSettings().getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0)) {
             builder.extractQueryTermsQuery(PercolatorFieldMapper.EXTRACTED_TERMS_FULL_FIELD_NAME, PercolatorFieldMapper.UNKNOWN_QUERY_FULL_FIELD_NAME);
         }
         if (context.percolateQuery() != null || context.aliasFilter() != null) {


@@ -28,7 +28,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
 import org.elasticsearch.test.ESTestCase;
@@ -77,7 +76,7 @@ public class AnalysisServiceTests extends ESTestCase {
     }
     public void testOverrideDefaultIndexAnalyzer() {
-        Version version = VersionUtils.randomVersionBetween(getRandom(), Version.V_3_0_0, Version.CURRENT);
+        Version version = VersionUtils.randomVersionBetween(getRandom(), Version.V_5_0_0, Version.CURRENT);
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         try {
             AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
@@ -91,7 +90,7 @@
     }
     public void testBackCompatOverrideDefaultIndexAnalyzer() {
-        Version version = VersionUtils.randomVersionBetween(getRandom(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_3_0_0));
+        Version version = VersionUtils.randomVersionBetween(getRandom(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_5_0_0));
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         AnalysisService analysisService = new AnalysisService(IndexSettingsModule.newIndexSettings("index", settings),
             Collections.singletonMap("default_index", analyzerProvider("default_index")),
@@ -113,7 +112,7 @@
     }
     public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() {
-        Version version = VersionUtils.randomVersionBetween(getRandom(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_3_0_0));
+        Version version = VersionUtils.randomVersionBetween(getRandom(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion(Version.V_5_0_0));
         Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
         Map<String, AnalyzerProvider> analyzers = new HashMap<>();
         analyzers.put("default_index", analyzerProvider("default_index"));


@@ -114,7 +114,7 @@ public class SpanNearQueryBuilderTests extends AbstractQueryTestCase<SpanNearQue
     }
     public void testCollectPayloadsDeprecated() throws Exception {
-        assertEquals("We can remove support for ignoring collect_payloads in 4.0", 3, Version.CURRENT.major);
+        assertEquals("We can remove support for ignoring collect_payloads in 6.0.0", 5, Version.CURRENT.major);
         String json =
                 "{\n" +
                 " \"span_near\" : {\n" +


@@ -1,7 +1,7 @@
 [[query-dsl-fuzzy-query]]
 === Fuzzy Query
-deprecated[3.0.0, Will be removed without a replacement for `string` fields. Note that the `fuzziness` parameter is still supported for match queries and in suggesters. Use range queries for `date` and `numeric` fields instead.]
+deprecated[5.0.0, Will be removed without a replacement for `string` fields. Note that the `fuzziness` parameter is still supported for match queries and in suggesters. Use range queries for `date` and `numeric` fields instead.]
 The fuzzy query uses similarity based on Levenshtein edit distance for
 `string` fields, and a `+/-` margin on numeric and date fields.


@@ -1,7 +1,7 @@
 [[query-dsl-parent-id-query]]
 === Parent Id Query
-added[3.0.0]
+added[5.0.0]
 The `parent_id` query can be used to find a child document pointing to a particular parent id.


@@ -1,11 +1,11 @@
 [[search-percolate]]
 == Percolator
-added[3.0.0,Modifications to percolator queries aren't visible immediately and a refresh is required]
+added[5.0.0,Modifications to percolator queries aren't visible immediately and a refresh is required]
-added[3.0.0,The percolate API by default limits the number of matches to `10` whereas before this wasn't set]
+added[5.0.0,The percolate API by default limits the number of matches to `10` whereas before this wasn't set]
-added[3.0.0,For indices created on or after version 3.0.0 the percolator automatically indexes the query terms with the percolator queries; this allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 3.0.0 indices to take advantage of this new optimization]
+added[5.0.0,For indices created on or after version 5.0.0 the percolator automatically indexes the query terms with the percolator queries; this allows the percolator to percolate documents more quickly. It is advisable to reindex any pre 5.0.0 indices to take advantage of this new optimization]
 Traditionally you design documents based on your data, store them into an index, and then define queries via the search API
 in order to retrieve these documents. The percolator works in the opposite direction. First you store queries into an
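
The third added[5.0.0] note is the substantive one: the percolator now extracts the terms of each registered query and indexes them next to the query, so at percolate time only queries sharing at least one term with the incoming document have to be executed (this is what extractQueryTermsQuery wires up in the PercolatorService hunk earlier in this commit). A toy model of that pre-filtering idea; none of this is the actual implementation:

    import java.util.*;

    class PercolatorPrefilterSketch {
        final Map<String, Set<String>> queryTerms = new HashMap<>(); // queryId -> extracted terms

        void register(String queryId, Set<String> extractedTerms) {
            queryTerms.put(queryId, extractedTerms);
        }

        // Candidate queries: at least one extracted term occurs in the document.
        List<String> candidates(Set<String> docTerms) {
            List<String> out = new ArrayList<>();
            for (Map.Entry<String, Set<String>> e : queryTerms.entrySet()) {
                if (!Collections.disjoint(e.getValue(), docTerms)) {
                    out.add(e.getKey()); // only these queries are actually run
                }
            }
            return out;
        }
    }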


@@ -133,7 +133,7 @@ We have increased our test coverage to include scenarios tested by Jepsen. We ma
 This status page is a start, but we can do a better job of explicitly documenting the processes at work in Elasticsearch, and what happens in the case of each type of failure. The plan is to have a test case that validates each behavior under simulated conditions. Every test will document the expected results, the associated test code and an explicit PASS or FAIL status for each simulated case.
 [float]
-=== Do not allow stale shards to automatically be promoted to primary (STATUS: ONGOING, v3.0.0)
+=== Do not allow stale shards to automatically be promoted to primary (STATUS: ONGOING, v5.0.0)
 In some scenarios, after the loss of all valid copies, a stale replica shard can be automatically assigned as a primary, preferring old data
 to no data at all ({GIT}14671[#14671]). This can lead to a loss of acknowledged writes if the valid copies are not lost but are rather
@@ -143,7 +143,7 @@ for one of the good shard copies to reappear. In case where all good copies are
 stale shard copy.
 [float]
-=== Make index creation resilient to index closing and full cluster crashes (STATUS: ONGOING, v3.0.0)
+=== Make index creation resilient to index closing and full cluster crashes (STATUS: ONGOING, v5.0.0)
 Recovering an index requires a quorum (with an exception for 2) of shard copies to be available to allocate a primary. This means that
 a primary cannot be assigned if the cluster dies before enough shards have been allocated ({GIT}9126[#9126]). The same happens if an index
@@ -156,7 +156,7 @@ shard will be allocated upon reopening the index.
 == Unreleased
 [float]
-=== Use two phase commit for Cluster State publishing (STATUS: UNRELEASED, v3.0.0)
+=== Use two phase commit for Cluster State publishing (STATUS: UNRELEASED, v5.0.0)
 A master node in Elasticsearch continuously https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html#fault-detection[monitors the cluster nodes]
 and removes any node from the cluster that doesn't respond to its pings in a timely
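
For the "two phase commit for Cluster State publishing" item, the intended protocol is: the master sends the new cluster state to all master-eligible nodes and waits for acknowledgements, and only once a majority has acked does it send a commit message telling nodes to apply the state; without a quorum the state is discarded. A toy model under those assumptions; none of these types are real Elasticsearch classes:

    import java.util.List;

    class TwoPhasePublishSketch {
        interface Node {
            boolean sendNewState(long version); // phase 1: true = acknowledged
            void commit(long version);          // phase 2: apply the state
        }

        static boolean publish(List<Node> masterNodes, long version) {
            int acks = 0;
            for (Node node : masterNodes) {
                if (node.sendNewState(version)) {
                    acks++;
                }
            }
            if (acks <= masterNodes.size() / 2) {
                return false; // no quorum: the new state must not take effect
            }
            for (Node node : masterNodes) {
                node.commit(version); // nodes apply only after the commit message
            }
            return true;
        }
    }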


@@ -77,9 +77,9 @@ function migratePlugin() {
     removeLines "<repositories>" "<\/repositories>" "plugins/$1/pom.xml"
     removeLines "<url>" "<\/scm>" "plugins/$1/pom.xml"
-    # echo "### remove version 3.0.0-SNAPSHOT from $1 pom.xml"
-    # All plugins for ES 2.0.0 use the 3.0.0-SNAPSHOT version number
-    replaceLine " <version>3.0.0-SNAPSHOT<\/version>" "" "plugins/$1/pom.xml"
+    # echo "### remove version 5.0.0-SNAPSHOT from $1 pom.xml"
+    # All plugins for ES 5.0.0 use the 5.0.0-SNAPSHOT version number
+    replaceLine " <version>5.0.0-SNAPSHOT<\/version>" "" "plugins/$1/pom.xml"
     # echo "### remove unused dev-tools and .git dirs and LICENSE.txt and CONTRIBUTING.md files"
     rm -r plugins/$1/dev-tools


@@ -1,48 +0,0 @@
-# Elasticsearch plugin descriptor file
-# This file must exist as 'plugin-descriptor.properties' at
-# the root directory of all plugins.
-#
-### example plugin for "foo"
-#
-# foo.zip <-- zip file for the plugin, with this structure:
-#   <arbitrary name1>.jar <-- classes, resources, dependencies
-#   <arbitrary nameN>.jar <-- any number of jars
-#   plugin-descriptor.properties <-- example contents below:
-#
-# classname=foo.bar.BazPlugin
-# description=My cool plugin
-# version=2.0
-# elasticsearch.version=2.0
-# java.version=1.7
-#
-### mandatory elements for all plugins:
-#
-# 'description': simple summary of the plugin
-description=The S3 repository plugin adds S3 repositories.
-#
-# 'version': plugin's version
-version=3.0.0-SNAPSHOT
-#
-# 'name': the plugin name
-name=repository-s3
-#
-# 'classname': the name of the class to load, fully-qualified.
-classname=org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin
-#
-# 'java.version' version of java the code is built against
-# use the system property java.specification.version
-# version string must be a sequence of nonnegative decimal integers
-# separated by "."'s and may have leading zeros
-java.version=1.8
-#
-# 'elasticsearch.version' version of elasticsearch compiled against
-elasticsearch.version=3.0.0-SNAPSHOT
-#
-### deprecated elements for jvm plugins :
-#
-# 'isolated': true if the plugin should have its own classloader.
-#   passing false is deprecated, and only intended to support plugins
-#   that have hard dependencies against each other. If this is
-#   not specified, then the plugin is isolated by default.
-isolated=true
-#