Merge branch 'master' into feature-suggest-refactoring

Christoph Büscher 2016-03-08 11:10:08 +01:00
commit 1264f37a1b
325 changed files with 2074 additions and 3550 deletions

View File

@@ -68,7 +68,7 @@ public class PluginBuildPlugin extends BuildPlugin {
             testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
             // we "upgrade" these optional deps to provided for plugins, since they will run
             // with a full elasticsearch server that includes optional deps
-            provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
+            provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
             provided "com.vividsolutions:jts:${project.versions.jts}"
             provided "log4j:log4j:${project.versions.log4j}"
             provided "log4j:apache-log4j-extras:${project.versions.log4j}"

View File

@@ -68,11 +68,17 @@ class PluginPropertiesTask extends Copy {
     }

     Map generateSubstitutions() {
+        def stringSnap = { version ->
+            if (version.endsWith("-SNAPSHOT")) {
+                return version.substring(0, version.length() - 9)
+            }
+            return version
+        }
         return [
             'name': extension.name,
             'description': extension.description,
-            'version': extension.version,
-            'elasticsearchVersion': VersionProperties.elasticsearch,
+            'version': stringSnap(extension.version),
+            'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch),
             'javaVersion': project.targetCompatibility as String,
             'isolated': extension.isolated as String,
             'classname': extension.classname
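
The stringSnap closure added above just trims a trailing -SNAPSHOT qualifier (9 characters) before the version strings are substituted into the plugin descriptor. A minimal Java sketch of the same trimming, with a hypothetical method name:

    // Hypothetical equivalent of the Groovy stringSnap closure above.
    static String stripSnapshot(String version) {
        if (version.endsWith("-SNAPSHOT")) {
            // "-SNAPSHOT" is 9 characters, matching the substring arithmetic above
            return version.substring(0, version.length() - "-SNAPSHOT".length());
        }
        return version;
    }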

View File

@@ -1480,7 +1480,6 @@
   <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
   <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
   <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
-  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DateRangeTests.java" checks="LineLength" />
   <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
   <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
   <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]FunctionScoreTests.java" checks="LineLength" />

View File

@@ -33,20 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
 java.io.RandomAccessFile
 java.nio.file.Path#toFile()
-@defaultMessage Don't use deprecated lucene apis
-org.apache.lucene.index.DocsEnum
-org.apache.lucene.index.DocsAndPositionsEnum
-org.apache.lucene.queries.TermFilter
-org.apache.lucene.queries.TermsFilter
-org.apache.lucene.search.Filter
-org.apache.lucene.search.FilteredQuery
-org.apache.lucene.search.TermRangeFilter
-org.apache.lucene.search.NumericRangeFilter
-org.apache.lucene.search.PrefixFilter
-org.apache.lucene.search.QueryWrapperFilter
-org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
-org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)
 java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
 java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.

View File

@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.in
 org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)
 @defaultMessage Pass the precision step from the mappings explicitly instead
-org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
-org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
-org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
-org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
 @defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
 java.lang.Object#wait()
@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
 org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
 org.elasticsearch.common.io.PathUtils#get(java.net.URI)
-@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
-org.apache.lucene.search.Query#setBoost(float)
 @defaultMessage Constructing a DateTime without a time zone is dangerous
 org.joda.time.DateTime#<init>()
 org.joda.time.DateTime#<init>(long)

View File

@@ -1,8 +1,8 @@
 elasticsearch = 5.0.0
-lucene = 5.5.0
+lucene = 6.0.0-snapshot-bea235f
 # optional dependencies
-spatial4j = 0.5
+spatial4j = 0.6
 jts = 1.13
 jackson = 2.7.1
 log4j = 1.2.17

View File

@@ -42,6 +42,7 @@ dependencies {
   compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
   compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
   compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
+  compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
   compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
   compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
@@ -71,7 +72,7 @@ dependencies {
   compile 'org.hdrhistogram:HdrHistogram:2.1.6'
   // lucene spatial
-  compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
+  compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
   compile "com.vividsolutions:jts:${versions.jts}", optional
   // logging
@@ -168,11 +169,6 @@ thirdPartyAudit.excludes = [
   'org.apache.commons.logging.Log',
   'org.apache.commons.logging.LogFactory',
-  // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
-  'org.apache.regexp.CharacterIterator',
-  'org.apache.regexp.RE',
-  'org.apache.regexp.REProgram',
   // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
   'org.apache.tomcat.jni.Buffer',
   'org.apache.tomcat.jni.Library',
@@ -210,7 +206,7 @@ thirdPartyAudit.excludes = [
   'org.jboss.marshalling.MarshallingConfiguration',
   'org.jboss.marshalling.Unmarshaller',
-  // from com.spatial4j.core.io.GeoJSONReader (spatial4j)
+  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
   'org.noggit.JSONParser',
   // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)

View File

@@ -33,7 +33,6 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.InPlaceMergeSorter;
-import org.apache.lucene.util.ToStringUtils;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query {
             if (boosts != null) {
                 boost = boosts[i];
             }
-            builder.append(ToStringUtils.boost(boost));
+            if (boost != 1f) {
+                builder.append('^').append(boost);
+            }
             builder.append(", ");
         }
         if (terms.length > 0) {
             builder.setLength(builder.length() - 2);
         }
         builder.append("])");
-        builder.append(ToStringUtils.boost(getBoost()));
         return builder.toString();
     }
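
Lucene 6 drops ToStringUtils.boost(), so the toString() code above now renders the ^boost suffix by hand, and only when the boost is not the default. A small self-contained sketch of that idiom (helper name hypothetical):

    // Hypothetical helper showing the inlined replacement for ToStringUtils.boost():
    // append "^<boost>" only when the boost differs from the default of 1.
    static String withBoost(String term, float boost) {
        StringBuilder builder = new StringBuilder(term);
        if (boost != 1f) {
            builder.append('^').append(boost); // e.g. "foo^2.0"
        }
        return builder.toString();
    }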

View File

@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.FuzzyQuery;
@@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return getFieldQuerySingle(field, queryText, quoted);
@@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return super.getFieldQuery(field, queryText, slop);
@@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser {
             }
             if (clauses.size() == 0) // happens for stopwords
                 return null;
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     }
@@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser {
                     clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
                 }
             }
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser {
             }
             if (clauses.size() == 0) // happens for stopwords
                 return null;
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getPrefixQuerySingle(field, termStr);
@@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser {
             for (String token : tlist) {
                 clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
             }
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     }
@@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser {
             }
             if (clauses.size() == 0) // happens for stopwords
                 return null;
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getWildcardQuerySingle(field, termStr);
@@ -704,7 +705,7 @@ public class MapperQueryParser extends QueryParser {
             }
             if (clauses.size() == 0) // happens for stopwords
                 return null;
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getRegexpQuerySingle(field, termStr);
@@ -740,9 +741,23 @@ public class MapperQueryParser extends QueryParser {
         }
     }

+    /**
+     * @deprecated review all use of this, don't rely on coord
+     */
+    @Deprecated
+    protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
+        BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        builder.setDisableCoord(true);
+        for (BooleanClause clause : clauses) {
+            builder.add(clause);
+        }
+        return fixNegativeQueryIfNeeded(builder.build());
+    }
+
     @Override
-    protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
-        Query q = super.getBooleanQuery(clauses, disableCoord);
+    protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
+        Query q = super.getBooleanQuery(clauses);
         if (q == null) {
             return null;
         }
@@ -769,7 +784,6 @@ public class MapperQueryParser extends QueryParser {
             }
             pq = builder.build();
             //make sure that the boost hasn't been set beforehand, otherwise we'd lose it
-            assert q.getBoost() == 1f;
             assert q instanceof BoostQuery == false;
             return pq;
         } else if (q instanceof MultiPhraseQuery) {
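
The recurring rewrite in this file replaces getBooleanQuery(clauses, true) with the new getBooleanQueryCoordDisabled(clauses), because Lucene 6 removed the disableCoord parameter from QueryParser.getBooleanQuery. The heart of the new helper is the builder-based construction from the hunk above; a condensed sketch, assuming the Lucene-6-era BooleanQuery.Builder API:

    // Build a BooleanQuery with coord scoring disabled, as the deprecated
    // helper above does; `clauses` is a List<BooleanClause>.
    BooleanQuery.Builder builder = new BooleanQuery.Builder();
    builder.setDisableCoord(true); // keep the pre-6.0 coord-off behaviour
    for (BooleanClause clause : clauses) {
        builder.add(clause);
    }
    BooleanQuery query = builder.build();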

View File

@@ -26,8 +26,7 @@ import java.io.IOException;
 /**
  * Abstract decorator class of a DocIdSetIterator
  * implementation that provides on-demand filter/validation
- * mechanism on an underlying DocIdSetIterator. See {@link
- * FilteredDocIdSet}.
+ * mechanism on an underlying DocIdSetIterator.
  */
 public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
     protected DocIdSetIterator _innerIter;

View File

@@ -87,7 +87,7 @@ public class CustomFieldQuery extends FieldQuery {
         if (numTerms > 16) {
             for (Term[] currentPosTerm : terms) {
                 for (Term term : currentPosTerm) {
-                    super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
+                    super.flatten(new TermQuery(term), reader, flatQueries, 1F);
                 }
             }
             return;
@@ -104,7 +104,7 @@ public class CustomFieldQuery extends FieldQuery {
             queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
         }
         Query query = queryBuilder.build();
-        this.flatten(query, reader, flatQueries, orig.getBoost());
+        this.flatten(query, reader, flatQueries, 1F);
     } else {
         Term[] t = terms.get(currentPos);
         for (int i = 0; i < t.length; i++) {

View File

@@ -35,212 +35,10 @@ import java.io.IOException;
 @SuppressWarnings("deprecation")
 public class Version {
-    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is Beta/RC indicator
-    // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
+    // The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator
+    // AA values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
     // the (internal) format of the id is there so we can easily do after/before checks on the id
-    // NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
-    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;
public static final int V_0_18_0_ID = /*00*/180099;
public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_1_ID = /*00*/180199;
public static final Version V_0_18_1 = new Version(V_0_18_1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_2_ID = /*00*/180299;
public static final Version V_0_18_2 = new Version(V_0_18_2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_3_ID = /*00*/180399;
public static final Version V_0_18_3 = new Version(V_0_18_3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_4_ID = /*00*/180499;
public static final Version V_0_18_4 = new Version(V_0_18_4_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_5_ID = /*00*/180599;
public static final Version V_0_18_5 = new Version(V_0_18_5_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_6_ID = /*00*/180699;
public static final Version V_0_18_6 = new Version(V_0_18_6_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_7_ID = /*00*/180799;
public static final Version V_0_18_7 = new Version(V_0_18_7_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_18_8_ID = /*00*/180899;
public static final Version V_0_18_8 = new Version(V_0_18_8_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_RC1_ID = /*00*/190051;
public static final Version V_0_19_0_RC1 = new Version(V_0_19_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_RC2_ID = /*00*/190052;
public static final Version V_0_19_0_RC2 = new Version(V_0_19_0_RC2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_RC3_ID = /*00*/190053;
public static final Version V_0_19_0_RC3 = new Version(V_0_19_0_RC3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_0_ID = /*00*/190099;
public static final Version V_0_19_0 = new Version(V_0_19_0_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_1_ID = /*00*/190199;
public static final Version V_0_19_1 = new Version(V_0_19_1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_2_ID = /*00*/190299;
public static final Version V_0_19_2 = new Version(V_0_19_2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_3_ID = /*00*/190399;
public static final Version V_0_19_3 = new Version(V_0_19_3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_4_ID = /*00*/190499;
public static final Version V_0_19_4 = new Version(V_0_19_4_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_5_ID = /*00*/190599;
public static final Version V_0_19_5 = new Version(V_0_19_5_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_6_ID = /*00*/190699;
public static final Version V_0_19_6 = new Version(V_0_19_6_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_7_ID = /*00*/190799;
public static final Version V_0_19_7 = new Version(V_0_19_7_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_8_ID = /*00*/190899;
public static final Version V_0_19_8 = new Version(V_0_19_8_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_9_ID = /*00*/190999;
public static final Version V_0_19_9 = new Version(V_0_19_9_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_10_ID = /*00*/191099;
public static final Version V_0_19_10 = new Version(V_0_19_10_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_11_ID = /*00*/191199;
public static final Version V_0_19_11 = new Version(V_0_19_11_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_12_ID = /*00*/191299;
public static final Version V_0_19_12 = new Version(V_0_19_12_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_19_13_ID = /*00*/191399;
public static final Version V_0_19_13 = new Version(V_0_19_13_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_0_RC1_ID = /*00*/200051;
public static final Version V_0_20_0_RC1 = new Version(V_0_20_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_0_ID = /*00*/200099;
public static final Version V_0_20_0 = new Version(V_0_20_0_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_1_ID = /*00*/200199;
public static final Version V_0_20_1 = new Version(V_0_20_1_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_2_ID = /*00*/200299;
public static final Version V_0_20_2 = new Version(V_0_20_2_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_3_ID = /*00*/200399;
public static final Version V_0_20_3 = new Version(V_0_20_3_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_4_ID = /*00*/200499;
public static final Version V_0_20_4 = new Version(V_0_20_4_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_5_ID = /*00*/200599;
public static final Version V_0_20_5 = new Version(V_0_20_5_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_20_6_ID = /*00*/200699;
public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);
public static final int V_0_90_0_Beta1_ID = /*00*/900001;
public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
public static final int V_0_90_0_RC1_ID = /*00*/900051;
public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
public static final int V_0_90_0_RC2_ID = /*00*/900052;
public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
public static final int V_0_90_0_ID = /*00*/900099;
public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
public static final int V_0_90_1_ID = /*00*/900199;
public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
public static final int V_0_90_2_ID = /*00*/900299;
public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
public static final int V_0_90_3_ID = /*00*/900399;
public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
public static final int V_0_90_4_ID = /*00*/900499;
public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
public static final int V_0_90_5_ID = /*00*/900599;
public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
public static final int V_0_90_6_ID = /*00*/900699;
public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
public static final int V_0_90_7_ID = /*00*/900799;
public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
public static final int V_0_90_8_ID = /*00*/900899;
public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_9_ID = /*00*/900999;
public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_10_ID = /*00*/901099;
public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_11_ID = /*00*/901199;
public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_12_ID = /*00*/901299;
public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_0_90_13_ID = /*00*/901399;
public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_Beta1_ID = 1000001;
public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
public static final int V_1_0_0_Beta2_ID = 1000002;
public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_RC1_ID = 1000051;
public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_RC2_ID = 1000052;
public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_0_ID = 1000099;
public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_1_ID = 1000199;
public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_2_ID = 1000299;
public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_0_3_ID = 1000399;
public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
public static final int V_1_1_0_ID = 1010099;
public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
public static final int V_1_1_1_ID = 1010199;
public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
public static final int V_1_1_2_ID = 1010299;
public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
public static final int V_1_2_0_ID = 1020099;
public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_1_ID = 1020199;
public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_2_ID = 1020299;
public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_3_ID = 1020399;
public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_2_4_ID = 1020499;
public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
public static final int V_1_3_0_ID = 1030099;
public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_1_ID = 1030199;
public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_2_ID = 1030299;
public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_3_ID = 1030399;
public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_4_ID = 1030499;
public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_5_ID = 1030599;
public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_6_ID = 1030699;
public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_7_ID = 1030799;
public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_8_ID = 1030899;
public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_3_9_ID = 1030999;
public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
public static final int V_1_4_0_Beta1_ID = 1040001;
public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
public static final int V_1_4_0_ID = 1040099;
public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final int V_1_4_1_ID = 1040199;
public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final int V_1_4_2_ID = 1040299;
public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
public static final int V_1_4_3_ID = 1040399;
public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
public static final int V_1_4_4_ID = 1040499;
public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
public static final int V_1_4_5_ID = 1040599;
public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_0_ID = 1050099;
public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_1_ID = 1050199;
public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_5_2_ID = 1050299;
public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_0_ID = 1060099;
public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_1_ID = 1060199;
public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_6_2_ID = 1060299;
public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_0_ID = 1070099;
public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_1_ID = 1070199;
public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_2_ID = 1070299;
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_3_ID = 1070399;
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_4_ID = 1070499;
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_5_ID = 1070599;
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
 public static final int V_2_0_0_beta1_ID = 2000001;
 public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -265,7 +63,7 @@ public class Version {
     public static final int V_2_3_0_ID = 2030099;
     public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_ID = 5000099;
-    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final Version CURRENT = V_5_0_0;

     static {
@@ -303,198 +101,6 @@ public class Version {
             return V_2_0_0_beta2;
         case V_2_0_0_beta1_ID:
             return V_2_0_0_beta1;
case V_1_7_5_ID:
return V_1_7_5;
case V_1_7_4_ID:
return V_1_7_4;
case V_1_7_3_ID:
return V_1_7_3;
case V_1_7_2_ID:
return V_1_7_2;
case V_1_7_1_ID:
return V_1_7_1;
case V_1_7_0_ID:
return V_1_7_0;
case V_1_6_2_ID:
return V_1_6_2;
case V_1_6_1_ID:
return V_1_6_1;
case V_1_6_0_ID:
return V_1_6_0;
case V_1_5_2_ID:
return V_1_5_2;
case V_1_5_1_ID:
return V_1_5_1;
case V_1_5_0_ID:
return V_1_5_0;
case V_1_4_5_ID:
return V_1_4_5;
case V_1_4_4_ID:
return V_1_4_4;
case V_1_4_3_ID:
return V_1_4_3;
case V_1_4_2_ID:
return V_1_4_2;
case V_1_4_1_ID:
return V_1_4_1;
case V_1_4_0_ID:
return V_1_4_0;
case V_1_4_0_Beta1_ID:
return V_1_4_0_Beta1;
case V_1_3_9_ID:
return V_1_3_9;
case V_1_3_8_ID:
return V_1_3_8;
case V_1_3_7_ID:
return V_1_3_7;
case V_1_3_6_ID:
return V_1_3_6;
case V_1_3_5_ID:
return V_1_3_5;
case V_1_3_4_ID:
return V_1_3_4;
case V_1_3_3_ID:
return V_1_3_3;
case V_1_3_2_ID:
return V_1_3_2;
case V_1_3_1_ID:
return V_1_3_1;
case V_1_3_0_ID:
return V_1_3_0;
case V_1_2_4_ID:
return V_1_2_4;
case V_1_2_3_ID:
return V_1_2_3;
case V_1_2_2_ID:
return V_1_2_2;
case V_1_2_1_ID:
return V_1_2_1;
case V_1_2_0_ID:
return V_1_2_0;
case V_1_1_2_ID:
return V_1_1_2;
case V_1_1_1_ID:
return V_1_1_1;
case V_1_1_0_ID:
return V_1_1_0;
case V_1_0_3_ID:
return V_1_0_3;
case V_1_0_2_ID:
return V_1_0_2;
case V_1_0_1_ID:
return V_1_0_1;
case V_1_0_0_ID:
return V_1_0_0;
case V_1_0_0_RC2_ID:
return V_1_0_0_RC2;
case V_1_0_0_RC1_ID:
return V_1_0_0_RC1;
case V_1_0_0_Beta2_ID:
return V_1_0_0_Beta2;
case V_1_0_0_Beta1_ID:
return V_1_0_0_Beta1;
case V_0_90_13_ID:
return V_0_90_13;
case V_0_90_12_ID:
return V_0_90_12;
case V_0_90_11_ID:
return V_0_90_11;
case V_0_90_10_ID:
return V_0_90_10;
case V_0_90_9_ID:
return V_0_90_9;
case V_0_90_8_ID:
return V_0_90_8;
case V_0_90_7_ID:
return V_0_90_7;
case V_0_90_6_ID:
return V_0_90_6;
case V_0_90_5_ID:
return V_0_90_5;
case V_0_90_4_ID:
return V_0_90_4;
case V_0_90_3_ID:
return V_0_90_3;
case V_0_90_2_ID:
return V_0_90_2;
case V_0_90_1_ID:
return V_0_90_1;
case V_0_90_0_ID:
return V_0_90_0;
case V_0_90_0_RC2_ID:
return V_0_90_0_RC2;
case V_0_90_0_RC1_ID:
return V_0_90_0_RC1;
case V_0_90_0_Beta1_ID:
return V_0_90_0_Beta1;
case V_0_20_6_ID:
return V_0_20_6;
case V_0_20_5_ID:
return V_0_20_5;
case V_0_20_4_ID:
return V_0_20_4;
case V_0_20_3_ID:
return V_0_20_3;
case V_0_20_2_ID:
return V_0_20_2;
case V_0_20_1_ID:
return V_0_20_1;
case V_0_20_0_ID:
return V_0_20_0;
case V_0_20_0_RC1_ID:
return V_0_20_0_RC1;
case V_0_19_0_RC1_ID:
return V_0_19_0_RC1;
case V_0_19_0_RC2_ID:
return V_0_19_0_RC2;
case V_0_19_0_RC3_ID:
return V_0_19_0_RC3;
case V_0_19_0_ID:
return V_0_19_0;
case V_0_19_1_ID:
return V_0_19_1;
case V_0_19_2_ID:
return V_0_19_2;
case V_0_19_3_ID:
return V_0_19_3;
case V_0_19_4_ID:
return V_0_19_4;
case V_0_19_5_ID:
return V_0_19_5;
case V_0_19_6_ID:
return V_0_19_6;
case V_0_19_7_ID:
return V_0_19_7;
case V_0_19_8_ID:
return V_0_19_8;
case V_0_19_9_ID:
return V_0_19_9;
case V_0_19_10_ID:
return V_0_19_10;
case V_0_19_11_ID:
return V_0_19_11;
case V_0_19_12_ID:
return V_0_19_12;
case V_0_19_13_ID:
return V_0_19_13;
case V_0_18_0_ID:
return V_0_18_0;
case V_0_18_1_ID:
return V_0_18_1;
case V_0_18_2_ID:
return V_0_18_2;
case V_0_18_3_ID:
return V_0_18_3;
case V_0_18_4_ID:
return V_0_18_4;
case V_0_18_5_ID:
return V_0_18_5;
case V_0_18_6_ID:
return V_0_18_6;
case V_0_18_7_ID:
return V_0_18_7;
case V_0_18_8_ID:
return V_0_18_8;
         default:
             return new Version(id, org.apache.lucene.util.Version.LATEST);
         }
@@ -531,15 +137,23 @@ public class Version {
         if (!Strings.hasLength(version)) {
             return Version.CURRENT;
         }
+        final boolean snapshot; // this is some BWC for 2.x and before indices
+        if (snapshot = version.endsWith("-SNAPSHOT")) {
+            version = version.substring(0, version.length() - 9);
+        }
         String[] parts = version.split("\\.|\\-");
         if (parts.length < 3 || parts.length > 4) {
             throw new IllegalArgumentException("the version needs to contain major, minor, and revision, and optionally the build: " + version);
         }
         try {
+            final int rawMajor = Integer.parseInt(parts[0]);
+            if (rawMajor >= 5 && snapshot) { // we don't support snapshot as part of the version here anymore
+                throw new IllegalArgumentException("illegal version format - snapshots are only supported until version 2.x");
+            }
+            final int betaOffset = rawMajor < 5 ? 0 : 25;
             //we reverse the version id calculation based on some assumption as we can't reliably reverse the modulo
-            final int major = Integer.parseInt(parts[0]) * 1000000;
+            final int major = rawMajor * 1000000;
             final int minor = Integer.parseInt(parts[1]) * 10000;
             final int revision = Integer.parseInt(parts[2]) * 100;
@@ -547,11 +161,17 @@ public class Version {
             int build = 99;
             if (parts.length == 4) {
                 String buildStr = parts[3];
-                if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
-                    build = Integer.parseInt(buildStr.substring(4));
-                }
-                if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
+                if (buildStr.startsWith("alpha")) {
+                    assert rawMajor >= 5 : "major must be >= 5 but was " + major;
+                    build = Integer.parseInt(buildStr.substring(5));
+                    assert build < 25 : "expected a beta build but " + build + " >= 25";
+                } else if (buildStr.startsWith("Beta") || buildStr.startsWith("beta")) {
+                    build = betaOffset + Integer.parseInt(buildStr.substring(4));
+                    assert build < 50 : "expected a beta build but " + build + " >= 50";
+                } else if (buildStr.startsWith("RC") || buildStr.startsWith("rc")) {
                     build = Integer.parseInt(buildStr.substring(2)) + 50;
+                } else {
+                    throw new IllegalArgumentException("unable to parse version " + version);
                 }
             }
@@ -614,13 +234,16 @@ public class Version {
     public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append(major).append('.').append(minor).append('.').append(revision);
-        if (isBeta()) {
+        if (isAlpha()) {
+            sb.append("-alpha");
+            sb.append(build);
+        } else if (isBeta()) {
             if (major >= 2) {
                 sb.append("-beta");
             } else {
                 sb.append(".Beta");
             }
-            sb.append(build);
+            sb.append(major < 5 ? build : build-25);
         } else if (build < 99) {
             if (major >= 2) {
                 sb.append("-rc");
@@ -656,7 +279,16 @@
     }

     public boolean isBeta() {
-        return build < 50;
+        return major < 5 ? build < 50 : build >= 25 && build < 50;
+    }
+
+    /**
+     * Returns true iff this version is an alpha version
+     * Note: This has been introduced in elasticsearch version 5. Previous versions will never
+     * have an alpha version.
+     */
+    public boolean isAlpha() {
+        return major < 5 ? false : build < 25;
     }

     public boolean isRC() {
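
For orientation, a worked reading of the XXYYZZAA scheme as amended above: alpha builds (new in 5.0) occupy AA values below 25, betas for 5.x are stored with a 25 offset, RCs add 50, and 99 marks a GA release. A sketch under that reading:

    // Worked examples of the XXYYZZAA version id encoding described above.
    //   2.3.0        -> 2030099  (GA release, AA == 99)
    //   5.0.0-alpha3 -> 5000003  (alpha: AA < 25, only valid for major >= 5)
    //   5.0.0-beta1  -> 5000026  (beta for 5.x: AA = 25 + build)
    //   5.0.0-rc1    -> 5000051  (RC: AA = 50 + build)
    static int versionId(int major, int minor, int revision, int aa) {
        return major * 1000000 + minor * 10000 + revision * 100 + aa;
    }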

View File

@@ -197,9 +197,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         numberOfPendingTasks = in.readInt();
         timedOut = in.readBoolean();
         numberOfInFlightFetch = in.readInt();
-        if (in.getVersion().onOrAfter(Version.V_1_7_0)) {
-            delayedUnassignedShards= in.readInt();
-        }
+        delayedUnassignedShards= in.readInt();
         taskMaxWaitingTime = TimeValue.readTimeValue(in);
     }
@@ -212,9 +210,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
         out.writeInt(numberOfPendingTasks);
         out.writeBoolean(timedOut);
         out.writeInt(numberOfInFlightFetch);
-        if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
-            out.writeInt(delayedUnassignedShards);
-        }
+        out.writeInt(delayedUnassignedShards);
         taskMaxWaitingTime.writeTo(out);
     }
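
Both hunks delete a wire-compatibility guard: with direct upgrades from 1.x no longer supported in 5.0, the delayedUnassignedShards field can be read and written unconditionally. The deleted guard followed the usual stream-version pattern, roughly:

    // The removed BWC idiom: only serialize a field when the remote node's
    // stream version is new enough to know about it.
    if (out.getVersion().onOrAfter(Version.V_1_7_0)) {
        out.writeInt(delayedUnassignedShards);
    }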

View File

@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.http.HttpInfo;
+import org.elasticsearch.ingest.core.IngestInfo;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.monitor.os.OsInfo;
 import org.elasticsearch.monitor.process.ProcessInfo;
@@ -74,12 +75,15 @@ public class NodeInfo extends BaseNodeResponse {
     @Nullable
     private PluginsAndModules plugins;

-    NodeInfo() {
+    @Nullable
+    private IngestInfo ingest;
+
+    public NodeInfo() {
     }

     public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
                     @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
-                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins) {
+                    @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
         super(node);
         this.version = version;
         this.build = build;
@@ -92,6 +96,7 @@ public class NodeInfo extends BaseNodeResponse {
         this.transport = transport;
         this.http = http;
         this.plugins = plugins;
+        this.ingest = ingest;
     }

     /**
@@ -176,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
         return this.plugins;
     }

+    @Nullable
+    public IngestInfo getIngest() {
+        return ingest;
+    }
+
     public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
         NodeInfo nodeInfo = new NodeInfo();
         nodeInfo.readFrom(in);
@@ -220,6 +230,10 @@ public class NodeInfo extends BaseNodeResponse {
             plugins = new PluginsAndModules();
             plugins.readFrom(in);
         }
+        if (in.readBoolean()) {
+            ingest = new IngestInfo();
+            ingest.readFrom(in);
+        }
     }

     @Override
@@ -285,5 +299,11 @@ public class NodeInfo extends BaseNodeResponse {
             out.writeBoolean(true);
             plugins.writeTo(out);
         }
+        if (ingest == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            ingest.writeTo(out);
+        }
     }
 }
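
The new ingest section reuses the optional-field convention visible in the surrounding code: a boolean presence flag on the wire, followed by the payload when present. Schematically, for any nullable section `info` (names here are illustrative):

    // Write side: presence flag, then payload.
    if (info == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        info.writeTo(out);
    }
    // Read side mirrors it: consume the flag, then the payload if present.
    if (in.readBoolean()) {
        info = new IngestInfo();
        info.readFrom(in);
    }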

View File

@@ -38,6 +38,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
     private boolean transport = true;
     private boolean http = true;
     private boolean plugins = true;
+    private boolean ingest = true;

     public NodesInfoRequest() {
     }
@@ -62,6 +63,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         transport = false;
         http = false;
         plugins = false;
+        ingest = false;
         return this;
     }
@@ -77,6 +79,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         transport = true;
         http = true;
         plugins = true;
+        ingest = true;
         return this;
     }
@@ -202,6 +205,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         return plugins;
     }

+    /**
+     * Should information about ingest be returned
+     * @param ingest true if you want info
+     */
+    public NodesInfoRequest ingest(boolean ingest) {
+        this.ingest = ingest;
+        return this;
+    }
+
+    /**
+     * @return true if information about ingest is requested
+     */
+    public boolean ingest() {
+        return ingest;
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -213,6 +232,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         transport = in.readBoolean();
         http = in.readBoolean();
         plugins = in.readBoolean();
+        ingest = in.readBoolean();
     }

     @Override
@@ -226,5 +246,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
         out.writeBoolean(transport);
         out.writeBoolean(http);
         out.writeBoolean(plugins);
+        out.writeBoolean(ingest);
     }
 }
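
Callers typically combine clear() with a single flag to fetch just one section; the new ingest flag slots into that pattern (this is exactly how PutPipelineTransportAction uses it further down):

    // Request only the ingest section of the node info.
    NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
    nodesInfoRequest.clear();       // switch every section off
    nodesInfoRequest.ingest(true);  // opt back in to ingest info only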

View File

@@ -110,4 +110,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesI
         request().plugins(plugins);
         return this;
     }
+
+    /**
+     * Should the node ingest info be returned.
+     */
+    public NodesInfoRequestBuilder setIngest(boolean ingest) {
+        request().ingest(ingest);
+        return this;
+    }
 }

View File

@@ -121,6 +121,9 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
             if (nodeInfo.getPlugins() != null) {
                 nodeInfo.getPlugins().toXContent(builder, params);
             }
+            if (nodeInfo.getIngest() != null) {
+                nodeInfo.getIngest().toXContent(builder, params);
+            }
             builder.endObject();
         }

View File

@@ -80,7 +80,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
     protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) {
         NodesInfoRequest request = nodeRequest.request;
         return nodeService.info(request.settings(), request.os(), request.process(), request.jvm(), request.threadPool(),
-                request.transport(), request.http(), request.plugins());
+                request.transport(), request.http(), request.plugins(), request.ingest());
     }

     @Override
@@ -95,7 +95,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequ
         public NodeInfoRequest() {
         }

-        NodeInfoRequest(String nodeId, NodesInfoRequest request) {
+        public NodeInfoRequest(String nodeId, NodesInfoRequest request) {
             super(nodeId);
             this.request = request;
         }

View File

@@ -98,7 +98,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
     @Override
     protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) {
-        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true);
+        NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, true, false, true, false);
         NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, true, false, false, false, false, false);
         List<ShardStats> shardsStats = new ArrayList<>();
         for (IndexService indexService : indicesService) {

View File

@ -20,6 +20,10 @@
package org.elasticsearch.action.ingest; package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterService;
@ -27,24 +31,32 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore; import org.elasticsearch.ingest.PipelineStore;
import org.elasticsearch.ingest.core.IngestInfo;
import org.elasticsearch.node.service.NodeService; import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.HashMap;
import java.util.Map;
public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> { public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPipelineRequest, WritePipelineResponse> {
private final PipelineStore pipelineStore; private final PipelineStore pipelineStore;
private final ClusterService clusterService; private final ClusterService clusterService;
private final TransportNodesInfoAction nodesInfoAction;
@Inject @Inject
public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, public PutPipelineTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, ActionFilters actionFilters, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService) { IndexNameExpressionResolver indexNameExpressionResolver, NodeService nodeService,
TransportNodesInfoAction nodesInfoAction) {
super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new); super(settings, PutPipelineAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, PutPipelineRequest::new);
this.clusterService = clusterService; this.clusterService = clusterService;
this.nodesInfoAction = nodesInfoAction;
this.pipelineStore = nodeService.getIngestService().getPipelineStore(); this.pipelineStore = nodeService.getIngestService().getPipelineStore();
} }
@ -60,7 +72,28 @@ public class PutPipelineTransportAction extends TransportMasterNodeAction<PutPip
@Override @Override
protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception { protected void masterOperation(PutPipelineRequest request, ClusterState state, ActionListener<WritePipelineResponse> listener) throws Exception {
pipelineStore.put(clusterService, request, listener); NodesInfoRequest nodesInfoRequest = new NodesInfoRequest();
nodesInfoRequest.clear();
nodesInfoRequest.ingest(true);
nodesInfoAction.execute(nodesInfoRequest, new ActionListener<NodesInfoResponse>() {
@Override
public void onResponse(NodesInfoResponse nodeInfos) {
try {
Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
for (NodeInfo nodeInfo : nodeInfos) {
ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
}
pipelineStore.put(clusterService, ingestInfos, request, listener);
} catch (Exception e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
} }
@Override @Override
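Before the store write, the action now fans a NodesInfoRequest out across the cluster, asks each node only for its ingest section, and hands the per-node IngestInfo map to the store, presumably so a pipeline referencing a processor some node lacks can be rejected up front. A minimal sketch of the collect step, reassembled from this hunk (the listener plumbing and the validation inside PipelineStore.put are omitted):

    // ask every node for its ingest processor inventory only
    NodesInfoRequest infoRequest = new NodesInfoRequest();
    infoRequest.clear();        // drop the default sections (os, jvm, ...)
    infoRequest.ingest(true);

    // NodesInfoResponse iterates over the per-node answers
    Map<DiscoveryNode, IngestInfo> ingestInfos = new HashMap<>();
    for (NodeInfo nodeInfo : nodeInfos) {
        ingestInfos.put(nodeInfo.getNode(), nodeInfo.getIngest());
    }
    pipelineStore.put(clusterService, ingestInfos, request, listener);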

View File

@ -18,26 +18,23 @@
*/ */
package org.elasticsearch.common; package org.elasticsearch.common;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import java.util.EnumSet;
import java.util.HashSet; import java.util.HashSet;
/** /**
* Holds a field that can be found in a request while parsing and its different variants, which may be deprecated. * Holds a field that can be found in a request while parsing and its different variants, which may be deprecated.
*/ */
public class ParseField { public class ParseField {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ParseField.class));
private final String camelCaseName; private final String camelCaseName;
private final String underscoreName; private final String underscoreName;
private final String[] deprecatedNames; private final String[] deprecatedNames;
private String allReplacedWith = null; private String allReplacedWith = null;
static final EnumSet<Flag> EMPTY_FLAGS = EnumSet.noneOf(Flag.class);
static final EnumSet<Flag> STRICT_FLAGS = EnumSet.of(Flag.STRICT);
enum Flag {
STRICT
}
public ParseField(String value, String... deprecatedNames) { public ParseField(String value, String... deprecatedNames) {
camelCaseName = Strings.toCamelCase(value); camelCaseName = Strings.toCamelCase(value);
underscoreName = Strings.toUnderscoreCase(value); underscoreName = Strings.toUnderscoreCase(value);
@ -80,19 +77,21 @@ public class ParseField {
return parseField; return parseField;
} }
boolean match(String currentFieldName, EnumSet<Flag> flags) { boolean match(String currentFieldName, boolean strict) {
if (allReplacedWith == null && (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName))) { if (allReplacedWith == null && (currentFieldName.equals(camelCaseName) || currentFieldName.equals(underscoreName))) {
return true; return true;
} }
String msg; String msg;
for (String depName : deprecatedNames) { for (String depName : deprecatedNames) {
if (currentFieldName.equals(depName)) { if (currentFieldName.equals(depName)) {
if (flags.contains(Flag.STRICT)) { msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead"; if (allReplacedWith != null) {
if (allReplacedWith != null) { msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]"; }
} if (strict) {
throw new IllegalArgumentException(msg); throw new IllegalArgumentException(msg);
} else {
DEPRECATION_LOGGER.deprecated(msg);
} }
return true; return true;
} }
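Untangled from the two columns above: exact camelCase or underscore matches still short-circuit, a deprecated name always builds the message first, and only strict mode escalates it; the lenient DEPRECATION_LOGGER call is gone entirely. The deprecated-name branch, reconstructed:

    // inside the loop over deprecatedNames, once currentFieldName matches depName:
    String msg = "Deprecated field [" + currentFieldName + "] used, expected [" + underscoreName + "] instead";
    if (allReplacedWith != null) {
        msg = "Deprecated field [" + currentFieldName + "] used, replaced by [" + allReplacedWith + "]";
    }
    if (strict) {
        throw new IllegalArgumentException(msg);   // strict parsing rejects the request
    }
    return true;                                   // lenient parsing now accepts it silently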

View File

@ -21,29 +21,28 @@ package org.elasticsearch.common;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import java.util.EnumSet;
/** /**
* Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField} * Matcher to use in combination with {@link ParseField} while parsing requests. Matches a {@link ParseField}
* against a field name and throws a deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting. * against a field name and throws a deprecation exception depending on the current value of the {@link #PARSE_STRICT} setting.
*/ */
public class ParseFieldMatcher { public class ParseFieldMatcher {
public static final String PARSE_STRICT = "index.query.parse.strict"; public static final String PARSE_STRICT = "index.query.parse.strict";
public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(ParseField.EMPTY_FLAGS); public static final ParseFieldMatcher EMPTY = new ParseFieldMatcher(false);
public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(ParseField.STRICT_FLAGS); public static final ParseFieldMatcher STRICT = new ParseFieldMatcher(true);
private final EnumSet<ParseField.Flag> parseFlags; private final boolean strict;
public ParseFieldMatcher(Settings settings) { public ParseFieldMatcher(Settings settings) {
if (settings.getAsBoolean(PARSE_STRICT, false)) { this(settings.getAsBoolean(PARSE_STRICT, false));
this.parseFlags = EnumSet.of(ParseField.Flag.STRICT);
} else {
this.parseFlags = ParseField.EMPTY_FLAGS;
}
} }
public ParseFieldMatcher(EnumSet<ParseField.Flag> parseFlags) { public ParseFieldMatcher(boolean strict) {
this.parseFlags = parseFlags; this.strict = strict;
}
/** Should deprecated settings be rejected? */
public boolean isStrict() {
return strict;
} }
/** /**
@ -55,6 +54,6 @@ public class ParseFieldMatcher {
* @return true whenever the parse field that we are looking for was found, false otherwise * @return true whenever the parse field that we are looking for was found, false otherwise
*/ */
public boolean match(String fieldName, ParseField parseField) { public boolean match(String fieldName, ParseField parseField) {
return parseField.match(fieldName, parseFlags); return parseField.match(fieldName, strict);
} }
} }
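For callers the change is invisible; only the flag plumbing shrinks to a boolean. A usage sketch (the field and its deprecated spelling are made up for illustration):

    ParseField timeZone = new ParseField("time_zone", "tz");   // "tz" is a deprecated spelling

    ParseFieldMatcher lenient = ParseFieldMatcher.EMPTY;       // same as new ParseFieldMatcher(false)
    ParseFieldMatcher strict = ParseFieldMatcher.STRICT;       // same as new ParseFieldMatcher(true)

    lenient.match("tz", timeZone);   // true: deprecated name accepted
    strict.isStrict();               // true, via the new accessor
    strict.match("tz", timeZone);    // throws IllegalArgumentException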

View File

@ -29,7 +29,7 @@ public class ShapesAvailability {
static { static {
boolean xSPATIAL4J_AVAILABLE; boolean xSPATIAL4J_AVAILABLE;
try { try {
Class.forName("com.spatial4j.core.shape.impl.PointImpl"); Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl");
xSPATIAL4J_AVAILABLE = true; xSPATIAL4J_AVAILABLE = true;
} catch (Throwable t) { } catch (Throwable t) {
xSPATIAL4J_AVAILABLE = false; xSPATIAL4J_AVAILABLE = false;
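Spatial4j moved to LocationTech: the package root com.spatial4j.core becomes org.locationtech.spatial4j (the .core segment disappears), which is why this availability probe and the long run of import-only hunks below all change the same way. The mapping:

    // com.spatial4j.core.shape.Shape            -> org.locationtech.spatial4j.shape.Shape
    // com.spatial4j.core.context.SpatialContext -> org.locationtech.spatial4j.context.SpatialContext
    Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl");   // probe for the relocated jar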

View File

@ -19,9 +19,9 @@
package org.elasticsearch.common.geo; package org.elasticsearch.common.geo;
import com.spatial4j.core.context.SpatialContext; import org.locationtech.spatial4j.context.SpatialContext;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.spatial4j.core.shape.ShapeCollection; import org.locationtech.spatial4j.shape.ShapeCollection;
import java.util.List; import java.util.List;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Circle; import org.locationtech.spatial4j.shape.Circle;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Rectangle; import org.locationtech.spatial4j.shape.Rectangle;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.geo.XShapeCollection;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory; import com.vividsolutions.jts.geom.GeometryFactory;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.LineString; import com.vividsolutions.jts.geom.LineString;

View File

@ -19,8 +19,8 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Point; import org.locationtech.spatial4j.shape.Point;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.geo.XShapeCollection;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.geo.XShapeCollection;

View File

@ -19,7 +19,7 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Point; import org.locationtech.spatial4j.shape.Point;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -19,8 +19,8 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.exception.InvalidShapeException; import org.locationtech.spatial4j.exception.InvalidShapeException;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory; import com.vividsolutions.jts.geom.GeometryFactory;

View File

@ -19,10 +19,10 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.context.jts.JtsSpatialContext; import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
import com.spatial4j.core.exception.InvalidShapeException; import org.locationtech.spatial4j.exception.InvalidShapeException;
import com.spatial4j.core.shape.Shape; import org.locationtech.spatial4j.shape.Shape;
import com.spatial4j.core.shape.jts.JtsGeometry; import org.locationtech.spatial4j.shape.jts.JtsGeometry;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.Geometry;
import com.vividsolutions.jts.geom.GeometryFactory; import com.vividsolutions.jts.geom.GeometryFactory;
@ -81,9 +81,9 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
* this normally isn't allowed. * this normally isn't allowed.
*/ */
protected final boolean multiPolygonMayOverlap = false; protected final boolean multiPolygonMayOverlap = false;
/** @see com.spatial4j.core.shape.jts.JtsGeometry#validate() */ /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#validate() */
protected final boolean autoValidateJtsGeometry = true; protected final boolean autoValidateJtsGeometry = true;
/** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */ /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#index() */
protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it.
protected ShapeBuilder() { protected ShapeBuilder() {

View File

@ -1,74 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import java.io.IOException;
import java.util.Objects;
/**
* Base implementation for a query which is cacheable at the index level but
* not the segment level as usually expected.
*/
public abstract class IndexCacheableQuery extends Query {
private Object readerCacheKey;
@Override
public Query rewrite(IndexReader reader) throws IOException {
if (reader.getCoreCacheKey() != this.readerCacheKey) {
IndexCacheableQuery rewritten = (IndexCacheableQuery) clone();
rewritten.readerCacheKey = reader.getCoreCacheKey();
return rewritten;
}
return super.rewrite(reader);
}
@Override
public boolean equals(Object obj) {
return super.equals(obj)
&& readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + Objects.hashCode(readerCacheKey);
}
@Override
public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
if (readerCacheKey == null) {
throw new IllegalStateException("Rewrite first");
}
if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) {
throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting");
}
return doCreateWeight(searcher, needsScores);
}
/** Create a {@link Weight} for this query.
* @see Query#createWeight(IndexSearcher, boolean)
*/
public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException;
}

View File

@ -88,7 +88,7 @@ import java.util.Objects;
public class Lucene { public class Lucene {
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54"; public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
public static final String LATEST_CODEC = "Lucene54"; public static final String LATEST_CODEC = "Lucene60";
static { static {
Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);
@ -236,13 +236,8 @@ public class Lucene {
protected Object doBody(String segmentFileName) throws IOException { protected Object doBody(String segmentFileName) throws IOException {
try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) { try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
final int format = input.readInt(); final int format = input.readInt();
final int actualFormat;
if (format == CodecUtil.CODEC_MAGIC) { if (format == CodecUtil.CODEC_MAGIC) {
// 4.0+ CodecUtil.checksumEntireFile(input);
actualFormat = CodecUtil.checkHeaderNoMagic(input, "segments", SegmentInfos.VERSION_40, Integer.MAX_VALUE);
if (actualFormat >= SegmentInfos.VERSION_48) {
CodecUtil.checksumEntireFile(input);
}
} }
// legacy.... // legacy....
} }
@ -382,7 +377,7 @@ public class Lucene {
writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse())); writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse()));
} else { } else {
writeSortType(out, sortField.getType()); writeSortType(out, sortField.getType());
writeMissingValue(out, sortField.missingValue); writeMissingValue(out, sortField.getMissingValue());
} }
out.writeBoolean(sortField.getReverse()); out.writeBoolean(sortField.getReverse());
} }
@ -684,7 +679,7 @@ public class Lucene {
segmentsFileName = infos.getSegmentsFileName(); segmentsFileName = infos.getSegmentsFileName();
this.dir = dir; this.dir = dir;
userData = infos.getUserData(); userData = infos.getUserData();
files = Collections.unmodifiableCollection(infos.files(dir, true)); files = Collections.unmodifiableCollection(infos.files(true));
generation = infos.getGeneration(); generation = infos.getGeneration();
segmentCount = infos.size(); segmentCount = infos.size();
} }
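Three independent Lucene 6 adjustments meet in this file: the default codec moves to Lucene60, SortField.missingValue is read through its getter, and SegmentInfos.files no longer takes the directory. The checksum branch also loses its version gate; as a sketch, under the assumption that any segments file Lucene 6 can still open carries a full-file footer checksum:

    try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
        if (input.readInt() == CodecUtil.CODEC_MAGIC) {
            CodecUtil.checksumEntireFile(input);   // modern segments files always end in a checksum footer
        }
        // files without the codec magic fall through to the legacy handling
    }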

View File

@ -226,7 +226,7 @@ public final class AllTermQuery extends Query {
@Override @Override
public String toString(String field) { public String toString(String field) {
return new TermQuery(term).toString(field) + ToStringUtils.boost(getBoost()); return new TermQuery(term).toString(field);
} }
} }

View File

@ -30,7 +30,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
@ -118,9 +117,7 @@ public class FilterableTermsEnum extends TermsEnum {
}; };
} }
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc()); bits = BitSet.of(docs, context.reader().maxDoc());
builder.or(docs);
bits = builder.build().bits();
// Count how many docs are in our filtered set // Count how many docs are in our filtered set
// TODO make this lazy-loaded only for those that need it? // TODO make this lazy-loaded only for those that need it?

View File

@ -30,7 +30,6 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
@ -51,7 +50,7 @@ public class MultiPhrasePrefixQuery extends Query {
/** /**
* Sets the phrase slop for this query. * Sets the phrase slop for this query.
* *
* @see org.apache.lucene.search.PhraseQuery#setSlop(int) * @see org.apache.lucene.search.PhraseQuery.Builder#setSlop(int)
*/ */
public void setSlop(int s) { public void setSlop(int s) {
slop = s; slop = s;
@ -64,7 +63,7 @@ public class MultiPhrasePrefixQuery extends Query {
/** /**
* Gets the phrase slop for this query. * Gets the phrase slop for this query.
* *
* @see org.apache.lucene.search.PhraseQuery#getSlop() * @see org.apache.lucene.search.PhraseQuery.Builder#getSlop()
*/ */
public int getSlop() { public int getSlop() {
return slop; return slop;
@ -73,7 +72,7 @@ public class MultiPhrasePrefixQuery extends Query {
/** /**
* Add a single term at the next position in the phrase. * Add a single term at the next position in the phrase.
* *
* @see org.apache.lucene.search.PhraseQuery#add(Term) * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term)
*/ */
public void add(Term term) { public void add(Term term) {
add(new Term[]{term}); add(new Term[]{term});
@ -83,7 +82,7 @@ public class MultiPhrasePrefixQuery extends Query {
* Add multiple terms at the next position in the phrase. Any of the terms * Add multiple terms at the next position in the phrase. Any of the terms
* may match. * may match.
* *
* @see org.apache.lucene.search.PhraseQuery#add(Term) * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term)
*/ */
public void add(Term[] terms) { public void add(Term[] terms) {
int position = 0; int position = 0;
@ -98,7 +97,7 @@ public class MultiPhrasePrefixQuery extends Query {
* *
* @param terms the terms * @param terms the terms
* @param position the position of the terms provided as argument * @param position the position of the terms provided as argument
* @see org.apache.lucene.search.PhraseQuery#add(Term, int) * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term, int)
*/ */
public void add(Term[] terms, int position) { public void add(Term[] terms, int position) {
if (termArrays.size() == 0) if (termArrays.size() == 0)
@ -231,8 +230,6 @@ public class MultiPhrasePrefixQuery extends Query {
buffer.append(slop); buffer.append(slop);
} }
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString(); return buffer.toString();
} }

View File

@ -23,7 +23,6 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.PrefixQuery;
@ -132,11 +131,7 @@ public class Queries {
builder.add(clause); builder.add(clause);
} }
builder.setMinimumNumberShouldMatch(msm); builder.setMinimumNumberShouldMatch(msm);
BooleanQuery bq = builder.build(); return builder.build();
if (query.getBoost() != 1f) {
return new BoostQuery(bq, query.getBoost());
}
return bq;
} else { } else {
return query; return query;
} }
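This hunk and the ToStringUtils.boost deletions around it follow from the same upstream change: Lucene removed the mutable boost from Query, so Query.getBoost() cannot be consulted any more and a boost is expressed by wrapping the query instead. The pattern the deleted code used, kept here as a hedged sketch (base and boost stand for whatever the caller has in hand):

    Query base = builder.build();
    // a boost can no longer be set on the query itself; wrap it
    Query maybeBoosted = boost == 1f ? base : new BoostQuery(base, boost);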

View File

@ -29,7 +29,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits; import org.apache.lucene.util.Bits;
import org.apache.lucene.util.ToStringUtils;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
@ -102,7 +101,7 @@ public class FiltersFunctionScoreQuery extends Query {
} }
} }
Query subQuery; final Query subQuery;
final FilterFunction[] filterFunctions; final FilterFunction[] filterFunctions;
final ScoreMode scoreMode; final ScoreMode scoreMode;
final float maxBoost; final float maxBoost;
@ -136,9 +135,7 @@ public class FiltersFunctionScoreQuery extends Query {
Query newQ = subQuery.rewrite(reader); Query newQ = subQuery.rewrite(reader);
if (newQ == subQuery) if (newQ == subQuery)
return this; return this;
FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone(); return new FiltersFunctionScoreQuery(newQ, scoreMode, filterFunctions, maxBoost, minScore, combineFunction);
bq.subQuery = newQ;
return bq;
} }
@Override @Override
@ -355,7 +352,6 @@ public class FiltersFunctionScoreQuery extends Query {
sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}"); sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}");
} }
sb.append("])"); sb.append("])");
sb.append(ToStringUtils.boost(getBoost()));
return sb.toString(); return sb.toString();
} }

View File

@ -28,7 +28,6 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException; import java.io.IOException;
import java.util.Objects; import java.util.Objects;
@ -41,7 +40,7 @@ public class FunctionScoreQuery extends Query {
public static final float DEFAULT_MAX_BOOST = Float.MAX_VALUE; public static final float DEFAULT_MAX_BOOST = Float.MAX_VALUE;
Query subQuery; final Query subQuery;
final ScoreFunction function; final ScoreFunction function;
final float maxBoost; final float maxBoost;
final CombineFunction combineFunction; final CombineFunction combineFunction;
@ -84,9 +83,7 @@ public class FunctionScoreQuery extends Query {
if (newQ == subQuery) { if (newQ == subQuery) {
return this; return this;
} }
FunctionScoreQuery bq = (FunctionScoreQuery) this.clone(); return new FunctionScoreQuery(newQ, function, minScore, combineFunction, maxBoost);
bq.subQuery = newQ;
return bq;
} }
@Override @Override
@ -205,7 +202,6 @@ public class FunctionScoreQuery extends Query {
public String toString(String field) { public String toString(String field) {
StringBuilder sb = new StringBuilder(); StringBuilder sb = new StringBuilder();
sb.append("function score (").append(subQuery.toString(field)).append(",function=").append(function).append(')'); sb.append("function score (").append(subQuery.toString(field)).append(",function=").append(function).append(')');
sb.append(ToStringUtils.boost(getBoost()));
return sb.toString(); return sb.toString();
} }
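With subQuery final, rewrite cannot clone-and-mutate any more; it returns a fresh instance whenever the inner query rewrote to something new, preserving identity otherwise so the rewrite loop terminates. Reassembled from the hunk:

    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        Query newQ = subQuery.rewrite(reader);
        if (newQ == subQuery) {
            return this;   // unchanged inner query: keep identity
        }
        return new FunctionScoreQuery(newQ, function, minScore, combineFunction, maxBoost);
    }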

View File

@ -30,7 +30,7 @@ public class FilterIndexOutput extends IndexOutput {
protected final IndexOutput out; protected final IndexOutput out;
public FilterIndexOutput(String resourceDescription, IndexOutput out) { public FilterIndexOutput(String resourceDescription, IndexOutput out) {
super(resourceDescription); super(resourceDescription, out.getName());
this.out = out; this.out = out;
} }

View File

@ -41,9 +41,9 @@ public class BigArrays {
/** Page size in bytes: 16KB */ /** Page size in bytes: 16KB */
public static final int PAGE_SIZE_IN_BYTES = 1 << 14; public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE; public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES;
public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT; public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Integer.BYTES;
public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG; public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES;
public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF; public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
/** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */ /** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */
@ -490,7 +490,7 @@ public class BigArrays {
if (minSize <= array.size()) { if (minSize <= array.size()) {
return array; return array;
} }
final long newSize = overSize(minSize, BYTE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_BYTE); final long newSize = overSize(minSize, BYTE_PAGE_SIZE, 1);
return resize(array, newSize); return resize(array, newSize);
} }
@ -573,7 +573,7 @@ public class BigArrays {
if (minSize <= array.size()) { if (minSize <= array.size()) {
return array; return array;
} }
final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_INT); final long newSize = overSize(minSize, INT_PAGE_SIZE, Integer.BYTES);
return resize(array, newSize); return resize(array, newSize);
} }
@ -623,7 +623,7 @@ public class BigArrays {
if (minSize <= array.size()) { if (minSize <= array.size()) {
return array; return array;
} }
final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG); final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
return resize(array, newSize); return resize(array, newSize);
} }
@ -670,7 +670,7 @@ public class BigArrays {
if (minSize <= array.size()) { if (minSize <= array.size()) {
return array; return array;
} }
final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG); final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
return resize(array, newSize); return resize(array, newSize);
} }
@ -717,7 +717,7 @@ public class BigArrays {
if (minSize <= array.size()) { if (minSize <= array.size()) {
return array; return array;
} }
final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_FLOAT); final long newSize = overSize(minSize, INT_PAGE_SIZE, Float.BYTES);
return resize(array, newSize); return resize(array, newSize);
} }
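The RamUsageEstimator size constants give way to the primitive-wrapper BYTES constants that Java 8 introduced; a byte is written as a plain 1. The mapping used here and in the Big*Array classes below:

    // RamUsageEstimator.NUM_BYTES_BYTE  -> 1
    // RamUsageEstimator.NUM_BYTES_INT   -> Integer.BYTES   (4)
    // RamUsageEstimator.NUM_BYTES_LONG  -> Long.BYTES      (8)
    // RamUsageEstimator.NUM_BYTES_FLOAT -> Float.BYTES     (4)
    int intSlotsPerPage = BigArrays.PAGE_SIZE_IN_BYTES / Integer.BYTES;   // 16 KB / 4 = 4096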

View File

@ -127,7 +127,7 @@ final class BigByteArray extends AbstractBigArray implements ByteArray {
@Override @Override
protected int numBytesPerElement() { protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_BYTE; return 1;
} }
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

View File

@ -71,7 +71,7 @@ final class BigDoubleArray extends AbstractBigArray implements DoubleArray {
@Override @Override
protected int numBytesPerElement() { protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_INT; return Integer.BYTES;
} }
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

View File

@ -71,7 +71,7 @@ final class BigFloatArray extends AbstractBigArray implements FloatArray {
@Override @Override
protected int numBytesPerElement() { protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_FLOAT; return Float.BYTES;
} }
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

View File

@ -88,7 +88,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray {
@Override @Override
protected int numBytesPerElement() { protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_INT; return Integer.BYTES;
} }
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

View File

@ -70,7 +70,7 @@ final class BigLongArray extends AbstractBigArray implements LongArray {
@Override @Override
protected int numBytesPerElement() { protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_LONG; return Long.BYTES;
} }
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

View File

@ -65,7 +65,7 @@ final class BigObjectArray<T> extends AbstractBigArray implements ObjectArray<T>
@Override @Override
protected int numBytesPerElement() { protected int numBytesPerElement() {
return RamUsageEstimator.NUM_BYTES_INT; return Integer.BYTES;
} }
/** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */ /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */

View File

@ -388,7 +388,7 @@ public class BloomFilter {
} }
public long ramBytesUsed() { public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_LONG * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16; return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
} }
} }

View File

@ -333,7 +333,7 @@ public class CollectionUtils {
assert indices.length >= numValues; assert indices.length >= numValues;
if (numValues > 1) { if (numValues > 1) {
new InPlaceMergeSorter() { new InPlaceMergeSorter() {
final Comparator<BytesRef> comparator = BytesRef.getUTF8SortedAsUnicodeComparator(); final Comparator<BytesRef> comparator = Comparator.naturalOrder();
@Override @Override
protected int compare(int i, int j) { protected int compare(int i, int j) {
return comparator.compare(bytes.get(scratch, indices[i]), bytes.get(scratch1, indices[j])); return comparator.compare(bytes.get(scratch, indices[i]), bytes.get(scratch1, indices[j]));
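BytesRef compares as unsigned bytes, which for UTF-8 data is exactly Unicode code point order, so the dedicated comparator could be dropped in favour of natural ordering:

    // drop-in replacement for the removed BytesRef.getUTF8SortedAsUnicodeComparator()
    Comparator<BytesRef> comparator = Comparator.naturalOrder();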

View File

@ -116,7 +116,7 @@ public abstract class MetaDataStateFormat<T> {
final Path finalStatePath = stateLocation.resolve(fileName); final Path finalStatePath = stateLocation.resolve(fileName);
try { try {
final String resourceDesc = "MetaDataStateFormat.write(path=\"" + tmpStatePath + "\")"; final String resourceDesc = "MetaDataStateFormat.write(path=\"" + tmpStatePath + "\")";
try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) { try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, fileName, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) {
CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION); CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION);
out.writeInt(format.index()); out.writeInt(format.index());
out.writeLong(version); out.writeLong(version);

View File

@ -20,8 +20,8 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.ar.ArabicAnalyzer; import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.bg.BulgarianAnalyzer; import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
import org.apache.lucene.analysis.br.BrazilianAnalyzer; import org.apache.lucene.analysis.br.BrazilianAnalyzer;
@ -300,7 +300,7 @@ public class Analysis {
* <p>Although most analyzers generate character terms (CharTermAttribute), * <p>Although most analyzers generate character terms (CharTermAttribute),
* some tokens only contain binary terms (BinaryTermAttribute, * some tokens only contain binary terms (BinaryTermAttribute,
* CharTermAttribute being a special type of BinaryTermAttribute), such as * CharTermAttribute being a special type of BinaryTermAttribute), such as
* {@link NumericTokenStream}, and are unsuitable for highlighting and * {@link LegacyNumericTokenStream}, and are unsuitable for highlighting and
* more-like-this queries which expect character terms.</p> * more-like-this queries which expect character terms.</p>
*/ */
public static boolean isCharacterTokenStream(TokenStream tokenStream) { public static boolean isCharacterTokenStream(TokenStream tokenStream) {
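Lucene 6 renames the old trie-encoded numeric classes with a Legacy prefix (the points API is their successor); the behaviour and setters are unchanged, which is why the Numeric*Tokenizer hunks below touch nothing but type names. A minimal sketch:

    // same API as the former NumericTokenStream, only the class name changed
    LegacyNumericTokenStream stream = new LegacyNumericTokenStream(4);   // 4 = precision step
    stream.setLongValue(42L);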

View File

@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -43,14 +41,11 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
public static final int SIDE_BACK = 2; public static final int SIDE_BACK = 2;
private final int side; private final int side;
private org.elasticsearch.Version esVersion;
public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE); this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
this.side = parseSide(settings.get("side", "front")); this.side = parseSide(settings.get("side", "front"));
this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings.getSettings());
} }
static int parseSide(String side) { static int parseSide(String side) {
@ -70,15 +65,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
result = new ReverseStringFilter(result); result = new ReverseStringFilter(result);
} }
if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) { result = new EdgeNGramTokenFilter(result, minGram, maxGram);
/*
* We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
* Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
*/
result = new EdgeNGramTokenFilter(result, minGram, maxGram);
} else {
result = new Lucene43EdgeNGramTokenFilter(result, minGram, maxGram);
}
// side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect
if (side == SIDE_BACK) { if (side == SIDE_BACK) {

View File

@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -33,55 +31,33 @@ import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenC
/** /**
* *
*/ */
@SuppressWarnings("deprecation")
public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
private final int minGram; private final int minGram;
private final int maxGram; private final int maxGram;
private final Lucene43EdgeNGramTokenizer.Side side;
private final CharMatcher matcher; private final CharMatcher matcher;
protected org.elasticsearch.Version esVersion;
public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE); this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
this.side = Lucene43EdgeNGramTokenizer.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
this.matcher = parseTokenChars(settings.getAsArray("token_chars")); this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
this.esVersion = indexSettings.getIndexVersionCreated();
} }
@Override @Override
public Tokenizer create() { public Tokenizer create() {
if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) { if (matcher == null) {
/* return new EdgeNGramTokenizer(minGram, maxGram);
* We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
* Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
*/
if (side == Lucene43EdgeNGramTokenizer.Side.BACK) {
throw new IllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use"
+ " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs."
+ " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back"
+ " in combination with a \"keyword\" tokenizer");
}
final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // always use 4.4 or higher
if (matcher == null) {
return new EdgeNGramTokenizer(minGram, maxGram);
} else {
return new EdgeNGramTokenizer(minGram, maxGram) {
@Override
protected boolean isTokenChar(int chr) {
return matcher.isTokenChar(chr);
}
};
}
} else { } else {
return new Lucene43EdgeNGramTokenizer(side, minGram, maxGram); return new EdgeNGramTokenizer(minGram, maxGram) {
@Override
protected boolean isTokenChar(int chr) {
return matcher.isTokenChar(chr);
}
};
} }
} }
} }
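Without the Lucene 4.x branch, restricting grams to the configured character classes is done by overriding isTokenChar on the modern tokenizer, exactly as the new create() above does. Isolated, with minGram, maxGram and matcher standing for the factory fields:

    Tokenizer tokenizer = new EdgeNGramTokenizer(minGram, maxGram) {
        @Override
        protected boolean isTokenChar(int chr) {
            return matcher.isTokenChar(chr);   // honour the token_chars setting
        }
    };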

View File

@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.KeepWordFilter; import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter;
import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version; import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
@ -40,9 +39,6 @@ import org.elasticsearch.index.IndexSettings;
* <li>{@value #KEEP_WORDS_PATH_KEY} a reference to a file containing the words * <li>{@value #KEEP_WORDS_PATH_KEY} a reference to a file containing the words
* / tokens to keep. Note: this is an alternative to {@value #KEEP_WORDS_KEY}; if * / tokens to keep. Note: this is an alternative to {@value #KEEP_WORDS_KEY}; if
* both are set an exception will be thrown.</li> * both are set an exception will be thrown.</li>
* <li>{@value #ENABLE_POS_INC_KEY} <code>true</code> iff the filter should
* maintain position increments for dropped tokens. The default is
* <code>true</code>.</li>
* <li>{@value #KEEP_WORDS_CASE_KEY} whether to lower case the keep words. The * <li>{@value #KEEP_WORDS_CASE_KEY} whether to lower case the keep words. The
* default is <code>false</code>, i.e. matching is case-sensitive.</li> * default is <code>false</code>, i.e. matching is case-sensitive.</li>
* </ul> * </ul>
@ -51,10 +47,11 @@ import org.elasticsearch.index.IndexSettings;
*/ */
public class KeepWordFilterFactory extends AbstractTokenFilterFactory { public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
private final CharArraySet keepWords; private final CharArraySet keepWords;
private final boolean enablePositionIncrements;
private static final String KEEP_WORDS_KEY = "keep_words"; private static final String KEEP_WORDS_KEY = "keep_words";
private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path"; private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path";
private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc
// unsupported ancient option
private static final String ENABLE_POS_INC_KEY = "enable_position_increments"; private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
public KeepWordFilterFactory(IndexSettings indexSettings, public KeepWordFilterFactory(IndexSettings indexSettings,
@ -68,26 +65,14 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `" throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `"
+ KEEP_WORDS_PATH_KEY + "` to be configured"); + KEEP_WORDS_PATH_KEY + "` to be configured");
} }
if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { if (settings.get(ENABLE_POS_INC_KEY) != null) {
throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
+ " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
} }
enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY); this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY);
} }
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4)) { return new KeepWordFilter(tokenStream, keepWords);
return new KeepWordFilter(tokenStream, keepWords);
} else {
@SuppressWarnings("deprecation")
final TokenStream filter = new Lucene43KeepWordFilter(enablePositionIncrements, tokenStream, keepWords);
return filter;
}
} }
} }
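The same cleanup repeats in LengthTokenFilterFactory and NGramTokenFilterFactory below: the Lucene 4.3/4.4 compatibility branch and its Lucene43* fallback go away, the ancient enable_position_increments option now fails hard instead of being honoured, and create() collapses to a single path. In outline:

    // before: version-gated
    //   version >= LUCENE_4_4 -> new KeepWordFilter(tokenStream, keepWords)
    //   older                 -> new Lucene43KeepWordFilter(enablePosInc, tokenStream, keepWords)
    // after: one code path
    return new KeepWordFilter(tokenStream, keepWords);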

View File

@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.LengthFilter; import org.apache.lucene.analysis.miscellaneous.LengthFilter;
import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -34,28 +32,21 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory {
private final int min; private final int min;
private final int max; private final int max;
private final boolean enablePositionIncrements;
// ancient unsupported option
private static final String ENABLE_POS_INC_KEY = "enable_position_increments"; private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
min = settings.getAsInt("min", 0); min = settings.getAsInt("min", 0);
max = settings.getAsInt("max", Integer.MAX_VALUE); max = settings.getAsInt("max", Integer.MAX_VALUE);
if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { if (settings.get(ENABLE_POS_INC_KEY) != null) {
throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
+ " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
} }
enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
} }
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4)) { return new LengthFilter(tokenStream, min, max);
return new LengthFilter(tokenStream, min, max);
} else {
@SuppressWarnings("deprecation")
final TokenStream filter = new Lucene43LengthFilter(enablePositionIncrements, tokenStream, min, max);
return filter;
}
} }
} }

View File

@ -20,9 +20,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter;
import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -44,14 +42,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE); this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
} }
@SuppressWarnings("deprecation")
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // we supported it since 4.3 return new NGramTokenFilter(tokenStream, minGram, maxGram);
if (version.onOrAfter(Version.LUCENE_4_3)) {
return new NGramTokenFilter(tokenStream, minGram, maxGram);
} else {
return new Lucene43NGramTokenFilter(tokenStream, minGram, maxGram);
}
} }
} }

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.joda.time.format.DateTimeFormatter; import org.joda.time.format.DateTimeFormatter;
import java.io.IOException; import java.io.IOException;
@ -30,11 +30,11 @@ import java.io.IOException;
public class NumericDateTokenizer extends NumericTokenizer { public class NumericDateTokenizer extends NumericTokenizer {
public NumericDateTokenizer(int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException { public NumericDateTokenizer(int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException {
super(new NumericTokenStream(precisionStep), buffer, dateTimeFormatter); super(new LegacyNumericTokenStream(precisionStep), buffer, dateTimeFormatter);
} }
@Override @Override
protected void setValue(NumericTokenStream tokenStream, String value) { protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
tokenStream.setLongValue(((DateTimeFormatter) extra).parseMillis(value)); tokenStream.setLongValue(((DateTimeFormatter) extra).parseMillis(value));
} }
} }

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.LegacyNumericTokenStream;
import java.io.IOException; import java.io.IOException;
@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericDoubleTokenizer extends NumericTokenizer { public class NumericDoubleTokenizer extends NumericTokenizer {
public NumericDoubleTokenizer(int precisionStep, char[] buffer) throws IOException { public NumericDoubleTokenizer(int precisionStep, char[] buffer) throws IOException {
super(new NumericTokenStream(precisionStep), buffer, null); super(new LegacyNumericTokenStream(precisionStep), buffer, null);
} }
@Override @Override
protected void setValue(NumericTokenStream tokenStream, String value) { protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
tokenStream.setDoubleValue(Double.parseDouble(value)); tokenStream.setDoubleValue(Double.parseDouble(value));
} }
} }

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.LegacyNumericTokenStream;
import java.io.IOException; import java.io.IOException;
@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericFloatTokenizer extends NumericTokenizer { public class NumericFloatTokenizer extends NumericTokenizer {
public NumericFloatTokenizer(int precisionStep, char[] buffer) throws IOException { public NumericFloatTokenizer(int precisionStep, char[] buffer) throws IOException {
super(new NumericTokenStream(precisionStep), buffer, null); super(new LegacyNumericTokenStream(precisionStep), buffer, null);
} }
@Override @Override
protected void setValue(NumericTokenStream tokenStream, String value) { protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
tokenStream.setFloatValue(Float.parseFloat(value)); tokenStream.setFloatValue(Float.parseFloat(value));
} }
} }

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.LegacyNumericTokenStream;
import java.io.IOException; import java.io.IOException;
@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericIntegerTokenizer extends NumericTokenizer { public class NumericIntegerTokenizer extends NumericTokenizer {
public NumericIntegerTokenizer(int precisionStep, char[] buffer) throws IOException { public NumericIntegerTokenizer(int precisionStep, char[] buffer) throws IOException {
super(new NumericTokenStream(precisionStep), buffer, null); super(new LegacyNumericTokenStream(precisionStep), buffer, null);
} }
@Override @Override
protected void setValue(NumericTokenStream tokenStream, String value) { protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
tokenStream.setIntValue(Integer.parseInt(value)); tokenStream.setIntValue(Integer.parseInt(value));
} }
} }

View File

@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.LegacyNumericTokenStream;
import java.io.IOException; import java.io.IOException;
@ -29,11 +29,11 @@ import java.io.IOException;
public class NumericLongTokenizer extends NumericTokenizer { public class NumericLongTokenizer extends NumericTokenizer {
public NumericLongTokenizer(int precisionStep, char[] buffer) throws IOException { public NumericLongTokenizer(int precisionStep, char[] buffer) throws IOException {
super(new NumericTokenStream(precisionStep), buffer, null); super(new LegacyNumericTokenStream(precisionStep), buffer, null);
} }
@Override @Override
protected void setValue(NumericTokenStream tokenStream, String value) { protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
tokenStream.setLongValue(Long.parseLong(value)); tokenStream.setLongValue(Long.parseLong(value));
} }
} }
@ -19,7 +19,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.NumericTokenStream; import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.Attribute; import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory; import org.apache.lucene.util.AttributeFactory;
@ -45,12 +45,12 @@ public abstract class NumericTokenizer extends Tokenizer {
}; };
} }
private final NumericTokenStream numericTokenStream; private final LegacyNumericTokenStream numericTokenStream;
private final char[] buffer; private final char[] buffer;
protected final Object extra; protected final Object extra;
private boolean started; private boolean started;
protected NumericTokenizer(NumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException { protected NumericTokenizer(LegacyNumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException {
super(delegatingAttributeFactory(numericTokenStream)); super(delegatingAttributeFactory(numericTokenStream));
this.numericTokenStream = numericTokenStream; this.numericTokenStream = numericTokenStream;
// Add attributes from the numeric token stream, this works fine because the attribute factory delegates to numericTokenStream // Add attributes from the numeric token stream, this works fine because the attribute factory delegates to numericTokenStream
@ -95,5 +95,5 @@ public abstract class NumericTokenizer extends Tokenizer {
numericTokenStream.close(); numericTokenStream.close();
} }
protected abstract void setValue(NumericTokenStream tokenStream, String value); protected abstract void setValue(LegacyNumericTokenStream tokenStream, String value);
} }
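For context on these four tokenizers: Lucene 6 keeps the trie-encoding numeric stream under a Legacy name, so only the class references change. A minimal sketch of the renamed API, assuming lucene-core 6.x on the classpath (the value 42 and the precision step 16 are arbitrary example inputs):

import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;

public class LegacyNumericTokenStreamDemo {
    public static void main(String[] args) throws Exception {
        // Same constructor the tokenizers above delegate to.
        try (LegacyNumericTokenStream stream = new LegacyNumericTokenStream(16)) {
            stream.setLongValue(42L);
            TermToBytesRefAttribute term = stream.getAttribute(TermToBytesRefAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                // One prefix-coded term per trie shift; shift 0 is the full-precision term.
                System.out.println(term.getBytesRef());
            }
            stream.end();
        }
    }
}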
@ -40,13 +40,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analy
public PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { public PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
Version esVersion = indexSettings.getIndexVersionCreated(); final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
final CharArraySet defaultStopwords;
if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
defaultStopwords = CharArraySet.EMPTY_SET;
} else {
defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
}
boolean lowercase = settings.getAsBoolean("lowercase", true); boolean lowercase = settings.getAsBoolean("lowercase", true);
CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
@ -28,7 +28,6 @@ import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
import org.apache.lucene.analysis.snowball.SnowballFilter; import org.apache.lucene.analysis.snowball.SnowballFilter;
import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version; import org.apache.lucene.util.Version;
@ -64,12 +63,7 @@ public final class SnowballAnalyzer extends Analyzer {
and a {@link SnowballFilter} */ and a {@link SnowballFilter} */
@Override @Override
public TokenStreamComponents createComponents(String fieldName) { public TokenStreamComponents createComponents(String fieldName) {
final Tokenizer tokenizer; final Tokenizer tokenizer = new StandardTokenizer();
if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) {
tokenizer = new StandardTokenizer();
} else {
tokenizer = new StandardTokenizer40();
}
TokenStream result = tokenizer; TokenStream result = tokenizer;
// remove the possessive 's for english stemmers // remove the possessive 's for english stemmers
if (name.equals("English") || name.equals("Porter") || name.equals("Lovins")) if (name.equals("English") || name.equals("Porter") || name.equals("Lovins"))
@ -33,18 +33,10 @@ import org.elasticsearch.index.IndexSettings;
public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardAnalyzer> { public class StandardAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardAnalyzer> {
private final StandardAnalyzer standardAnalyzer; private final StandardAnalyzer standardAnalyzer;
private final Version esVersion;
public StandardAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { public StandardAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
this.esVersion = indexSettings.getIndexVersionCreated(); final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
final CharArraySet defaultStopwords;
if (esVersion.onOrAfter(Version.V_1_0_0_Beta1)) {
defaultStopwords = CharArraySet.EMPTY_SET;
} else {
defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
}
CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
int maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH); int maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
standardAnalyzer = new StandardAnalyzer(stopWords); standardAnalyzer = new StandardAnalyzer(stopWords);
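The collapsed branches above only affected indices created before 1.0; since then the default stopword set has been empty, and stopwords are added only through explicit settings. A short sketch of the resulting behaviour, using the same Lucene classes these providers import:

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.CharArraySet;

public class EmptyStopwordsDemo {
    public static void main(String[] args) throws IOException {
        // No implicit English stopwords: "the" survives analysis.
        try (StandardAnalyzer analyzer = new StandardAnalyzer(CharArraySet.EMPTY_SET);
             TokenStream ts = analyzer.tokenStream("field", "the quick fox")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term.toString()); // the, quick, fox
            }
            ts.end();
        }
    }
}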
@ -26,10 +26,8 @@ import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.standard.StandardFilter; import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.StopwordAnalyzerBase; import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
import org.apache.lucene.util.Version;
public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase { public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@ -47,12 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
@Override @Override
protected TokenStreamComponents createComponents(final String fieldName) { protected TokenStreamComponents createComponents(final String fieldName) {
final Tokenizer src; final Tokenizer src = new StandardTokenizer();
if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) {
src = new StandardTokenizer();
} else {
src = new StandardTokenizer40();
}
TokenStream tok = new StandardFilter(src); TokenStream tok = new StandardFilter(src);
tok = new LowerCaseFilter(tok); tok = new LowerCaseFilter(tok);
if (!stopwords.isEmpty()) { if (!stopwords.isEmpty()) {
@ -32,17 +32,10 @@ import org.elasticsearch.index.IndexSettings;
public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardHtmlStripAnalyzer> { public class StandardHtmlStripAnalyzerProvider extends AbstractIndexAnalyzerProvider<StandardHtmlStripAnalyzer> {
private final StandardHtmlStripAnalyzer analyzer; private final StandardHtmlStripAnalyzer analyzer;
private final Version esVersion;
public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) { public StandardHtmlStripAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
this.esVersion = indexSettings.getIndexVersionCreated(); final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
final CharArraySet defaultStopwords;
if (esVersion.onOrAfter(Version.V_1_0_0_RC1)) {
defaultStopwords = CharArraySet.EMPTY_SET;
} else {
defaultStopwords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
}
CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords); CharArraySet stopWords = Analysis.parseStopWords(env, settings, defaultStopwords);
analyzer = new StandardHtmlStripAnalyzer(stopWords); analyzer = new StandardHtmlStripAnalyzer(stopWords);
analyzer.setVersion(version); analyzer.setVersion(version);
@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -41,14 +39,8 @@ public class StandardTokenizerFactory extends AbstractTokenizerFactory {
@Override @Override
public Tokenizer create() { public Tokenizer create() {
if (version.onOrAfter(Version.LUCENE_4_7_0)) { StandardTokenizer tokenizer = new StandardTokenizer();
StandardTokenizer tokenizer = new StandardTokenizer(); tokenizer.setMaxTokenLength(maxTokenLength);
tokenizer.setMaxTokenLength(maxTokenLength); return tokenizer;
return tokenizer;
} else {
StandardTokenizer40 tokenizer = new StandardTokenizer40();
tokenizer.setMaxTokenLength(maxTokenLength);
return tokenizer;
}
} }
} }
@ -122,11 +122,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
// English stemmers // English stemmers
} else if ("english".equalsIgnoreCase(language)) { } else if ("english".equalsIgnoreCase(language)) {
if (indexVersion.onOrAfter(Version.V_1_3_0)) { return new PorterStemFilter(tokenStream);
return new PorterStemFilter(tokenStream);
} else {
return new SnowballFilter(tokenStream, new EnglishStemmer());
}
} else if ("light_english".equalsIgnoreCase(language) || "lightEnglish".equalsIgnoreCase(language) } else if ("light_english".equalsIgnoreCase(language) || "lightEnglish".equalsIgnoreCase(language)
|| "kstem".equalsIgnoreCase(language)) { || "kstem".equalsIgnoreCase(language)) {
return new KStemFilter(tokenStream); return new KStemFilter(tokenStream);
@ -135,11 +131,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
} else if ("porter".equalsIgnoreCase(language)) { } else if ("porter".equalsIgnoreCase(language)) {
return new PorterStemFilter(tokenStream); return new PorterStemFilter(tokenStream);
} else if ("porter2".equalsIgnoreCase(language)) { } else if ("porter2".equalsIgnoreCase(language)) {
if (indexVersion.onOrAfter(Version.V_1_3_0)) { return new SnowballFilter(tokenStream, new EnglishStemmer());
return new SnowballFilter(tokenStream, new EnglishStemmer());
} else {
return new SnowballFilter(tokenStream, new PorterStemmer());
}
} else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) { } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) {
return new EnglishMinimalStemFilter(tokenStream); return new EnglishMinimalStemFilter(tokenStream);
} else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) { } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) {
@ -20,7 +20,6 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.Lucene43StopFilter;
import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.CharArraySet;
@ -42,7 +41,6 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean ignoreCase; private final boolean ignoreCase;
private final boolean enablePositionIncrements;
private final boolean removeTrailing; private final boolean removeTrailing;
public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
@ -50,21 +48,15 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
this.ignoreCase = settings.getAsBoolean("ignore_case", false); this.ignoreCase = settings.getAsBoolean("ignore_case", false);
this.removeTrailing = settings.getAsBoolean("remove_trailing", true); this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase); this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) { if (settings.get("enable_position_increments") != null) {
throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams." throw new IllegalArgumentException("enable_position_increments is not supported anymore. Please fix your analysis chain");
+ " Please fix your analysis chain or use an older compatibility version (<= 4.3).");
} }
this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true);
} }
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (removeTrailing) { if (removeTrailing) {
if (version.onOrAfter(Version.LUCENE_4_4)) { return new StopFilter(tokenStream, stopWords);
return new StopFilter(tokenStream, stopWords);
} else {
return new Lucene43StopFilter(enablePositionIncrements, tokenStream, stopWords);
}
} else { } else {
return new SuggestStopFilter(tokenStream, stopWords); return new SuggestStopFilter(tokenStream, stopWords);
} }
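Only the current StopFilter remains; remove_trailing: false still selects the suggest-oriented filter, which keeps a trailing stopword so that partially typed phrases are not emptied out. A sketch of the two branches, assuming the lucene-suggest module for SuggestStopFilter:

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.StopAnalyzer;
import org.apache.lucene.analysis.core.StopFilter;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;

public final class StopFilterChoice {
    private StopFilterChoice() {}

    // Mirrors the factory's create(): removeTrailing picks the plain StopFilter.
    public static TokenStream wrap(TokenStream in, boolean removeTrailing) {
        CharArraySet stopWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
        return removeTrailing ? new StopFilter(in, stopWords)
                              : new SuggestStopFilter(in, stopWords);
    }
}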
@ -20,9 +20,7 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.Lucene43TrimFilter;
import org.apache.lucene.analysis.miscellaneous.TrimFilter; import org.apache.lucene.analysis.miscellaneous.TrimFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -32,26 +30,17 @@ import org.elasticsearch.index.IndexSettings;
*/ */
public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { public class TrimTokenFilterFactory extends AbstractTokenFilterFactory {
private final boolean updateOffsets;
private static final String UPDATE_OFFSETS_KEY = "update_offsets"; private static final String UPDATE_OFFSETS_KEY = "update_offsets";
public TrimTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { public TrimTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
super(indexSettings, name, settings); super(indexSettings, name, settings);
if (version.onOrAfter(Version.LUCENE_4_4_0) && settings.get(UPDATE_OFFSETS_KEY) != null) { if (settings.get(UPDATE_OFFSETS_KEY) != null) {
throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use" throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain");
+ " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
} }
this.updateOffsets = settings.getAsBoolean("update_offsets", false);
} }
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4_0)) { return new TrimFilter(tokenStream);
return new TrimFilter(tokenStream);
} else {
@SuppressWarnings("deprecation")
final TokenStream filter = new Lucene43TrimFilter(tokenStream, updateOffsets);
return filter;
}
} }
} }
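TrimFilter lost its offset-updating mode in Lucene 4.4, so the factory now rejects update_offsets outright instead of carrying a dead flag, and the single-argument constructor is all that is left. A tiny sketch:

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.TrimFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TrimDemo {
    public static void main(String[] args) throws IOException {
        Tokenizer keyword = new KeywordTokenizer();
        keyword.setReader(new StringReader("  padded  "));
        try (TokenStream ts = new TrimFilter(keyword)) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            if (ts.incrementToken()) {
                System.out.println("[" + term + "]"); // [padded]
            }
            ts.end();
        }
    }
}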
@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer; import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
import org.apache.lucene.analysis.standard.std40.UAX29URLEmailTokenizer40;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -42,14 +40,8 @@ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {
@Override @Override
public Tokenizer create() { public Tokenizer create() {
if (version.onOrAfter(Version.LUCENE_4_7)) { UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer();
UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer(); tokenizer.setMaxTokenLength(maxTokenLength);
tokenizer.setMaxTokenLength(maxTokenLength); return tokenizer;
return tokenizer;
} else {
UAX29URLEmailTokenizer40 tokenizer = new UAX29URLEmailTokenizer40();
tokenizer.setMaxTokenLength(maxTokenLength);
return tokenizer;
}
} }
} }
@ -20,11 +20,9 @@
package org.elasticsearch.index.analysis; package org.elasticsearch.index.analysis;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.Lucene47WordDelimiterFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter; import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator; import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -96,17 +94,10 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_8)) { return new WordDelimiterFilter(tokenStream,
return new WordDelimiterFilter(tokenStream,
charTypeTable, charTypeTable,
flags, flags,
protoWords); protoWords);
} else {
return new Lucene47WordDelimiterFilter(tokenStream,
charTypeTable,
flags,
protoWords);
}
} }
public int getFlag(int flag, Settings settings, String key, boolean defaultValue) { public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {
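Dropping the Lucene47 branch leaves a single WordDelimiterFilter constructor call. A sketch with an explicit flag set (the flags and input below are illustrative, not the factory's parsed settings):

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class WordDelimiterDemo {
    public static void main(String[] args) throws IOException {
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("PowerShot500"));
        int flags = WordDelimiterFilter.GENERATE_WORD_PARTS
                | WordDelimiterFilter.GENERATE_NUMBER_PARTS
                | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
                | WordDelimiterFilter.SPLIT_ON_NUMERICS;
        try (TokenStream ts = new WordDelimiterFilter(tokenizer, flags, null)) { // null: no protected words
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term.toString()); // Power, Shot, 500
            }
            ts.end();
        }
    }
}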
@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis.compound;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter; import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;
import org.apache.lucene.analysis.compound.Lucene43DictionaryCompoundWordTokenFilter;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -41,12 +39,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends AbstractCompoundWo
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4_0)) { return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
minSubwordSize, maxSubwordSize, onlyLongestMatch);
} else {
return new Lucene43DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
minSubwordSize, maxSubwordSize, onlyLongestMatch);
}
} }
} }
@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis.compound;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter; import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter;
import org.apache.lucene.analysis.compound.Lucene43HyphenationCompoundWordTokenFilter;
import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree; import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.apache.lucene.util.Version;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -60,12 +58,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW
@Override @Override
public TokenStream create(TokenStream tokenStream) { public TokenStream create(TokenStream tokenStream) {
if (version.onOrAfter(Version.LUCENE_4_4_0)) { return new HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize,
return new HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize, minSubwordSize, maxSubwordSize, onlyLongestMatch);
minSubwordSize, maxSubwordSize, onlyLongestMatch);
} else {
return new Lucene43HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize,
minSubwordSize, maxSubwordSize, onlyLongestMatch);
}
} }
} }
@ -22,6 +22,7 @@ package org.elasticsearch.index.codec;
import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
import org.apache.lucene.codecs.lucene54.Lucene54Codec; import org.apache.lucene.codecs.lucene54.Lucene54Codec;
import org.apache.lucene.codecs.lucene60.Lucene60Codec;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLogger;
@ -47,8 +48,8 @@ public class CodecService {
public CodecService(@Nullable MapperService mapperService, ESLogger logger) { public CodecService(@Nullable MapperService mapperService, ESLogger logger) {
final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder(); final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
if (mapperService == null) { if (mapperService == null) {
codecs.put(DEFAULT_CODEC, new Lucene54Codec()); codecs.put(DEFAULT_CODEC, new Lucene60Codec());
codecs.put(BEST_COMPRESSION_CODEC, new Lucene54Codec(Mode.BEST_COMPRESSION)); codecs.put(BEST_COMPRESSION_CODEC, new Lucene60Codec(Mode.BEST_COMPRESSION));
} else { } else {
codecs.put(DEFAULT_CODEC, codecs.put(DEFAULT_CODEC,
new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));
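The default and best_compression codecs move to their Lucene 6 equivalents. For reference, a sketch of wiring such a codec into a writer (the service itself only registers the instances; the directory and analyzer here are placeholders):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
import org.apache.lucene.codecs.lucene60.Lucene60Codec;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;

public class CodecDemo {
    public static void main(String[] args) throws Exception {
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        // Same constructor the service now uses for BEST_COMPRESSION_CODEC.
        config.setCodec(new Lucene60Codec(Mode.BEST_COMPRESSION));
        try (IndexWriter writer = new IndexWriter(new RAMDirectory(), config)) {
            writer.commit();
        }
    }
}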
@ -22,7 +22,7 @@ package org.elasticsearch.index.codec;
import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
import org.apache.lucene.codecs.lucene54.Lucene54Codec; import org.apache.lucene.codecs.lucene60.Lucene60Codec;
import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType;
@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
* configured for a specific field the default postings format is used. * configured for a specific field the default postings format is used.
*/ */
// LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
public class PerFieldMappingPostingFormatCodec extends Lucene54Codec { public class PerFieldMappingPostingFormatCodec extends Lucene60Codec {
private final ESLogger logger; private final ESLogger logger;
private final MapperService mapperService; private final MapperService mapperService;
@ -19,7 +19,6 @@
package org.elasticsearch.index.engine; package org.elasticsearch.index.engine;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.Translog;
/** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */ /** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */
@ -44,6 +43,6 @@ class DeleteVersionValue extends VersionValue {
@Override @Override
public long ramBytesUsed() { public long ramBytesUsed() {
return super.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_LONG; return super.ramBytesUsed() + Long.BYTES;
} }
} }
@ -275,7 +275,7 @@ public class InternalEngine extends Engine {
SearcherManager searcherManager = null; SearcherManager searcherManager = null;
try { try {
try { try {
final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId); final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
searcherManager = new SearcherManager(directoryReader, searcherFactory); searcherManager = new SearcherManager(directoryReader, searcherFactory);
lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store); lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
success = true; success = true;
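Lucene 6 removes the applyAllDeletes flag from DirectoryReader.open(IndexWriter): near-real-time readers now always apply deletes, which is exactly what this call site passed. A minimal sketch of the new signature:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;

public class NrtReaderDemo {
    public static void main(String[] args) throws Exception {
        try (IndexWriter writer = new IndexWriter(new RAMDirectory(),
                new IndexWriterConfig(new StandardAnalyzer()))) {
            // Formerly DirectoryReader.open(writer, true); the boolean is gone in Lucene 6.
            try (DirectoryReader reader = DirectoryReader.open(writer)) {
                System.out.println(reader.numDocs()); // 0
            }
        }
    }
}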
@ -64,7 +64,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
* *
* NUM_BYTES_OBJECT_HEADER + 2*NUM_BYTES_INT + NUM_BYTES_OBJECT_REF + NUM_BYTES_ARRAY_HEADER [ + bytes.length] */ * NUM_BYTES_OBJECT_HEADER + 2*NUM_BYTES_INT + NUM_BYTES_OBJECT_REF + NUM_BYTES_ARRAY_HEADER [ + bytes.length] */
private static final int BASE_BYTES_PER_BYTESREF = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + private static final int BASE_BYTES_PER_BYTESREF = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER +
2*RamUsageEstimator.NUM_BYTES_INT + 2*Integer.BYTES +
RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_REF +
RamUsageEstimator.NUM_BYTES_ARRAY_HEADER; RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;
@ -76,7 +76,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
* CHM's pointer to CHM.Entry, double for approx load factor: * CHM's pointer to CHM.Entry, double for approx load factor:
* + 2*NUM_BYTES_OBJECT_REF */ * + 2*NUM_BYTES_OBJECT_REF */
private static final int BASE_BYTES_PER_CHM_ENTRY = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + private static final int BASE_BYTES_PER_CHM_ENTRY = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER +
RamUsageEstimator.NUM_BYTES_INT + Integer.BYTES +
5*RamUsageEstimator.NUM_BYTES_OBJECT_REF; 5*RamUsageEstimator.NUM_BYTES_OBJECT_REF;
/** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account /** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account
@ -54,7 +54,7 @@ class VersionValue implements Accountable {
@Override @Override
public long ramBytesUsed() { public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed(); return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Long.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed();
} }
@Override @Override
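The accounting changes in these three files swap Lucene's removed primitive-size constants (RamUsageEstimator.NUM_BYTES_INT, NUM_BYTES_LONG) for the JDK constants Java 8 introduced; the computed sizes are identical. For instance, the shallow part of the VersionValue expression above:

import org.apache.lucene.util.RamUsageEstimator;

public class ShallowSizeDemo {
    public static void main(String[] args) {
        // Integer.BYTES == 4 and Long.BYTES == 8, same values as the old NUM_BYTES_* constants.
        long shallowVersionValueBytes = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
                + Long.BYTES
                + RamUsageEstimator.NUM_BYTES_OBJECT_REF;
        System.out.println(shallowVersionValueBytes); // translogLocation.ramBytesUsed() is added on top
    }
}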
@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef;
/** /**
* A list of per-document binary values, sorted * A list of per-document binary values, sorted
* according to {@link BytesRef#getUTF8SortedAsUnicodeComparator()}. * according to {@link BytesRef#compareTo(BytesRef)}.
* There might be dups however. * There might be dups however.
*/ */
public abstract class SortedBinaryDocValues { public abstract class SortedBinaryDocValues {
@ -30,8 +30,8 @@ import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LongsRef; import org.apache.lucene.util.LongsRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.packed.GrowableWriter; import org.apache.lucene.util.packed.GrowableWriter;
import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedGrowableWriter; import org.apache.lucene.util.packed.PagedGrowableWriter;
@ -459,7 +459,7 @@ public final class OrdinalsBuilder implements Closeable {
@Override @Override
protected AcceptStatus accept(BytesRef term) throws IOException { protected AcceptStatus accept(BytesRef term) throws IOException {
// we stop accepting terms once we moved across the prefix codec terms - redundant values! // we stop accepting terms once we moved across the prefix codec terms - redundant values!
return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END; return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
} }
}; };
} }
@ -475,7 +475,7 @@ public final class OrdinalsBuilder implements Closeable {
@Override @Override
protected AcceptStatus accept(BytesRef term) throws IOException { protected AcceptStatus accept(BytesRef term) throws IOException {
// we stop accepting terms once we moved across the prefix codec terms - redundant values! // we stop accepting terms once we moved across the prefix codec terms - redundant values!
return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END; return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
} }
}; };
} }
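Both filtered TermsEnums stop at the first term whose trie shift is non-zero: higher shifts are lower-precision prefixes of values already visited, so they carry no new ordinals. A round-trip sketch with the renamed helpers:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.LegacyNumericUtils;

public class PrefixCodedDemo {
    public static void main(String[] args) {
        BytesRefBuilder builder = new BytesRefBuilder();
        LegacyNumericUtils.longToPrefixCoded(42L, 0, builder); // shift 0 = full precision
        BytesRef term = builder.get();
        System.out.println(LegacyNumericUtils.getPrefixCodedLongShift(term)); // 0
        System.out.println(LegacyNumericUtils.prefixCodedToLong(term));       // 42
    }
}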
@ -24,7 +24,7 @@ import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator; import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.LegacyNumericUtils;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
@ -62,7 +62,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
if (termEncoding == GeoPointField.TermEncoding.PREFIX) { if (termEncoding == GeoPointField.TermEncoding.PREFIX) {
return GeoEncodingUtils.prefixCodedToGeoCoded(term); return GeoEncodingUtils.prefixCodedToGeoCoded(term);
} else if (termEncoding == GeoPointField.TermEncoding.NUMERIC) { } else if (termEncoding == GeoPointField.TermEncoding.NUMERIC) {
return NumericUtils.prefixCodedToLong(term); return LegacyNumericUtils.prefixCodedToLong(term);
} }
throw new IllegalArgumentException("GeoPoint.TermEncoding should be one of: " + GeoPointField.TermEncoding.PREFIX throw new IllegalArgumentException("GeoPoint.TermEncoding should be one of: " + GeoPointField.TermEncoding.PREFIX
+ " or " + GeoPointField.TermEncoding.NUMERIC + " found: " + termEncoding); + " or " + GeoPointField.TermEncoding.NUMERIC + " found: " + termEncoding);
@ -24,7 +24,6 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables; import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.FieldData;
@ -59,7 +58,7 @@ public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPoin
@Override @Override
public long ramBytesUsed() { public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT + indexedPoints.ramBytesUsed(); return Integer.BYTES + indexedPoints.ramBytesUsed();
} }
@Override @Override
@ -117,7 +116,7 @@ public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPoin
@Override @Override
public long ramBytesUsed() { public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT + indexedPoint.ramBytesUsed() return Integer.BYTES + indexedPoint.ramBytesUsed()
+ (set == null ? 0 : set.ramBytesUsed()); + (set == null ? 0 : set.ramBytesUsed());
} }
@ -24,7 +24,6 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables; import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.common.util.DoubleArray;
import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.FieldData;
@ -61,7 +60,7 @@ public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicG
@Override @Override
public long ramBytesUsed() { public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT/*size*/ + lon.ramBytesUsed() + lat.ramBytesUsed(); return Integer.BYTES/*size*/ + lon.ramBytesUsed() + lat.ramBytesUsed();
} }
@Override @Override
@ -132,7 +131,7 @@ public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicG
@Override @Override
public long ramBytesUsed() { public long ramBytesUsed() {
return RamUsageEstimator.NUM_BYTES_INT + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed()); return Integer.BYTES + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed());
} }
@Override @Override
@ -305,13 +305,15 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData<AtomicPare
public class GlobalFieldData implements IndexParentChildFieldData, Accountable { public class GlobalFieldData implements IndexParentChildFieldData, Accountable {
private final Object coreCacheKey;
private final List<LeafReaderContext> leaves;
private final AtomicParentChildFieldData[] fielddata; private final AtomicParentChildFieldData[] fielddata;
private final IndexReader reader;
private final long ramBytesUsed; private final long ramBytesUsed;
private final Map<String, OrdinalMapAndAtomicFieldData> ordinalMapPerType; private final Map<String, OrdinalMapAndAtomicFieldData> ordinalMapPerType;
GlobalFieldData(IndexReader reader, AtomicParentChildFieldData[] fielddata, long ramBytesUsed, Map<String, OrdinalMapAndAtomicFieldData> ordinalMapPerType) { GlobalFieldData(IndexReader reader, AtomicParentChildFieldData[] fielddata, long ramBytesUsed, Map<String, OrdinalMapAndAtomicFieldData> ordinalMapPerType) {
this.reader = reader; this.coreCacheKey = reader.getCoreCacheKey();
this.leaves = reader.leaves();
this.ramBytesUsed = ramBytesUsed; this.ramBytesUsed = ramBytesUsed;
this.fielddata = fielddata; this.fielddata = fielddata;
this.ordinalMapPerType = ordinalMapPerType; this.ordinalMapPerType = ordinalMapPerType;
@ -329,7 +331,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData<AtomicPare
@Override @Override
public AtomicParentChildFieldData load(LeafReaderContext context) { public AtomicParentChildFieldData load(LeafReaderContext context) {
assert context.reader().getCoreCacheKey() == reader.leaves().get(context.ord).reader().getCoreCacheKey(); assert context.reader().getCoreCacheKey() == leaves.get(context.ord).reader().getCoreCacheKey();
return fielddata[context.ord]; return fielddata[context.ord];
} }
@ -365,7 +367,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData<AtomicPare
@Override @Override
public IndexParentChildFieldData loadGlobal(DirectoryReader indexReader) { public IndexParentChildFieldData loadGlobal(DirectoryReader indexReader) {
if (indexReader.getCoreCacheKey() == reader.getCoreCacheKey()) { if (indexReader.getCoreCacheKey() == coreCacheKey) {
return this; return this;
} }
throw new IllegalStateException(); throw new IllegalStateException();
@ -22,6 +22,10 @@ package org.elasticsearch.index.mapper;
import com.carrotsearch.hppc.ObjectObjectHashMap; import com.carrotsearch.hppc.ObjectObjectHashMap;
import com.carrotsearch.hppc.ObjectObjectMap; import com.carrotsearch.hppc.ObjectObjectMap;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.IndexableField;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
@ -128,8 +132,8 @@ public abstract class ParseContext {
* Returns an array of values of the field specified as the method parameter. * Returns an array of values of the field specified as the method parameter.
* This method returns an empty array when there are no * This method returns an empty array when there are no
* matching fields. It never returns null. * matching fields. It never returns null.
* For {@link org.apache.lucene.document.IntField}, {@link org.apache.lucene.document.LongField}, {@link * For {@link org.apache.lucene.document.LegacyIntField}, {@link org.apache.lucene.document.LegacyLongField}, {@link
* org.apache.lucene.document.FloatField} and {@link org.apache.lucene.document.DoubleField} it returns the string value of the number. * org.apache.lucene.document.LegacyFloatField} and {@link org.apache.lucene.document.LegacyDoubleField} it returns the string value of the number.
* If you want the actual numeric field instances back, use {@link #getFields}. * If you want the actual numeric field instances back, use {@link #getFields}.
* @param name the name of the field * @param name the name of the field
* @return a <code>String[]</code> of field values * @return a <code>String[]</code> of field values
@ -23,11 +23,11 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Terms; import org.apache.lucene.index.Terms;
import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.LegacyNumericUtils;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Explicit;
@ -116,7 +116,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
static final class ByteFieldType extends NumberFieldType { static final class ByteFieldType extends NumberFieldType {
public ByteFieldType() { public ByteFieldType() {
super(NumericType.INT); super(LegacyNumericType.INT);
} }
protected ByteFieldType(ByteFieldType ref) { protected ByteFieldType(ByteFieldType ref) {
@ -155,13 +155,13 @@ public class ByteFieldMapper extends NumberFieldMapper {
@Override @Override
public BytesRef indexedValueForSearch(Object value) { public BytesRef indexedValueForSearch(Object value) {
BytesRefBuilder bytesRef = new BytesRefBuilder(); BytesRefBuilder bytesRef = new BytesRefBuilder();
NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
return bytesRef.get(); return bytesRef.get();
} }
@Override @Override
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
lowerTerm == null ? null : (int)parseValue(lowerTerm), lowerTerm == null ? null : (int)parseValue(lowerTerm),
upperTerm == null ? null : (int)parseValue(upperTerm), upperTerm == null ? null : (int)parseValue(upperTerm),
includeLower, includeUpper); includeLower, includeUpper);
@ -171,7 +171,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
byte iValue = parseValue(value); byte iValue = parseValue(value);
byte iSim = fuzziness.asByte(); byte iSim = fuzziness.asByte();
return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(), return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
iValue - iSim, iValue - iSim,
iValue + iSim, iValue + iSim,
true, true); true, true);
@ -179,8 +179,8 @@ public class ByteFieldMapper extends NumberFieldMapper {
@Override @Override
public FieldStats stats(Terms terms, int maxDoc) throws IOException { public FieldStats stats(Terms terms, int maxDoc) throws IOException {
long minValue = NumericUtils.getMinInt(terms); long minValue = LegacyNumericUtils.getMinInt(terms);
long maxValue = NumericUtils.getMaxInt(terms); long maxValue = LegacyNumericUtils.getMaxInt(terms);
return new FieldStats.Long( return new FieldStats.Long(
maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
); );
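Every trie-based mapper in this commit follows the same substitution: NumericRangeQuery becomes LegacyNumericRangeQuery with an unchanged signature. For the byte mapper's range query, a sketch (field name, precision step, and bounds are example values):

import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;

public class LegacyRangeDemo {
    public static void main(String[] args) {
        // Bytes are indexed as trie ints, hence newIntRange.
        Query query = LegacyNumericRangeQuery.newIntRange("age", 8, 1, 10, true, true);
        System.out.println(query); // e.g. age:[1 TO 10]
    }
}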
@ -23,12 +23,11 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Terms; import org.apache.lucene.index.Terms;
import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.ToStringUtils;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Explicit;
@ -243,7 +242,6 @@ public class DateFieldMapper extends NumberFieldMapper {
.append(" TO ") .append(" TO ")
.append((upperTerm == null) ? "*" : upperTerm.toString()) .append((upperTerm == null) ? "*" : upperTerm.toString())
.append(includeUpper ? ']' : '}') .append(includeUpper ? ']' : '}')
.append(ToStringUtils.boost(getBoost()))
.toString(); .toString();
} }
} }
@ -253,7 +251,7 @@ public class DateFieldMapper extends NumberFieldMapper {
protected DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter); protected DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter);
public DateFieldType() { public DateFieldType() {
super(NumericType.LONG); super(LegacyNumericType.LONG);
setFieldDataType(new FieldDataType("long")); setFieldDataType(new FieldDataType("long"));
} }
@ -360,7 +358,7 @@ public class DateFieldMapper extends NumberFieldMapper {
@Override @Override
public BytesRef indexedValueForSearch(Object value) { public BytesRef indexedValueForSearch(Object value) {
BytesRefBuilder bytesRef = new BytesRefBuilder(); BytesRefBuilder bytesRef = new BytesRefBuilder();
NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match LegacyNumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
return bytesRef.get(); return bytesRef.get();
} }
@ -392,7 +390,7 @@ public class DateFieldMapper extends NumberFieldMapper {
// not a time format // not a time format
iSim = fuzziness.asLong(); iSim = fuzziness.asLong();
} }
return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
iValue - iSim, iValue - iSim,
iValue + iSim, iValue + iSim,
true, true); true, true);
@ -400,8 +398,8 @@ public class DateFieldMapper extends NumberFieldMapper {
@Override @Override
public FieldStats stats(Terms terms, int maxDoc) throws IOException { public FieldStats stats(Terms terms, int maxDoc) throws IOException {
long minValue = NumericUtils.getMinLong(terms); long minValue = LegacyNumericUtils.getMinLong(terms);
long maxValue = NumericUtils.getMaxLong(terms); long maxValue = LegacyNumericUtils.getMaxLong(terms);
return new FieldStats.Date( return new FieldStats.Date(
maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue, dateTimeFormatter() maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue, dateTimeFormatter()
); );
@ -412,7 +410,7 @@ public class DateFieldMapper extends NumberFieldMapper {
} }
private Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) { private Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) {
return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(), return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
lowerTerm == null ? null : parseToMilliseconds(lowerTerm, !includeLower, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser), lowerTerm == null ? null : parseToMilliseconds(lowerTerm, !includeLower, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser),
upperTerm == null ? null : parseToMilliseconds(upperTerm, includeUpper, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser), upperTerm == null ? null : parseToMilliseconds(upperTerm, includeUpper, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser),
includeLower, includeUpper); includeLower, includeUpper);
@ -24,10 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.Terms; import org.apache.lucene.index.Terms;
import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.action.fieldstats.FieldStats;
@ -49,7 +50,6 @@ import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import static org.apache.lucene.util.NumericUtils.doubleToSortableLong;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField; import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
@ -118,7 +118,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
public static final class DoubleFieldType extends NumberFieldType { public static final class DoubleFieldType extends NumberFieldType {
public DoubleFieldType() { public DoubleFieldType() {
super(NumericType.DOUBLE); super(LegacyNumericType.DOUBLE);
} }
protected DoubleFieldType(DoubleFieldType ref) { protected DoubleFieldType(DoubleFieldType ref) {
@ -158,13 +158,13 @@ public class DoubleFieldMapper extends NumberFieldMapper {
public BytesRef indexedValueForSearch(Object value) { public BytesRef indexedValueForSearch(Object value) {
long longValue = NumericUtils.doubleToSortableLong(parseDoubleValue(value)); long longValue = NumericUtils.doubleToSortableLong(parseDoubleValue(value));
BytesRefBuilder bytesRef = new BytesRefBuilder(); BytesRefBuilder bytesRef = new BytesRefBuilder();
NumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match LegacyNumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match
return bytesRef.get(); return bytesRef.get();
} }
@Override @Override
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) { public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
return NumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
lowerTerm == null ? null : parseDoubleValue(lowerTerm), lowerTerm == null ? null : parseDoubleValue(lowerTerm),
upperTerm == null ? null : parseDoubleValue(upperTerm), upperTerm == null ? null : parseDoubleValue(upperTerm),
includeLower, includeUpper); includeLower, includeUpper);
@ -174,7 +174,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) { public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
double iValue = parseDoubleValue(value); double iValue = parseDoubleValue(value);
double iSim = fuzziness.asDouble(); double iSim = fuzziness.asDouble();
return NumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(), return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
iValue - iSim, iValue - iSim,
iValue + iSim, iValue + iSim,
true, true); true, true);
@ -182,8 +182,8 @@ public class DoubleFieldMapper extends NumberFieldMapper {
@Override @Override
public FieldStats stats(Terms terms, int maxDoc) throws IOException { public FieldStats stats(Terms terms, int maxDoc) throws IOException {
double minValue = NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms)); double minValue = NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms));
double maxValue = NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms)); double maxValue = NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms));
return new FieldStats.Double( return new FieldStats.Double(
maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
); );
@ -284,7 +284,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
fields.add(field); fields.add(field);
} }
if (fieldType().hasDocValues()) { if (fieldType().hasDocValues()) {
addDocValue(context, fields, doubleToSortableLong(value)); addDocValue(context, fields, NumericUtils.doubleToSortableLong(value));
} }
} }
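The double mapper keeps NumericUtils for the sortable-long encoding, which survives in Lucene 6, while prefix coding moves to LegacyNumericUtils; the static import of doubleToSortableLong is simply replaced by the qualified call. The encoding is an order-preserving, lossless mapping:

import org.apache.lucene.util.NumericUtils;

public class SortableDoubleDemo {
    public static void main(String[] args) {
        long a = NumericUtils.doubleToSortableLong(1.5);
        long b = NumericUtils.doubleToSortableLong(2.5);
        System.out.println(a < b);                                // true: order preserved
        System.out.println(NumericUtils.sortableLongToDouble(a)); // 1.5: lossless round trip
    }
}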
@@ -24,10 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
@@ -50,7 +51,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.apache.lucene.util.NumericUtils.floatToSortableInt;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

@@ -119,7 +119,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
     static final class FloatFieldType extends NumberFieldType {

         public FloatFieldType() {
-            super(NumericType.FLOAT);
+            super(LegacyNumericType.FLOAT);
         }

         protected FloatFieldType(FloatFieldType ref) {
@@ -159,13 +159,13 @@ public class FloatFieldMapper extends NumberFieldMapper {
         public BytesRef indexedValueForSearch(Object value) {
             int intValue = NumericUtils.floatToSortableInt(parseValue(value));
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper);
@@ -175,7 +175,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             float iValue = parseValue(value);
             final float iSim = fuzziness.asFloat();
-            return NumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -183,8 +183,8 @@ public class FloatFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            float minValue = NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms));
-            float maxValue = NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms));
+            float minValue = NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms));
+            float maxValue = NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms));
             return new FieldStats.Float(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );
@@ -296,7 +296,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
                 fields.add(field);
             }
             if (fieldType().hasDocValues()) {
-                addDocValue(context, fields, floatToSortableInt(value));
+                addDocValue(context, fields, NumericUtils.floatToSortableInt(value));
             }
         }

View File

@@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
@@ -124,7 +124,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     public static final class IntegerFieldType extends NumberFieldType {

         public IntegerFieldType() {
-            super(NumericType.INT);
+            super(LegacyNumericType.INT);
         }

         protected IntegerFieldType(IntegerFieldType ref) {
@@ -164,13 +164,13 @@ public class IntegerFieldMapper extends NumberFieldMapper {
         @Override
         public BytesRef indexedValueForSearch(Object value) {
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper);
@@ -180,7 +180,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             int iValue = parseValue(value);
             int iSim = fuzziness.asInt();
-            return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -188,8 +188,8 @@ public class IntegerFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            long minValue = NumericUtils.getMinInt(terms);
-            long maxValue = NumericUtils.getMaxInt(terms);
+            long minValue = LegacyNumericUtils.getMinInt(terms);
+            long maxValue = LegacyNumericUtils.getMaxInt(terms);
             return new FieldStats.Long(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );

View File

@@ -85,9 +85,9 @@ public final class KeywordFieldMapper extends FieldMapper implements AllFieldMap

         @Override
         public Builder indexOptions(IndexOptions indexOptions) {
-            if (fieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
+            if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
                 throw new IllegalArgumentException("The [keyword] field does not support positions, got [index_options]="
-                        + indexOptionToString(fieldType.indexOptions()));
+                        + indexOptionToString(indexOptions));
             }
             return super.indexOptions(indexOptions);
         }
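The KeywordFieldMapper hunk above is a genuine bug fix rather than part of the Lucene rename: the builder validated the field type's current index options instead of the indexOptions argument it was handed, so an unsupported value could slip through and the error message reported the wrong value. A stripped-down sketch of the corrected guard, using a hypothetical stand-in class; only IndexOptions comes from Lucene:

    import org.apache.lucene.index.IndexOptions;

    // Hypothetical stand-in for the mapper's Builder, reduced to the guard.
    class KeywordBuilderSketch {
        private IndexOptions indexOptions = IndexOptions.DOCS;

        KeywordBuilderSketch indexOptions(IndexOptions indexOptions) {
            // Validate the incoming argument; checking the stored field state
            // (the old code) passes as long as the *previous* value was legal.
            if (indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) > 0) {
                throw new IllegalArgumentException(
                        "The [keyword] field does not support positions, got [index_options]=" + indexOptions);
            }
            this.indexOptions = indexOptions;
            return this;
        }
    }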

View File

@@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
@@ -123,7 +123,7 @@ public class LongFieldMapper extends NumberFieldMapper {
     public static class LongFieldType extends NumberFieldType {

         public LongFieldType() {
-            super(NumericType.LONG);
+            super(LegacyNumericType.LONG);
         }

         protected LongFieldType(LongFieldType ref) {
@@ -162,13 +162,13 @@ public class LongFieldMapper extends NumberFieldMapper {
         @Override
         public BytesRef indexedValueForSearch(Object value) {
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : parseLongValue(lowerTerm),
                 upperTerm == null ? null : parseLongValue(upperTerm),
                 includeLower, includeUpper);
@@ -178,7 +178,7 @@ public class LongFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             long iValue = parseLongValue(value);
             final long iSim = fuzziness.asLong();
-            return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -186,8 +186,8 @@ public class LongFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            long minValue = NumericUtils.getMinLong(terms);
-            long maxValue = NumericUtils.getMaxLong(terms);
+            long minValue = LegacyNumericUtils.getMinLong(terms);
+            long maxValue = LegacyNumericUtils.getMaxLong(terms);
             return new FieldStats.Long(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );

View File

@@ -20,7 +20,7 @@
 package org.elasticsearch.index.mapper.core;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
@@ -129,7 +129,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM

     public static abstract class NumberFieldType extends MappedFieldType {

-        public NumberFieldType(NumericType numericType) {
+        public NumberFieldType(LegacyNumericType numericType) {
             setTokenized(false);
             setOmitNorms(true);
             setIndexOptions(IndexOptions.DOCS);
@@ -295,38 +295,38 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
     // used to we can use a numeric field in a document that is then parsed twice!
     public abstract static class CustomNumericField extends Field {

-        private ThreadLocal<NumericTokenStream> tokenStream = new ThreadLocal<NumericTokenStream>() {
+        private ThreadLocal<LegacyNumericTokenStream> tokenStream = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(fieldType().numericPrecisionStep());
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(fieldType().numericPrecisionStep());
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStream4 = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStream4 = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(4);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(4);
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStream8 = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStream8 = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(8);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(8);
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStream16 = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStream16 = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(16);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(16);
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStreamMax = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStreamMax = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(Integer.MAX_VALUE);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(Integer.MAX_VALUE);
             }
         };

@@ -337,7 +337,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
             }
         }

-        protected NumericTokenStream getCachedStream() {
+        protected LegacyNumericTokenStream getCachedStream() {
             if (fieldType().numericPrecisionStep() == 4) {
                 return tokenStream4.get();
             } else if (fieldType().numericPrecisionStep() == 8) {
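The block above is the mapper's token-stream cache: numeric token streams are reusable but not thread-safe, so the class keeps one per thread for each common precision step (4, 8, 16, Integer.MAX_VALUE), plus a per-field fallback for custom steps. A condensed sketch of the same idea, assuming Lucene 6.x's LegacyNumericTokenStream and Java 8; the class and method names are made up, and the exception replaces the field-level fallback for brevity:

    import org.apache.lucene.analysis.LegacyNumericTokenStream;

    final class CachedNumericStreams {
        private static final ThreadLocal<LegacyNumericTokenStream> STREAM_4 =
                ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(4));
        private static final ThreadLocal<LegacyNumericTokenStream> STREAM_8 =
                ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(8));
        private static final ThreadLocal<LegacyNumericTokenStream> STREAM_16 =
                ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(16));
        private static final ThreadLocal<LegacyNumericTokenStream> STREAM_MAX =
                ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(Integer.MAX_VALUE));

        // Reuse a pre-built per-thread stream for the common precision steps;
        // any other step would need its own cache, as in the mapper above.
        static LegacyNumericTokenStream forPrecisionStep(int step) {
            switch (step) {
                case 4:  return STREAM_4.get();
                case 8:  return STREAM_8.get();
                case 16: return STREAM_16.get();
                case Integer.MAX_VALUE: return STREAM_MAX.get();
                default: throw new IllegalArgumentException("uncached precision step: " + step);
            }
        }
    }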

View File

@@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
@@ -121,7 +121,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
     static final class ShortFieldType extends NumberFieldType {

         public ShortFieldType() {
-            super(NumericType.INT);
+            super(LegacyNumericType.INT);
         }

         protected ShortFieldType(ShortFieldType ref) {
@@ -160,13 +160,13 @@ public class ShortFieldMapper extends NumberFieldMapper {
         @Override
         public BytesRef indexedValueForSearch(Object value) {
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : (int)parseValue(lowerTerm),
                 upperTerm == null ? null : (int)parseValue(upperTerm),
                 includeLower, includeUpper);
@@ -176,7 +176,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             short iValue = parseValue(value);
             short iSim = fuzziness.asShort();
-            return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -184,8 +184,8 @@ public class ShortFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            long minValue = NumericUtils.getMinInt(terms);
-            long maxValue = NumericUtils.getMaxInt(terms);
+            long minValue = LegacyNumericUtils.getMinInt(terms);
+            long maxValue = LegacyNumericUtils.getMaxInt(terms);
             return new FieldStats.Long(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );
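As the ShortFieldMapper hunks show, short fields reuse the int trie encoding end to end: exact terms are int-prefix-coded, range and fuzzy queries widen the short endpoints to int, and stats come back as longs. A small sketch of the fuzzy-as-range arithmetic, assuming Lucene 6.x; the field name, precision step, and values are illustrative only:

    import org.apache.lucene.search.LegacyNumericRangeQuery;
    import org.apache.lucene.search.Query;

    class ShortFuzzySketch {
        static Query fuzzy(String field, short value, short fuzziness) {
            // Short arithmetic promotes to int, which matches the int trie
            // encoding the mapper uses for short fields.
            return LegacyNumericRangeQuery.newIntRange(
                    field, 16, value - fuzziness, value + fuzziness, true, true);
        }

        public static void main(String[] args) {
            System.out.println(fuzzy("age", (short) 30, (short) 2));
        }
    }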

Some files were not shown because too many files have changed in this diff