upgrade to lucene 6.0.0-snapshot-bea235f
Closes #16964

Squashed commit of the following:

commit a23f9d2d29220991aa498214530753d7a5a148c6 Merge: eec9c4e 0b0a251 Author: Robert Muir <rmuir@apache.org> Date: Mon Mar 7 04:12:02 2016 -0500
    Merge branch 'master' into lucene6
commit eec9c4e5cd11e9c3e0b426f04894bb2a6dae4f21 Merge: bc67205 675d940 Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 13:45:00 2016 -0500
    Merge branch 'master' into lucene6
commit bc67205bdfe1526eae277ab7856fc050ecbdb7b2 Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 09:56:31 2016 -0500
    fix test bug
commit a60723b007ff12d97b1810cef473bd7b553a0327 Author: Simon Willnauer <simonw@apache.org> Date: Fri Mar 4 15:35:35 2016 +0100
    Fix SimpleValidateQueryIT to put braces around boosted terms
commit ae3a49d7ba7ced448d2a5262e5d8ec98671a9090 Author: Simon Willnauer <simonw@apache.org> Date: Fri Mar 4 15:27:25 2016 +0100
    fix multimatchquery
commit ae23fdb88a8f6d3fb7ba60fd1aaf3fd72d899aa5 Author: Simon Willnauer <simonw@apache.org> Date: Fri Mar 4 15:20:49 2016 +0100
    Rewrite DecayFunctionScoreIT to be independent of the similarity used
    This test relied a lot on the term scoring and compared scores that are dependent on the similarity. This commit changes the base query to be a predictable constant score query.
commit 366c2d518c35d31251033f1b6f6a93f6e2ae327d Author: Simon Willnauer <simonw@apache.org> Date: Fri Mar 4 14:06:14 2016 +0100
    Fix scoring in tests due to changes to idf calculation.
    Lucene 6 uses a different default similarity as well as a different way to calculate IDF. In contrast to older versions, Lucene 6 uses the docCount per field to calculate the IDF, not the number of docs in the index, to overcome the sparse field cases.
commit dac99fd64ac2fa71b8d8d106fe68825e574c49f8 Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 08:21:57 2016 -0500
    don't hardcode expected termquery score
commit 6e9f340ba49ab10eed512df86d52a121aa775b0f Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 08:04:45 2016 -0500
    suppress deprecation warning until migrated to points
commit 3ac8908424b3fdad44a90a4f7bdb3eff7efd077d Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 07:21:43 2016 -0500
    Remove invalid test: all commits have IDs, and it's illegal to do this.
commit c12976288124ad1a26467e7e848fb810548e7eab Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 07:06:14 2016 -0500
    don't test with unsupported back compat
commit 18bbfe76128570bc70883bf91ff4c44c82d27817 Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 07:02:18 2016 -0500
    remove now invalid lucene 4 backcompat test
commit 7e730e572886f0ef2d3faba712e4256216ff01ec Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 06:58:52 2016 -0500
    remove now invalid lucene 4 backwards test
commit 244d2ab6868ba5ac9e0bcde3c2833743751a25ec Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 06:47:23 2016 -0500
    use 6.0 codec
commit 5f64d4a431a6fdaa1234adca23f154c2a1de8284 Author: Robert Muir <rmuir@apache.org> Date: Fri Mar 4 06:43:08 2016 -0500
    compile, javadocs, forbidden-apis, etc
commit 1f273cd62a7fe9ca8f8944acbbfc5cbdd3d81ccb Merge: cd33921 29e3443 Author: Simon Willnauer <simonw@apache.org> Date: Fri Mar 4 10:45:29 2016 +0100
    Merge branch 'master' into lucene6
commit cd33921ac742ef9fb351012eff35f3c7dbda7264 Author: Robert Muir <rmuir@apache.org> Date: Thu Mar 3 23:58:37 2016 -0500
    fix hunspell dictionary loading
commit c7fdbd837b01f7defe9cb1c24e2ec65604b0dc96 Merge: 4d4190f d8948ba Author: Robert Muir <rmuir@apache.org> Date: Thu Mar 3 23:41:53 2016 -0500
    Merge branch 'master' into lucene6
commit 4d4190fd82601aaafac6b8254ccb3edf218faa34 Author: Robert Muir <rmuir@apache.org> Date: Thu Mar 3 23:39:14 2016 -0500
    remove nocommit
commit 77ca69e288b1a41aa9595c921ed166c272a00ea8 Author: Robert Muir <rmuir@apache.org> Date: Thu Mar 3 23:38:24 2016 -0500
    clean up numericutils vs legacynumericutils
commit a466d696fbaad04b647ffbc0857a9439b583d0bf Author: Robert Muir <rmuir@apache.org> Date: Thu Mar 3 23:32:43 2016 -0500
    upgrade spatial4j
commit 5412c747a8cfe638bacedbc8233163cb75cc3dc5 Author: Robert Muir <rmuir@apache.org> Date: Thu Mar 3 23:19:28 2016 -0500
    move to 6.0.0-snapshot-8eada27
commit b32bfe924626b87e540692375ece09e7c2edb189 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 11:30:09 2016 +0100
    Fix some test compile errors.
commit 6ccde35e9840b03c68d1a2cd47c7923a06edf64a Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 11:25:51 2016 +0100
    Current Lucene version is 6.0.0.
commit f62e1015d931b4cc04c778298a8fa1ba65e97ad9 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 11:20:48 2016 +0100
    Fix compile errors in NGramTokenFilterFactory.
commit 6837c6eabf96075f743649da9b9b52dd39611c58 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 10:50:59 2016 +0100
    Fix the edge ngram tokenizer/filter.
commit ccd7f070de5efcdfbeb34b9555c65c4990bf1ba6 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 10:42:44 2016 +0100
    The missing value is now accessible through a getter.
commit bd3b77f9b28e5b05daa3d49683a9922a6baf2963 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 10:41:51 2016 +0100
    Remove IndexCacheableQuery.
commit 05f3091c347aeae80eeb16349ac51d2b53cf86f7 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 10:39:43 2016 +0100
    Fix compilation of function_score queries.
commit 81cda79a2431ac78f56b0cc5a5765387f662d801 Author: Adrien Grand <jpountz@gmail.com> Date: Thu Mar 3 10:35:02 2016 +0100
    Fix compile errors in BlendedTermQuery.
commit 70994ce8dd1eca0b995870974a38e20f26f96a7b Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 23:33:03 2016 -0500
    add bug ID
commit 29d4f1a71f36f646b5a6060bed3db019564a279d Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 21:02:32 2016 -0500
    easy .store changes
commit 5e1a1e6fd665fa455e88d3a8987362fad5f44bb1 Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 20:47:24 2016 -0500
    cleanups mostly around boosting
commit 333a669ec6c305ada5645d13ed1da0e19ec1d053 Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 20:27:56 2016 -0500
    more simple fixes
commit bd5cd98a1e089c866b6b4a5e159400b110140ce6 Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 19:49:38 2016 -0500
    more easy fixes and removal of ancient cruft
commit a68f419ee47da5f9c9ce5b372f01d707e902474c Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 19:35:02 2016 -0500
    cutover numerics
commit 4ca5dc1fa47dd5892db00899032133318fff3116 Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 18:34:18 2016 -0500
    fix some constants
commit 88710a17817086e477c6c021ec346d0534b7fb88 Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 18:14:25 2016 -0500
    Add spatial-extras jar as a core dependency
commit c8cd6726583e5ce3f546ed355d4eca037164a30d Author: Robert Muir <rmuir@apache.org> Date: Wed Mar 2 18:03:33 2016 -0500
    update to lucene 6 jars
Parent: 0b0a251343
Commit: 54018a5d37
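The scoring change called out in commit 366c2d5 above is easiest to see with concrete numbers. Below is a minimal, illustrative Java sketch, not Lucene's actual Similarity code: it uses the classic idf formula log((N + 1) / (docFreq + 1)) + 1 with made-up counts for a sparse field, contrasting N = number of docs in the index (pre-6.0) with N = per-field docCount (Lucene 6).

// Illustrative only: hand-rolled IDF, not Lucene's Similarity classes.
// Assume an index of 100 docs where only 10 have the sparse field, 5 with the term.
public class IdfSketch {
    static double idf(long docFreq, long numDocs) {
        return Math.log((numDocs + 1) / (double) (docFreq + 1)) + 1.0;
    }

    public static void main(String[] args) {
        System.out.println(idf(5, 100)); // old style, # of docs in the index: ~3.82
        System.out.println(idf(5, 10));  // lucene 6 style, per-field docCount: ~1.61
    }
}

With 90 documents that never contain the field, the old formula inflates the IDF of every term in it, which is why the tests in this change had to stop hard-coding expected scores.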
@@ -68,7 +68,7 @@ public class PluginBuildPlugin extends BuildPlugin {
     testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
     // we "upgrade" these optional deps to provided for plugins, since they will run
     // with a full elasticsearch server that includes optional deps
-    provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
+    provided "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}"
     provided "com.vividsolutions:jts:${project.versions.jts}"
     provided "log4j:log4j:${project.versions.log4j}"
     provided "log4j:apache-log4j-extras:${project.versions.log4j}"
@@ -33,20 +33,6 @@ java.util.Formatter#<init>(java.lang.String,java.lang.String,java.util.Locale)
 java.io.RandomAccessFile
 java.nio.file.Path#toFile()
 
-@defaultMessage Don't use deprecated lucene apis
-org.apache.lucene.index.DocsEnum
-org.apache.lucene.index.DocsAndPositionsEnum
-org.apache.lucene.queries.TermFilter
-org.apache.lucene.queries.TermsFilter
-org.apache.lucene.search.Filter
-org.apache.lucene.search.FilteredQuery
-org.apache.lucene.search.TermRangeFilter
-org.apache.lucene.search.NumericRangeFilter
-org.apache.lucene.search.PrefixFilter
-org.apache.lucene.search.QueryWrapperFilter
-org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter
-org.apache.lucene.index.IndexWriter#isLocked(org.apache.lucene.store.Directory)
-
 java.nio.file.Paths @ Use org.elasticsearch.common.io.PathUtils.get() instead.
 java.nio.file.FileSystems#getDefault() @ use org.elasticsearch.common.io.PathUtils.getDefaultFileSystem() instead.
 
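For context on the block removed above: these classes were deleted outright in Lucene 6, so there is nothing left to forbid. A hedged sketch of the usual migration path for one of them, FilteredQuery, using only standard Lucene query classes (the field names here are invented):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class FilteredQueryMigrationSketch {
    // What used to be `new FilteredQuery(query, filter)` in Lucene 4/5 becomes
    // a BooleanQuery with a non-scoring FILTER clause.
    static Query filtered(Query query, Query filter) {
        return new BooleanQuery.Builder()
                .add(query, Occur.MUST)     // contributes to the score
                .add(filter, Occur.FILTER)  // must match, never scores
                .build();
    }

    public static void main(String[] args) {
        System.out.println(filtered(
                new TermQuery(new Term("body", "lucene")),
                new TermQuery(new Term("status", "published"))));
    }
}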
@@ -41,14 +41,10 @@ org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)
 org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)
 
 @defaultMessage Pass the precision step from the mappings explicitly instead
-org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
-org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
-org.apache.lucene.search.NumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
-org.apache.lucene.search.NumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
-org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
+org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
 
 @defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
 java.lang.Object#wait()
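A sketch of what the renamed signatures look like at a call site. NumericRangeQuery became LegacyNumericRangeQuery in Lucene 6; the field name and precision step below are invented, and in this codebase the step comes from the field mappings, which is why the step-less overloads stay forbidden above:

import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;

public class LegacyRangeSketch {
    // Both bounds inclusive; 16 is Lucene's historical default precision step.
    static Query priceBetween(long min, long max) {
        return LegacyNumericRangeQuery.newLongRange("price", 16, min, max, true, true);
    }

    public static void main(String[] args) {
        System.out.println(priceBetween(10L, 100L)); // price:[10 TO 100]
    }
}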
@@ -88,9 +84,6 @@ java.util.concurrent.Future#cancel(boolean)
 org.elasticsearch.common.io.PathUtils#get(java.lang.String, java.lang.String[])
 org.elasticsearch.common.io.PathUtils#get(java.net.URI)
 
-@defaultMessage Don't use deprecated Query#setBoost, wrap the query into a BoostQuery instead
-org.apache.lucene.search.Query#setBoost(float)
-
 @defaultMessage Constructing a DateTime without a time zone is dangerous
 org.joda.time.DateTime#<init>()
 org.joda.time.DateTime#<init>(long)
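The setBoost ban could be dropped because Lucene 6 removes the method entirely: queries are immutable and boosts are applied by wrapping, so the compiler now enforces what the signature file used to. A small sketch (field and term invented):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class BoostMigrationSketch {
    // Wrap instead of mutating with the removed Query#setBoost(float).
    static Query boosted(Query query, float boost) {
        return boost == 1f ? query : new BoostQuery(query, boost);
    }

    public static void main(String[] args) {
        Query q = boosted(new TermQuery(new Term("title", "lucene")), 2.0f);
        System.out.println(q); // (title:lucene)^2.0
    }
}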
@@ -1,8 +1,8 @@
 elasticsearch = 5.0.0
-lucene = 5.5.0
+lucene = 6.0.0-snapshot-bea235f
 
 # optional dependencies
-spatial4j = 0.5
+spatial4j = 0.6
 jts = 1.13
 jackson = 2.7.1
 log4j = 1.2.17
@@ -42,6 +42,7 @@ dependencies {
   compile "org.apache.lucene:lucene-queryparser:${versions.lucene}"
   compile "org.apache.lucene:lucene-sandbox:${versions.lucene}"
   compile "org.apache.lucene:lucene-spatial:${versions.lucene}"
+  compile "org.apache.lucene:lucene-spatial-extras:${versions.lucene}"
   compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
   compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
 
@@ -71,7 +72,7 @@ dependencies {
   compile 'org.hdrhistogram:HdrHistogram:2.1.6'
 
   // lucene spatial
-  compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
+  compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional
   compile "com.vividsolutions:jts:${versions.jts}", optional
 
   // logging
@@ -168,11 +169,6 @@ thirdPartyAudit.excludes = [
   'org.apache.commons.logging.Log',
   'org.apache.commons.logging.LogFactory',
 
-  // from org.apache.lucene.sandbox.queries.regex.JakartaRegexpCapabilities$JakartaRegexMatcher (lucene-sandbox)
-  'org.apache.regexp.CharacterIterator',
-  'org.apache.regexp.RE',
-  'org.apache.regexp.REProgram',
-
   // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
   'org.apache.tomcat.jni.Buffer',
   'org.apache.tomcat.jni.Library',
@@ -210,7 +206,7 @@ thirdPartyAudit.excludes = [
   'org.jboss.marshalling.MarshallingConfiguration',
   'org.jboss.marshalling.Unmarshaller',
 
-  // from com.spatial4j.core.io.GeoJSONReader (spatial4j)
+  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
   'org.noggit.JSONParser',
 
   // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
@@ -33,7 +33,6 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.InPlaceMergeSorter;
-import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -247,14 +246,15 @@ public abstract class BlendedTermQuery extends Query {
                 if (boosts != null) {
                     boost = boosts[i];
                 }
-                builder.append(ToStringUtils.boost(boost));
+                if (boost != 1f) {
+                    builder.append('^').append(boost);
+                }
                 builder.append(", ");
             }
             if (terms.length > 0) {
                 builder.setLength(builder.length() - 2);
             }
             builder.append("])");
-            builder.append(ToStringUtils.boost(getBoost()));
             return builder.toString();
         }
 
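Since ToStringUtils.boost is gone along with per-query boosts, the toString above renders the boost by hand. A trivial runnable sketch of the resulting formatting (values invented): a boost of 1 prints nothing, anything else prints as "^boost".

public class BoostToStringSketch {
    public static void main(String[] args) {
        StringBuilder builder = new StringBuilder("body:foo");
        float boost = 2.0f;
        if (boost != 1f) {               // boost of 1 prints nothing
            builder.append('^').append(boost);
        }
        System.out.println(builder);     // body:foo^2.0
    }
}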
@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.DisjunctionMaxQuery;
 import org.apache.lucene.search.FuzzyQuery;
@@ -165,7 +166,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return getFieldQuerySingle(field, queryText, quoted);
@@ -267,7 +268,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return super.getFieldQuery(field, queryText, slop);
@@ -318,7 +319,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         }
 
@@ -380,7 +381,7 @@ public class MapperQueryParser extends QueryParser {
                     clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
                 }
             }
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     } else {
         return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -445,7 +446,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return getPrefixQuerySingle(field, termStr);
@@ -520,7 +521,7 @@ public class MapperQueryParser extends QueryParser {
             for (String token : tlist) {
                 clauses.add(new BooleanClause(super.getPrefixQuery(field, token), BooleanClause.Occur.SHOULD));
             }
-            return getBooleanQuery(clauses, true);
+            return getBooleanQueryCoordDisabled(clauses);
         }
     }
 
@@ -575,7 +576,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return getWildcardQuerySingle(field, termStr);
@@ -704,7 +705,7 @@ public class MapperQueryParser extends QueryParser {
                 }
                 if (clauses.size() == 0) // happens for stopwords
                     return null;
-                return getBooleanQuery(clauses, true);
+                return getBooleanQueryCoordDisabled(clauses);
             }
         } else {
             return getRegexpQuerySingle(field, termStr);
@@ -740,9 +741,23 @@ public class MapperQueryParser extends QueryParser {
             }
         }
 
+    /**
+     * @deprecated review all use of this, don't rely on coord
+     */
+    @Deprecated
+    protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
+        BooleanQuery.Builder builder = new BooleanQuery.Builder();
+        builder.setDisableCoord(true);
+        for (BooleanClause clause : clauses) {
+            builder.add(clause);
+        }
+        return fixNegativeQueryIfNeeded(builder.build());
+    }
+
     @Override
-    protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord) throws ParseException {
-        Query q = super.getBooleanQuery(clauses, disableCoord);
+    protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {
+        Query q = super.getBooleanQuery(clauses);
         if (q == null) {
             return null;
         }
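A standalone sketch of the pattern the new helper captures, runnable outside the parser. In Lucene 6 the QueryParser callback getBooleanQuery(List, boolean) lost its disableCoord flag, so the flag now lives on BooleanQuery.Builder (field and terms below are invented):

import java.util.Arrays;
import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

public class CoordDisabledSketch {
    static BooleanQuery coordDisabled(List<BooleanClause> clauses) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setDisableCoord(true); // score no longer scaled by how many clauses match
        for (BooleanClause clause : clauses) {
            builder.add(clause);
        }
        return builder.build();
    }

    public static void main(String[] args) {
        System.out.println(coordDisabled(Arrays.asList(
                new BooleanClause(new TermQuery(new Term("f", "a")), Occur.SHOULD),
                new BooleanClause(new TermQuery(new Term("f", "b")), Occur.SHOULD))));
    }
}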
@@ -769,7 +784,6 @@ public class MapperQueryParser extends QueryParser {
             }
             pq = builder.build();
             //make sure that the boost hasn't been set beforehand, otherwise we'd lose it
-            assert q.getBoost() == 1f;
             assert q instanceof BoostQuery == false;
             return pq;
         } else if (q instanceof MultiPhraseQuery) {
@@ -26,8 +26,7 @@ import java.io.IOException;
 /**
  * Abstract decorator class of a DocIdSetIterator
  * implementation that provides on-demand filter/validation
- * mechanism on an underlying DocIdSetIterator. See {@link
- * FilteredDocIdSet}.
+ * mechanism on an underlying DocIdSetIterator.
  */
 public abstract class XFilteredDocIdSetIterator extends DocIdSetIterator {
     protected DocIdSetIterator _innerIter;
@@ -87,7 +87,7 @@ public class CustomFieldQuery extends FieldQuery {
         if (numTerms > 16) {
             for (Term[] currentPosTerm : terms) {
                 for (Term term : currentPosTerm) {
-                    super.flatten(new TermQuery(term), reader, flatQueries, orig.getBoost());
+                    super.flatten(new TermQuery(term), reader, flatQueries, 1F);
                 }
             }
             return;
@@ -104,7 +104,7 @@ public class CustomFieldQuery extends FieldQuery {
                 queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
             }
             Query query = queryBuilder.build();
-            this.flatten(query, reader, flatQueries, orig.getBoost());
+            this.flatten(query, reader, flatQueries, 1F);
         } else {
             Term[] t = terms.get(currentPos);
             for (int i = 0; i < t.length; i++) {
@@ -39,8 +39,9 @@ public class Version {
     // AA values below 50 are beta builds, and below 99 are RC builds, with 99 indicating a release
     // the (internal) format of the id is there so we can easily do after/before checks on the id
 
-    // NOTE: indexes created with 3.6 use this constant for e.g. analysis chain emulation (imperfect)
-    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_4_0_0;
+    // NOTE: ancient indexes created before 5.0 use this constant for e.g. analysis chain emulation (imperfect)
+    // its named lucene 3 but also lucene 4 or anything else we no longer support.
+    public static final org.apache.lucene.util.Version LUCENE_3_EMULATION_VERSION = org.apache.lucene.util.Version.LUCENE_5_0_0;
 
     public static final int V_0_18_0_ID = /*00*/180099;
     public static final Version V_0_18_0 = new Version(V_0_18_0_ID, LUCENE_3_EMULATION_VERSION);
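The id layout described in the comment above decodes mechanically. A sketch using V_1_2_3_ID = 1020399 from the listing that follows, assuming two decimal digits per component:

public class VersionIdSketch {
    public static void main(String[] args) {
        int id = 1020399;                  // V_1_2_3_ID from the constants below
        int major = id / 1000000 % 100;    // 1
        int minor = id / 10000 % 100;      // 2
        int revision = id / 100 % 100;     // 3
        int build = id % 100;              // 99 = GA release; <50 beta, 50-98 RC
        System.out.printf("%d.%d.%d (build %d)%n", major, minor, revision, build);
    }
}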
@@ -117,130 +118,130 @@ public class Version {
     public static final Version V_0_20_6 = new Version(V_0_20_6_ID, LUCENE_3_EMULATION_VERSION);
 
     public static final int V_0_90_0_Beta1_ID = /*00*/900001;
-    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
+    public static final Version V_0_90_0_Beta1 = new Version(V_0_90_0_Beta1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_0_RC1_ID = /*00*/900051;
-    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_1);
+    public static final Version V_0_90_0_RC1 = new Version(V_0_90_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_0_RC2_ID = /*00*/900052;
-    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_2);
+    public static final Version V_0_90_0_RC2 = new Version(V_0_90_0_RC2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_0_ID = /*00*/900099;
-    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, org.apache.lucene.util.Version.LUCENE_4_2);
+    public static final Version V_0_90_0 = new Version(V_0_90_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_1_ID = /*00*/900199;
-    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, org.apache.lucene.util.Version.LUCENE_4_3);
+    public static final Version V_0_90_1 = new Version(V_0_90_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_2_ID = /*00*/900299;
-    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, org.apache.lucene.util.Version.LUCENE_4_3);
+    public static final Version V_0_90_2 = new Version(V_0_90_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_3_ID = /*00*/900399;
-    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, org.apache.lucene.util.Version.LUCENE_4_4);
+    public static final Version V_0_90_3 = new Version(V_0_90_3_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_4_ID = /*00*/900499;
-    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, org.apache.lucene.util.Version.LUCENE_4_4);
+    public static final Version V_0_90_4 = new Version(V_0_90_4_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_5_ID = /*00*/900599;
-    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, org.apache.lucene.util.Version.LUCENE_4_4);
+    public static final Version V_0_90_5 = new Version(V_0_90_5_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_6_ID = /*00*/900699;
-    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, org.apache.lucene.util.Version.LUCENE_4_5);
+    public static final Version V_0_90_6 = new Version(V_0_90_6_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_7_ID = /*00*/900799;
-    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, org.apache.lucene.util.Version.LUCENE_4_5);
+    public static final Version V_0_90_7 = new Version(V_0_90_7_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_8_ID = /*00*/900899;
-    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_0_90_8 = new Version(V_0_90_8_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_9_ID = /*00*/900999;
-    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_0_90_9 = new Version(V_0_90_9_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_10_ID = /*00*/901099;
-    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_0_90_10 = new Version(V_0_90_10_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_11_ID = /*00*/901199;
-    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_0_90_11 = new Version(V_0_90_11_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_12_ID = /*00*/901299;
-    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_0_90_12 = new Version(V_0_90_12_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_0_90_13_ID = /*00*/901399;
-    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_0_90_13 = new Version(V_0_90_13_ID, LUCENE_3_EMULATION_VERSION);
 
     public static final int V_1_0_0_Beta1_ID = 1000001;
-    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_5);
+    public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_0_Beta2_ID = 1000002;
-    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_0_RC1_ID = 1000051;
-    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_0_RC1 = new Version(V_1_0_0_RC1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_0_RC2_ID = 1000052;
-    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_0_RC2 = new Version(V_1_0_0_RC2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_0_ID = 1000099;
-    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_0 = new Version(V_1_0_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_1_ID = 1000199;
-    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_1 = new Version(V_1_0_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_2_ID = 1000299;
-    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_2 = new Version(V_1_0_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_0_3_ID = 1000399;
-    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, org.apache.lucene.util.Version.LUCENE_4_6);
+    public static final Version V_1_0_3 = new Version(V_1_0_3_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_1_0_ID = 1010099;
-    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, org.apache.lucene.util.Version.LUCENE_4_7);
+    public static final Version V_1_1_0 = new Version(V_1_1_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_1_1_ID = 1010199;
-    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, org.apache.lucene.util.Version.LUCENE_4_7);
+    public static final Version V_1_1_1 = new Version(V_1_1_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_1_2_ID = 1010299;
-    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, org.apache.lucene.util.Version.LUCENE_4_7);
+    public static final Version V_1_1_2 = new Version(V_1_1_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_2_0_ID = 1020099;
-    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, org.apache.lucene.util.Version.LUCENE_4_8);
+    public static final Version V_1_2_0 = new Version(V_1_2_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_2_1_ID = 1020199;
-    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, org.apache.lucene.util.Version.LUCENE_4_8);
+    public static final Version V_1_2_1 = new Version(V_1_2_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_2_2_ID = 1020299;
-    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, org.apache.lucene.util.Version.LUCENE_4_8);
+    public static final Version V_1_2_2 = new Version(V_1_2_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_2_3_ID = 1020399;
-    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, org.apache.lucene.util.Version.LUCENE_4_8);
+    public static final Version V_1_2_3 = new Version(V_1_2_3_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_2_4_ID = 1020499;
-    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, org.apache.lucene.util.Version.LUCENE_4_8);
+    public static final Version V_1_2_4 = new Version(V_1_2_4_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_0_ID = 1030099;
-    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_0 = new Version(V_1_3_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_1_ID = 1030199;
-    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_1 = new Version(V_1_3_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_2_ID = 1030299;
-    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_2 = new Version(V_1_3_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_3_ID = 1030399;
-    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_3 = new Version(V_1_3_3_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_4_ID = 1030499;
-    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_4 = new Version(V_1_3_4_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_5_ID = 1030599;
-    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_5 = new Version(V_1_3_5_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_6_ID = 1030699;
-    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_6 = new Version(V_1_3_6_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_7_ID = 1030799;
-    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_7 = new Version(V_1_3_7_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_8_ID = 1030899;
-    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_8 = new Version(V_1_3_8_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_3_9_ID = 1030999;
-    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, org.apache.lucene.util.Version.LUCENE_4_9);
+    public static final Version V_1_3_9 = new Version(V_1_3_9_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_0_Beta1_ID = 1040001;
-    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, org.apache.lucene.util.Version.LUCENE_4_10_1);
+    public static final Version V_1_4_0_Beta1 = new Version(V_1_4_0_Beta1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_0_ID = 1040099;
-    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
+    public static final Version V_1_4_0 = new Version(V_1_4_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_1_ID = 1040199;
-    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
+    public static final Version V_1_4_1 = new Version(V_1_4_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_2_ID = 1040299;
-    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_2);
+    public static final Version V_1_4_2 = new Version(V_1_4_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_3_ID = 1040399;
-    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
+    public static final Version V_1_4_3 = new Version(V_1_4_3_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_4_ID = 1040499;
-    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_3);
+    public static final Version V_1_4_4 = new Version(V_1_4_4_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_4_5_ID = 1040599;
-    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_4_5 = new Version(V_1_4_5_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_5_0_ID = 1050099;
-    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_5_0 = new Version(V_1_5_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_5_1_ID = 1050199;
-    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_5_1 = new Version(V_1_5_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_5_2_ID = 1050299;
-    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_5_2 = new Version(V_1_5_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_6_0_ID = 1060099;
-    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_6_0 = new Version(V_1_6_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_6_1_ID = 1060199;
-    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_6_1 = new Version(V_1_6_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_6_2_ID = 1060299;
-    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_6_2 = new Version(V_1_6_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_7_0_ID = 1070099;
-    public static final Version V_1_7_0 = new Version(V_1_7_0_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_0 = new Version(V_1_7_0_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_7_1_ID = 1070199;
-    public static final Version V_1_7_1 = new Version(V_1_7_1_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_1 = new Version(V_1_7_1_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_7_2_ID = 1070299;
-    public static final Version V_1_7_2 = new Version(V_1_7_2_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_2 = new Version(V_1_7_2_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_7_3_ID = 1070399;
-    public static final Version V_1_7_3 = new Version(V_1_7_3_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_3 = new Version(V_1_7_3_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_7_4_ID = 1070499;
-    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_4 = new Version(V_1_7_4_ID, LUCENE_3_EMULATION_VERSION);
     public static final int V_1_7_5_ID = 1070599;
-    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, org.apache.lucene.util.Version.LUCENE_4_10_4);
+    public static final Version V_1_7_5 = new Version(V_1_7_5_ID, LUCENE_3_EMULATION_VERSION);
 
     public static final int V_2_0_0_beta1_ID = 2000001;
     public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -265,7 +266,7 @@ public class Version {
     public static final int V_2_3_0_ID = 2030099;
    public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_ID = 5000099;
-    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final Version V_5_0_0 = new Version(V_5_0_0_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final Version CURRENT = V_5_0_0;
 
     static {
@@ -29,7 +29,7 @@ public class ShapesAvailability {
     static {
         boolean xSPATIAL4J_AVAILABLE;
         try {
-            Class.forName("com.spatial4j.core.shape.impl.PointImpl");
+            Class.forName("org.locationtech.spatial4j.shape.impl.PointImpl");
             xSPATIAL4J_AVAILABLE = true;
         } catch (Throwable t) {
             xSPATIAL4J_AVAILABLE = false;
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.common.geo;
 
-import com.spatial4j.core.context.SpatialContext;
-import com.spatial4j.core.shape.Shape;
-import com.spatial4j.core.shape.ShapeCollection;
+import org.locationtech.spatial4j.context.SpatialContext;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.ShapeCollection;
 
 import java.util.List;
 
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Circle;
+import org.locationtech.spatial4j.shape.Circle;
 import com.vividsolutions.jts.geom.Coordinate;
 
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Rectangle;
+import org.locationtech.spatial4j.shape.Rectangle;
 import com.vividsolutions.jts.geom.Coordinate;
 
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.geo.XShapeCollection;
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.LineString;
@@ -19,8 +19,8 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 
 import org.elasticsearch.common.geo.XShapeCollection;
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 
 import org.elasticsearch.common.geo.XShapeCollection;
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.shape.Point;
+import org.locationtech.spatial4j.shape.Point;
 import com.vividsolutions.jts.geom.Coordinate;
 
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -19,8 +19,8 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.exception.InvalidShapeException;
-import com.spatial4j.core.shape.Shape;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;
@@ -19,10 +19,10 @@
 
 package org.elasticsearch.common.geo.builders;
 
-import com.spatial4j.core.context.jts.JtsSpatialContext;
-import com.spatial4j.core.exception.InvalidShapeException;
-import com.spatial4j.core.shape.Shape;
-import com.spatial4j.core.shape.jts.JtsGeometry;
+import org.locationtech.spatial4j.context.jts.JtsSpatialContext;
+import org.locationtech.spatial4j.exception.InvalidShapeException;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.jts.JtsGeometry;
 import com.vividsolutions.jts.geom.Coordinate;
 import com.vividsolutions.jts.geom.Geometry;
 import com.vividsolutions.jts.geom.GeometryFactory;
@@ -81,9 +81,9 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
      * this normally isn't allowed.
      */
     protected final boolean multiPolygonMayOverlap = false;
-    /** @see com.spatial4j.core.shape.jts.JtsGeometry#validate() */
+    /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#validate() */
     protected final boolean autoValidateJtsGeometry = true;
-    /** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */
+    /** @see org.locationtech.spatial4j.shape.jts.JtsGeometry#index() */
     protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it.
 
     protected ShapeBuilder() {
@@ -1,74 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.lucene;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Weight;
-
-import java.io.IOException;
-import java.util.Objects;
-
-/**
- * Base implementation for a query which is cacheable at the index level but
- * not the segment level as usually expected.
- */
-public abstract class IndexCacheableQuery extends Query {
-
-    private Object readerCacheKey;
-
-    @Override
-    public Query rewrite(IndexReader reader) throws IOException {
-        if (reader.getCoreCacheKey() != this.readerCacheKey) {
-            IndexCacheableQuery rewritten = (IndexCacheableQuery) clone();
-            rewritten.readerCacheKey = reader.getCoreCacheKey();
-            return rewritten;
-        }
-        return super.rewrite(reader);
-    }
-
-    @Override
-    public boolean equals(Object obj) {
-        return super.equals(obj)
-                && readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey;
-    }
-
-    @Override
-    public int hashCode() {
-        return 31 * super.hashCode() + Objects.hashCode(readerCacheKey);
-    }
-
-    @Override
-    public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
-        if (readerCacheKey == null) {
-            throw new IllegalStateException("Rewrite first");
-        }
-        if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) {
-            throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting");
-        }
-        return doCreateWeight(searcher, needsScores);
-    }
-
-    /** Create a {@link Weight} for this query.
-     *  @see Query#createWeight(IndexSearcher, boolean)
-     */
-    public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException;
-}
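Note: this file can be deleted outright because Lucene 6 queries are effectively immutable -- clone() and the boost accessors are gone -- so the clone-and-stash-a-cache-key trick above no longer compiles, and index-level caching is the query cache's job. A hedged sketch (the WrappingQuery class is illustrative, not from the codebase) of the Lucene 6 idiom that replaces clone-based rewrites, mirroring the FunctionScoreQuery changes later in this patch:

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.Query;

    public final class WrappingQuery extends Query {
        private final Query child;

        public WrappingQuery(Query child) {
            this.child = child;
        }

        @Override
        public Query rewrite(IndexReader reader) throws IOException {
            Query rewritten = child.rewrite(reader);
            if (rewritten != child) {
                return new WrappingQuery(rewritten); // new immutable instance, no clone()
            }
            return super.rewrite(reader);
        }

        @Override
        public String toString(String field) {
            return "wrapping(" + child.toString(field) + ")";
        }

        @Override
        public boolean equals(Object obj) {
            return obj instanceof WrappingQuery && child.equals(((WrappingQuery) obj).child);
        }

        @Override
        public int hashCode() {
            return 31 * getClass().hashCode() + child.hashCode();
        }
    }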
@@ -88,7 +88,7 @@ import java.util.Objects;
 public class Lucene {
     public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
     public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
-    public static final String LATEST_CODEC = "Lucene54";
+    public static final String LATEST_CODEC = "Lucene60";
 
     static {
         Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);
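Note: only the codec constant moves to the 6.0 file format; the doc-values and postings format names stay on their 5.x implementations, which Lucene 6 still ships. A quick hedged sanity check that these names resolve through Lucene's SPI (standard Codec/PostingsFormat lookups, not code from the patch):

    import org.apache.lucene.codecs.Codec;
    import org.apache.lucene.codecs.PostingsFormat;

    public class CodecNamesCheck {
        public static void main(String[] args) {
            // Both lookups throw IllegalArgumentException for unknown names.
            Codec codec = Codec.forName("Lucene60");
            PostingsFormat postings = PostingsFormat.forName("Lucene50");
            System.out.println(codec.getName() + " / " + postings.getName());
        }
    }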
@@ -236,13 +236,8 @@ public class Lucene {
             protected Object doBody(String segmentFileName) throws IOException {
                 try (IndexInput input = directory.openInput(segmentFileName, IOContext.READ)) {
                     final int format = input.readInt();
-                    final int actualFormat;
                     if (format == CodecUtil.CODEC_MAGIC) {
-                        // 4.0+
-                        actualFormat = CodecUtil.checkHeaderNoMagic(input, "segments", SegmentInfos.VERSION_40, Integer.MAX_VALUE);
-                        if (actualFormat >= SegmentInfos.VERSION_48) {
-                            CodecUtil.checksumEntireFile(input);
-                        }
+                        CodecUtil.checksumEntireFile(input);
                     }
                     // legacy....
                 }
@@ -382,7 +377,7 @@ public class Lucene {
             writeMissingValue(out, comparatorSource.missingValue(sortField.getReverse()));
         } else {
             writeSortType(out, sortField.getType());
-            writeMissingValue(out, sortField.missingValue);
+            writeMissingValue(out, sortField.getMissingValue());
         }
         out.writeBoolean(sortField.getReverse());
     }
@@ -684,7 +679,7 @@ public class Lucene {
             segmentsFileName = infos.getSegmentsFileName();
             this.dir = dir;
             userData = infos.getUserData();
-            files = Collections.unmodifiableCollection(infos.files(dir, true));
+            files = Collections.unmodifiableCollection(infos.files(true));
             generation = infos.getGeneration();
             segmentCount = infos.size();
         }
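Note: two small API shifts surface here -- the sort missing value is now read through SortField.getMissingValue() instead of the field, and SegmentInfos.files() lost its redundant Directory argument. A hedged sketch of the new signatures (the commitFiles helper is hypothetical):

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.lucene.index.SegmentInfos;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.store.Directory;

    public class LuceneSixSignatures {
        // Hypothetical helper: list all files of the latest commit; the boolean
        // says whether the segments_N file itself is included.
        static Collection<String> commitFiles(Directory dir) throws IOException {
            SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
            return infos.files(true); // was infos.files(dir, true)
        }

        static Object missing(SortField sortField) {
            return sortField.getMissingValue(); // was direct access: sortField.missingValue
        }
    }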
@@ -226,7 +226,7 @@ public final class AllTermQuery extends Query {
 
     @Override
     public String toString(String field) {
-        return new TermQuery(term).toString(field) + ToStringUtils.boost(getBoost());
+        return new TermQuery(term).toString(field);
     }
 
 }
@@ -30,7 +30,6 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.BitDocIdSet;
 import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
@@ -118,9 +117,7 @@ public class FilterableTermsEnum extends TermsEnum {
                     };
                 }
 
-                BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
-                builder.or(docs);
-                bits = builder.build().bits();
+                bits = BitSet.of(docs, context.reader().maxDoc());
 
                 // Count how many docs are in our filtered set
                 // TODO make this lazy-loaded only for those that need it?
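Note: BitDocIdSet.Builder is gone; BitSet.of builds the bit set from a DocIdSetIterator directly. A hedged one-method sketch of the replacement call (the collect helper is illustrative):

    import java.io.IOException;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BitSet;

    public class BitSetOfSketch {
        // Consumes the iterator and returns a bit set sized to maxDoc; this is
        // the one-liner that replaces builder.or(docs) + build().bits().
        static BitSet collect(DocIdSetIterator docs, int maxDoc) throws IOException {
            return BitSet.of(docs, maxDoc);
        }
    }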
@@ -30,7 +30,6 @@ import org.apache.lucene.search.MultiPhraseQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.StringHelper;
-import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -51,7 +50,7 @@ public class MultiPhrasePrefixQuery extends Query {
     /**
      * Sets the phrase slop for this query.
      *
-     * @see org.apache.lucene.search.PhraseQuery#setSlop(int)
+     * @see org.apache.lucene.search.PhraseQuery.Builder#setSlop(int)
      */
     public void setSlop(int s) {
         slop = s;
@@ -64,7 +63,7 @@ public class MultiPhrasePrefixQuery extends Query {
     /**
      * Sets the phrase slop for this query.
      *
-     * @see org.apache.lucene.search.PhraseQuery#getSlop()
+     * @see org.apache.lucene.search.PhraseQuery.Builder#getSlop()
     */
     public int getSlop() {
         return slop;
@@ -73,7 +72,7 @@ public class MultiPhrasePrefixQuery extends Query {
     /**
      * Add a single term at the next position in the phrase.
      *
-     * @see org.apache.lucene.search.PhraseQuery#add(Term)
+     * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term)
     */
     public void add(Term term) {
         add(new Term[]{term});
@@ -83,7 +82,7 @@ public class MultiPhrasePrefixQuery extends Query {
      * Add multiple terms at the next position in the phrase. Any of the terms
      * may match.
      *
-     * @see org.apache.lucene.search.PhraseQuery#add(Term)
+     * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term)
      */
     public void add(Term[] terms) {
         int position = 0;
@@ -98,7 +97,7 @@ public class MultiPhrasePrefixQuery extends Query {
      *
      * @param terms the terms
      * @param position the position of the terms provided as argument
-     * @see org.apache.lucene.search.PhraseQuery#add(Term, int)
+     * @see org.apache.lucene.search.PhraseQuery.Builder#add(Term, int)
      */
     public void add(Term[] terms, int position) {
         if (termArrays.size() == 0)
@@ -231,8 +230,6 @@ public class MultiPhrasePrefixQuery extends Query {
             buffer.append(slop);
         }
 
-        buffer.append(ToStringUtils.boost(getBoost()));
-
         return buffer.toString();
     }
 
@@ -23,7 +23,6 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.BoostQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.PrefixQuery;
@@ -132,11 +131,7 @@ public class Queries {
                 builder.add(clause);
             }
             builder.setMinimumNumberShouldMatch(msm);
-            BooleanQuery bq = builder.build();
-            if (query.getBoost() != 1f) {
-                return new BoostQuery(bq, query.getBoost());
-            }
-            return bq;
+            return builder.build();
         } else {
             return query;
         }
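Note: both files shed boost handling because Query.getBoost()/setBoost() were removed in Lucene 6; a boost is now a BoostQuery wrapper, so applyMinimumShouldMatch no longer re-applies it and toString output no longer prints it. A hedged sketch of the wrapper idiom (standard Lucene classes, arbitrary term):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    public class BoostWrapperSketch {
        public static void main(String[] args) {
            Query base = new TermQuery(new Term("field", "value"));
            // Immutable wrapper instead of the removed base.setBoost(2f).
            Query boosted = new BoostQuery(base, 2.0f);
            System.out.println(boosted); // prints something like (field:value)^2.0
        }
    }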
@@ -29,7 +29,6 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ToStringUtils;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -102,7 +101,7 @@ public class FiltersFunctionScoreQuery extends Query {
         }
     }
 
-    Query subQuery;
+    final Query subQuery;
     final FilterFunction[] filterFunctions;
     final ScoreMode scoreMode;
     final float maxBoost;
@@ -136,9 +135,7 @@ public class FiltersFunctionScoreQuery extends Query {
         Query newQ = subQuery.rewrite(reader);
         if (newQ == subQuery)
             return this;
-        FiltersFunctionScoreQuery bq = (FiltersFunctionScoreQuery) this.clone();
-        bq.subQuery = newQ;
-        return bq;
+        return new FiltersFunctionScoreQuery(newQ, scoreMode, filterFunctions, maxBoost, minScore, combineFunction);
     }
 
     @Override
@@ -355,7 +352,6 @@ public class FiltersFunctionScoreQuery extends Query {
             sb.append("{filter(").append(filterFunction.filter).append("), function [").append(filterFunction.function).append("]}");
         }
         sb.append("])");
-        sb.append(ToStringUtils.boost(getBoost()));
         return sb.toString();
     }
 
@@ -28,7 +28,6 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.ToStringUtils;
 
 import java.io.IOException;
 import java.util.Objects;
@@ -41,7 +40,7 @@ public class FunctionScoreQuery extends Query {
 
     public static final float DEFAULT_MAX_BOOST = Float.MAX_VALUE;
 
-    Query subQuery;
+    final Query subQuery;
     final ScoreFunction function;
     final float maxBoost;
     final CombineFunction combineFunction;
@@ -84,9 +83,7 @@ public class FunctionScoreQuery extends Query {
         if (newQ == subQuery) {
             return this;
         }
-        FunctionScoreQuery bq = (FunctionScoreQuery) this.clone();
-        bq.subQuery = newQ;
-        return bq;
+        return new FunctionScoreQuery(newQ, function, minScore, combineFunction, maxBoost);
     }
 
     @Override
@@ -205,7 +202,6 @@ public class FunctionScoreQuery extends Query {
     public String toString(String field) {
         StringBuilder sb = new StringBuilder();
         sb.append("function score (").append(subQuery.toString(field)).append(",function=").append(function).append(')');
-        sb.append(ToStringUtils.boost(getBoost()));
         return sb.toString();
     }
 
@@ -30,7 +30,7 @@ public class FilterIndexOutput extends IndexOutput {
     protected final IndexOutput out;
 
     public FilterIndexOutput(String resourceDescription, IndexOutput out) {
-        super(resourceDescription);
+        super(resourceDescription, out.getName());
         this.out = out;
     }
 
@@ -41,9 +41,9 @@ public class BigArrays {
 
     /** Page size in bytes: 16KB */
     public static final int PAGE_SIZE_IN_BYTES = 1 << 14;
-    public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_BYTE;
-    public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_INT;
-    public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_LONG;
+    public static final int BYTE_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES;
+    public static final int INT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Integer.BYTES;
+    public static final int LONG_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / Long.BYTES;
     public static final int OBJECT_PAGE_SIZE = BigArrays.PAGE_SIZE_IN_BYTES / RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 
     /** Returns the next size to grow when working with parallel arrays that may have different page sizes or number of bytes per element. */
@@ -490,7 +490,7 @@ public class BigArrays {
         if (minSize <= array.size()) {
             return array;
         }
-        final long newSize = overSize(minSize, BYTE_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_BYTE);
+        final long newSize = overSize(minSize, BYTE_PAGE_SIZE, 1);
         return resize(array, newSize);
     }
 
@@ -573,7 +573,7 @@ public class BigArrays {
         if (minSize <= array.size()) {
             return array;
         }
-        final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_INT);
+        final long newSize = overSize(minSize, INT_PAGE_SIZE, Integer.BYTES);
         return resize(array, newSize);
     }
 
@@ -623,7 +623,7 @@ public class BigArrays {
         if (minSize <= array.size()) {
             return array;
         }
-        final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
+        final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
         return resize(array, newSize);
     }
 
@@ -670,7 +670,7 @@ public class BigArrays {
         if (minSize <= array.size()) {
             return array;
         }
-        final long newSize = overSize(minSize, LONG_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_LONG);
+        final long newSize = overSize(minSize, LONG_PAGE_SIZE, Long.BYTES);
         return resize(array, newSize);
     }
 
@@ -717,7 +717,7 @@ public class BigArrays {
         if (minSize <= array.size()) {
             return array;
         }
-        final long newSize = overSize(minSize, INT_PAGE_SIZE, RamUsageEstimator.NUM_BYTES_FLOAT);
+        final long newSize = overSize(minSize, INT_PAGE_SIZE, Float.BYTES);
         return resize(array, newSize);
     }
 
@@ -127,7 +127,7 @@ final class BigByteArray extends AbstractBigArray implements ByteArray {
 
     @Override
     protected int numBytesPerElement() {
-        return RamUsageEstimator.NUM_BYTES_BYTE;
+        return 1;
     }
 
     /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@@ -71,7 +71,7 @@ final class BigDoubleArray extends AbstractBigArray implements DoubleArray {
 
     @Override
     protected int numBytesPerElement() {
-        return RamUsageEstimator.NUM_BYTES_INT;
+        return Integer.BYTES;
     }
 
     /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@@ -71,7 +71,7 @@ final class BigFloatArray extends AbstractBigArray implements FloatArray {
 
     @Override
     protected int numBytesPerElement() {
-        return RamUsageEstimator.NUM_BYTES_FLOAT;
+        return Float.BYTES;
     }
 
     /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@@ -88,7 +88,7 @@ final class BigIntArray extends AbstractBigArray implements IntArray {
 
     @Override
     protected int numBytesPerElement() {
-        return RamUsageEstimator.NUM_BYTES_INT;
+        return Integer.BYTES;
     }
 
     /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@@ -70,7 +70,7 @@ final class BigLongArray extends AbstractBigArray implements LongArray {
 
     @Override
     protected int numBytesPerElement() {
-        return RamUsageEstimator.NUM_BYTES_LONG;
+        return Long.BYTES;
     }
 
     /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@@ -65,7 +65,7 @@ final class BigObjectArray<T> extends AbstractBigArray implements ObjectArray<T>
 
     @Override
     protected int numBytesPerElement() {
-        return RamUsageEstimator.NUM_BYTES_INT;
+        return Integer.BYTES;
     }
 
     /** Change the size of this array. Content between indexes <code>0</code> and <code>min(size(), newSize)</code> will be preserved. */
@@ -388,7 +388,7 @@ public class BloomFilter {
         }
 
         public long ramBytesUsed() {
-            return RamUsageEstimator.NUM_BYTES_LONG * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
+            return Long.BYTES * data.length + RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + 16;
        }
     }
 
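Note: Lucene 6 drops most RamUsageEstimator.NUM_BYTES_* constants because Java 8 provides them on the boxed types; only the object-related sizes (NUM_BYTES_OBJECT_REF, NUM_BYTES_ARRAY_HEADER) remain. A quick sketch confirming the page-size arithmetic is unchanged (plain JDK, nothing from the patch):

    public class PageSizeArithmetic {
        public static void main(String[] args) {
            final int pageSizeInBytes = 1 << 14; // BigArrays.PAGE_SIZE_IN_BYTES
            System.out.println(Integer.BYTES);   // 4, replaces NUM_BYTES_INT
            System.out.println(Long.BYTES);      // 8, replaces NUM_BYTES_LONG
            System.out.println(Float.BYTES);     // 4, replaces NUM_BYTES_FLOAT
            System.out.println(pageSizeInBytes / Integer.BYTES); // INT_PAGE_SIZE == 4096
            System.out.println(pageSizeInBytes / Long.BYTES);    // LONG_PAGE_SIZE == 2048
        }
    }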
@@ -333,7 +333,7 @@ public class CollectionUtils {
         assert indices.length >= numValues;
         if (numValues > 1) {
             new InPlaceMergeSorter() {
-                final Comparator<BytesRef> comparator = BytesRef.getUTF8SortedAsUnicodeComparator();
+                final Comparator<BytesRef> comparator = Comparator.naturalOrder();
                 @Override
                 protected int compare(int i, int j) {
                     return comparator.compare(bytes.get(scratch, indices[i]), bytes.get(scratch1, indices[j]));
@@ -116,7 +116,7 @@ public abstract class MetaDataStateFormat<T> {
         final Path finalStatePath = stateLocation.resolve(fileName);
         try {
             final String resourceDesc = "MetaDataStateFormat.write(path=\"" + tmpStatePath + "\")";
-            try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) {
+            try (OutputStreamIndexOutput out = new OutputStreamIndexOutput(resourceDesc, fileName, Files.newOutputStream(tmpStatePath), BUFFER_SIZE)) {
                 CodecUtil.writeHeader(out, STATE_FILE_CODEC, STATE_FILE_VERSION);
                 out.writeInt(format.index());
                 out.writeLong(version);
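Note: IndexOutput instances carry a logical file name in Lucene 6 (getName()), which is why OutputStreamIndexOutput gains a name argument here and FilterIndexOutput above forwards out.getName() to its super constructor. A hedged, self-contained sketch of the new constructor (the resource string and file name are made up):

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.lucene.store.OutputStreamIndexOutput;

    public class NamedIndexOutputSketch {
        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            // Lucene 6 signature: (resourceDescription, name, out, bufferSize).
            try (OutputStreamIndexOutput out =
                    new OutputStreamIndexOutput("example(path=\"/tmp/state\")", "state-1.st", bytes, 4096)) {
                out.writeInt(42);
                System.out.println(out.getName()); // "state-1.st"
            }
        }
    }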
@@ -20,8 +20,8 @@
 package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.NumericTokenStream;
 import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.analysis.ar.ArabicAnalyzer;
 import org.apache.lucene.analysis.bg.BulgarianAnalyzer;
 import org.apache.lucene.analysis.br.BrazilianAnalyzer;
@@ -300,7 +300,7 @@ public class Analysis {
      * <p>Although most analyzers generate character terms (CharTermAttribute),
      * some token only contain binary terms (BinaryTermAttribute,
      * CharTermAttribute being a special type of BinaryTermAttribute), such as
-     * {@link NumericTokenStream} and unsuitable for highlighting and
+     * {@link LegacyNumericTokenStream} and unsuitable for highlighting and
      * more-like-this queries which expect character terms.</p>
      */
     public static boolean isCharacterTokenStream(TokenStream tokenStream) {
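Note: Lucene 6 renames the trie-encoded numeric classes to Legacy* (LegacyNumericTokenStream, LegacyNumericUtils, ...) and deprecates them in favor of dimensional points, so this hunk and the Numeric*Tokenizer ones below are pure renames. A hedged sketch of the renamed stream in use (values are arbitrary):

    import org.apache.lucene.analysis.LegacyNumericTokenStream;

    public class LegacyNumericSketch {
        public static void main(String[] args) throws Exception {
            // Same API as the old NumericTokenStream, now deprecated in favor of points.
            try (LegacyNumericTokenStream stream = new LegacyNumericTokenStream(4)) {
                stream.setLongValue(1457049600000L);
                stream.reset();
                int tokens = 0;
                while (stream.incrementToken()) {
                    tokens++; // one token per precision shift of the encoded value
                }
                stream.end();
                System.out.println(tokens);
            }
        }
    }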
@@ -21,10 +21,8 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
-import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
 import org.apache.lucene.analysis.ngram.NGramTokenFilter;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -43,14 +41,11 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
     public static final int SIDE_BACK = 2;
     private final int side;
 
-    private org.elasticsearch.Version esVersion;
-
     public EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         this.minGram = settings.getAsInt("min_gram", NGramTokenFilter.DEFAULT_MIN_NGRAM_SIZE);
         this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
         this.side = parseSide(settings.get("side", "front"));
-        this.esVersion = org.elasticsearch.Version.indexCreated(indexSettings.getSettings());
     }
 
     static int parseSide(String side) {
@@ -70,15 +65,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
             result = new ReverseStringFilter(result);
         }
 
-        if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
-            /*
-             * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
-             * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
-             */
-            result = new EdgeNGramTokenFilter(result, minGram, maxGram);
-        } else {
-            result = new Lucene43EdgeNGramTokenFilter(result, minGram, maxGram);
-        }
+        result = new EdgeNGramTokenFilter(result, minGram, maxGram);
 
         // side=BACK is not supported anymore but applying ReverseStringFilter up-front and after the token filter has the same effect
         if (side == SIDE_BACK) {
@@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
-import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -33,55 +31,33 @@ import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenC
 /**
  *
  */
-@SuppressWarnings("deprecation")
 public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
 
     private final int minGram;
 
     private final int maxGram;
 
-    private final Lucene43EdgeNGramTokenizer.Side side;
-
     private final CharMatcher matcher;
 
-    protected org.elasticsearch.Version esVersion;
-
 
     public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
         this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
-        this.side = Lucene43EdgeNGramTokenizer.Side.getSide(settings.get("side", Lucene43EdgeNGramTokenizer.DEFAULT_SIDE.getLabel()));
         this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
-        this.esVersion = indexSettings.getIndexVersionCreated();
     }
 
     @Override
     public Tokenizer create() {
-        if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
-            /*
-             * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
-             * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
-             */
-            if (side == Lucene43EdgeNGramTokenizer.Side.BACK) {
-                throw new IllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use"
-                    + " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs."
-                    + " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back"
-                    + " in combination with a \"keyword\" tokenizer");
-            }
-            final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // always use 4.4 or higher
-            if (matcher == null) {
-                return new EdgeNGramTokenizer(minGram, maxGram);
-            } else {
-                return new EdgeNGramTokenizer(minGram, maxGram) {
-                    @Override
-                    protected boolean isTokenChar(int chr) {
-                        return matcher.isTokenChar(chr);
-                    }
-                };
-            }
+        if (matcher == null) {
+            return new EdgeNGramTokenizer(minGram, maxGram);
         } else {
-            return new Lucene43EdgeNGramTokenizer(side, minGram, maxGram);
+            return new EdgeNGramTokenizer(minGram, maxGram) {
+                @Override
+                protected boolean isTokenChar(int chr) {
+                    return matcher.isTokenChar(chr);
+                }
+            };
         }
     }
 }
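Note: all of the Lucene43* compatibility analysis classes are gone in Lucene 6, along with the Lucene-version gates around them, so the factories now construct the current implementations unconditionally. A hedged sketch of the pattern the tokenizer factory keeps -- custom token_chars honored by overriding isTokenChar (Character.isLetterOrDigit stands in for the factory's CharMatcher):

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;

    public class EdgeNGramSketch {
        static Tokenizer create(int minGram, int maxGram) {
            return new EdgeNGramTokenizer(minGram, maxGram) {
                @Override
                protected boolean isTokenChar(int chr) {
                    // Stand-in predicate; the real factory consults its CharMatcher.
                    return Character.isLetterOrDigit(chr);
                }
            };
        }
    }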
@@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
-import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
@@ -40,9 +39,6 @@ import org.elasticsearch.index.IndexSettings;
  * <li>{@value #KEEP_WORDS_PATH_KEY} an reference to a file containing the words
  * / tokens to keep. Note: this is an alternative to {@value #KEEP_WORDS_KEY} if
  * both are set an exception will be thrown.</li>
- * <li>{@value #ENABLE_POS_INC_KEY} <code>true</code> iff the filter should
- * maintain position increments for dropped tokens. The default is
- * <code>true</code>.</li>
 * <li>{@value #KEEP_WORDS_CASE_KEY} to use case sensitive keep words. The
 * default is <code>false</code> which corresponds to case-sensitive.</li>
 * </ul>
@@ -51,10 +47,11 @@ import org.elasticsearch.index.IndexSettings;
  */
 public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
     private final CharArraySet keepWords;
-    private final boolean enablePositionIncrements;
     private static final String KEEP_WORDS_KEY = "keep_words";
     private static final String KEEP_WORDS_PATH_KEY = KEEP_WORDS_KEY + "_path";
     private static final String KEEP_WORDS_CASE_KEY = KEEP_WORDS_KEY + "_case"; // for javadoc
+
+    // unsupported ancient option
     private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
 
     public KeepWordFilterFactory(IndexSettings indexSettings,
@@ -68,26 +65,14 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
             throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `"
                     + KEEP_WORDS_PATH_KEY + "` to be configured");
         }
-        if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
-            throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
-                    + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
+        if (settings.get(ENABLE_POS_INC_KEY) != null) {
+            throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
         }
-        enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
-
         this.keepWords = Analysis.getWordSet(env, settings, KEEP_WORDS_KEY);
-
     }
 
     @Override
     public TokenStream create(TokenStream tokenStream) {
-        if (version.onOrAfter(Version.LUCENE_4_4)) {
-            return new KeepWordFilter(tokenStream, keepWords);
-        } else {
-            @SuppressWarnings("deprecation")
-            final TokenStream filter = new Lucene43KeepWordFilter(enablePositionIncrements, tokenStream, keepWords);
-            return filter;
-        }
+        return new KeepWordFilter(tokenStream, keepWords);
     }
-
-
 }
@@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.LengthFilter;
-import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -34,28 +32,21 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory {
 
     private final int min;
     private final int max;
-    private final boolean enablePositionIncrements;
+    // ancient unsupported option
     private static final String ENABLE_POS_INC_KEY = "enable_position_increments";
 
     public LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         min = settings.getAsInt("min", 0);
         max = settings.getAsInt("max", Integer.MAX_VALUE);
-        if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) {
-            throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use"
-                    + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
+        if (settings.get(ENABLE_POS_INC_KEY) != null) {
+            throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain");
        }
-        enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true);
     }
 
     @Override
     public TokenStream create(TokenStream tokenStream) {
-        if (version.onOrAfter(Version.LUCENE_4_4)) {
-            return new LengthFilter(tokenStream, min, max);
-        } else {
-            @SuppressWarnings("deprecation")
-            final TokenStream filter = new Lucene43LengthFilter(enablePositionIncrements, tokenStream, min, max);
-            return filter;
-        }
+        return new LengthFilter(tokenStream, min, max);
     }
 }
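Note: both filters lose their enable_position_increments escape hatch; Lucene 4.4+ filters always preserve position increments, so the setting now fails fast regardless of index version. For reference, a hedged sketch of the Lucene 6 filter the factory ends up building (the word list is arbitrary):

    import java.util.Arrays;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.miscellaneous.KeepWordFilter;
    import org.apache.lucene.analysis.util.CharArraySet;

    public class KeepWordSketch {
        static TokenStream keepOnly(TokenStream in) {
            CharArraySet keep = new CharArraySet(Arrays.asList("foo", "bar"), true);
            // Single remaining constructor: position increments are always kept.
            return new KeepWordFilter(in, keep);
        }
    }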
@@ -20,9 +20,7 @@
 package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter;
 import org.apache.lucene.analysis.ngram.NGramTokenFilter;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -44,14 +42,8 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
         this.maxGram = settings.getAsInt("max_gram", NGramTokenFilter.DEFAULT_MAX_NGRAM_SIZE);
     }
 
-    @SuppressWarnings("deprecation")
     @Override
     public TokenStream create(TokenStream tokenStream) {
-        final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // we supported it since 4.3
-        if (version.onOrAfter(Version.LUCENE_4_3)) {
-            return new NGramTokenFilter(tokenStream, minGram, maxGram);
-        } else {
-            return new Lucene43NGramTokenFilter(tokenStream, minGram, maxGram);
-        }
+        return new NGramTokenFilter(tokenStream, minGram, maxGram);
     }
 }
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.joda.time.format.DateTimeFormatter;
 
 import java.io.IOException;
@@ -30,11 +30,11 @@ import java.io.IOException;
 public class NumericDateTokenizer extends NumericTokenizer {
 
     public NumericDateTokenizer(int precisionStep, char[] buffer, DateTimeFormatter dateTimeFormatter) throws IOException {
-        super(new NumericTokenStream(precisionStep), buffer, dateTimeFormatter);
+        super(new LegacyNumericTokenStream(precisionStep), buffer, dateTimeFormatter);
     }
 
     @Override
-    protected void setValue(NumericTokenStream tokenStream, String value) {
+    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
         tokenStream.setLongValue(((DateTimeFormatter) extra).parseMillis(value));
     }
 }
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 
 import java.io.IOException;
 
@@ -29,11 +29,11 @@ import java.io.IOException;
 public class NumericDoubleTokenizer extends NumericTokenizer {
 
     public NumericDoubleTokenizer(int precisionStep, char[] buffer) throws IOException {
-        super(new NumericTokenStream(precisionStep), buffer, null);
+        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
     }
 
     @Override
-    protected void setValue(NumericTokenStream tokenStream, String value) {
+    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
         tokenStream.setDoubleValue(Double.parseDouble(value));
     }
 }
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 
 import java.io.IOException;
 
@@ -29,11 +29,11 @@ import java.io.IOException;
 public class NumericFloatTokenizer extends NumericTokenizer {
 
     public NumericFloatTokenizer(int precisionStep, char[] buffer) throws IOException {
-        super(new NumericTokenStream(precisionStep), buffer, null);
+        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
     }
 
     @Override
-    protected void setValue(NumericTokenStream tokenStream, String value) {
+    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
         tokenStream.setFloatValue(Float.parseFloat(value));
     }
 }
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 
 import java.io.IOException;
 
@@ -29,11 +29,11 @@ import java.io.IOException;
 public class NumericIntegerTokenizer extends NumericTokenizer {
 
     public NumericIntegerTokenizer(int precisionStep, char[] buffer) throws IOException {
-        super(new NumericTokenStream(precisionStep), buffer, null);
+        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
     }
 
     @Override
-    protected void setValue(NumericTokenStream tokenStream, String value) {
+    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
         tokenStream.setIntValue(Integer.parseInt(value));
     }
 }
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 
 import java.io.IOException;
 
@@ -29,11 +29,11 @@ import java.io.IOException;
 public class NumericLongTokenizer extends NumericTokenizer {
 
     public NumericLongTokenizer(int precisionStep, char[] buffer) throws IOException {
-        super(new NumericTokenStream(precisionStep), buffer, null);
+        super(new LegacyNumericTokenStream(precisionStep), buffer, null);
     }
 
     @Override
-    protected void setValue(NumericTokenStream tokenStream, String value) {
+    protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
         tokenStream.setLongValue(Long.parseLong(value));
     }
 }
@@ -19,7 +19,7 @@
 
 package org.elasticsearch.index.analysis;
 
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.util.Attribute;
 import org.apache.lucene.util.AttributeFactory;
@@ -45,12 +45,12 @@ public abstract class NumericTokenizer extends Tokenizer {
         };
     }
 
-    private final NumericTokenStream numericTokenStream;
+    private final LegacyNumericTokenStream numericTokenStream;
     private final char[] buffer;
     protected final Object extra;
     private boolean started;
 
-    protected NumericTokenizer(NumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException {
+    protected NumericTokenizer(LegacyNumericTokenStream numericTokenStream, char[] buffer, Object extra) throws IOException {
         super(delegatingAttributeFactory(numericTokenStream));
         this.numericTokenStream = numericTokenStream;
         // Add attributes from the numeric token stream, this works fine because the attribute factory delegates to numericTokenStream
@@ -95,5 +95,5 @@ public abstract class NumericTokenizer extends Tokenizer {
         numericTokenStream.close();
     }
 
-    protected abstract void setValue(NumericTokenStream tokenStream, String value);
+    protected abstract void setValue(LegacyNumericTokenStream tokenStream, String value);
 }
@@ -28,7 +28,6 @@ import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
 import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
 import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.util.Version;
@@ -64,12 +63,7 @@ public final class SnowballAnalyzer extends Analyzer {
      and a {@link SnowballFilter} */
   @Override
   public TokenStreamComponents createComponents(String fieldName) {
-    final Tokenizer tokenizer;
-    if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) {
-      tokenizer = new StandardTokenizer();
-    } else {
-      tokenizer = new StandardTokenizer40();
-    }
+    final Tokenizer tokenizer = new StandardTokenizer();
     TokenStream result = tokenizer;
     // remove the possessive 's for english stemmers
     if (name.equals("English") || name.equals("Porter") || name.equals("Lovins"))
@@ -26,10 +26,8 @@ import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.analysis.util.StopwordAnalyzerBase;
-import org.apache.lucene.util.Version;
 
 public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
 
@@ -47,12 +45,7 @@ public class StandardHtmlStripAnalyzer extends StopwordAnalyzerBase {
 
     @Override
     protected TokenStreamComponents createComponents(final String fieldName) {
-        final Tokenizer src;
-        if (getVersion().onOrAfter(Version.LUCENE_4_7_0)) {
-            src = new StandardTokenizer();
-        } else {
-            src = new StandardTokenizer40();
-        }
+        final Tokenizer src = new StandardTokenizer();
         TokenStream tok = new StandardFilter(src);
         tok = new LowerCaseFilter(tok);
         if (!stopwords.isEmpty()) {
@@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -41,14 +39,8 @@ public class StandardTokenizerFactory extends AbstractTokenizerFactory {

     @Override
     public Tokenizer create() {
-        if (version.onOrAfter(Version.LUCENE_4_7_0)) {
-            StandardTokenizer tokenizer = new StandardTokenizer();
-            tokenizer.setMaxTokenLength(maxTokenLength);
-            return tokenizer;
-        } else {
-            StandardTokenizer40 tokenizer = new StandardTokenizer40();
-            tokenizer.setMaxTokenLength(maxTokenLength);
-            return tokenizer;
-        }
+        StandardTokenizer tokenizer = new StandardTokenizer();
+        tokenizer.setMaxTokenLength(maxTokenLength);
+        return tokenizer;
     }
 }

@@ -20,7 +20,6 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.core.Lucene43StopFilter;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.util.CharArraySet;
@@ -42,7 +41,6 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {

     private final boolean ignoreCase;

-    private final boolean enablePositionIncrements;
     private final boolean removeTrailing;

     public StopTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
@@ -50,21 +48,15 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory {
         this.ignoreCase = settings.getAsBoolean("ignore_case", false);
         this.removeTrailing = settings.getAsBoolean("remove_trailing", true);
         this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
-        if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) {
-            throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams."
-                    + " Please fix your analysis chain or use an older compatibility version (<= 4.3).");
+        if (settings.get("enable_position_increments") != null) {
+            throw new IllegalArgumentException("enable_position_increments is not supported anymore. Please fix your analysis chain");
         }
-        this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true);
     }

     @Override
     public TokenStream create(TokenStream tokenStream) {
         if (removeTrailing) {
-            if (version.onOrAfter(Version.LUCENE_4_4)) {
-                return new StopFilter(tokenStream, stopWords);
-            } else {
-                return new Lucene43StopFilter(enablePositionIncrements, tokenStream, stopWords);
-            }
+            return new StopFilter(tokenStream, stopWords);
         } else {
             return new SuggestStopFilter(tokenStream, stopWords);
         }

@@ -20,9 +20,7 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.miscellaneous.Lucene43TrimFilter;
 import org.apache.lucene.analysis.miscellaneous.TrimFilter;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -32,26 +30,17 @@ import org.elasticsearch.index.IndexSettings;
  */
 public class TrimTokenFilterFactory extends AbstractTokenFilterFactory {

-    private final boolean updateOffsets;
     private static final String UPDATE_OFFSETS_KEY = "update_offsets";

     public TrimTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
         super(indexSettings, name, settings);
-        if (version.onOrAfter(Version.LUCENE_4_4_0) && settings.get(UPDATE_OFFSETS_KEY) != null) {
-            throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use"
-                    + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs.");
+        if (settings.get(UPDATE_OFFSETS_KEY) != null) {
+            throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain");
         }
-        this.updateOffsets = settings.getAsBoolean("update_offsets", false);
     }

     @Override
     public TokenStream create(TokenStream tokenStream) {
-        if (version.onOrAfter(Version.LUCENE_4_4_0)) {
-            return new TrimFilter(tokenStream);
-        } else {
-            @SuppressWarnings("deprecation")
-            final TokenStream filter = new Lucene43TrimFilter(tokenStream, updateOffsets);
-            return filter;
-        }
+        return new TrimFilter(tokenStream);
     }
 }

@@ -22,8 +22,6 @@ package org.elasticsearch.index.analysis;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
-import org.apache.lucene.analysis.standard.std40.UAX29URLEmailTokenizer40;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -42,14 +40,8 @@ public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {

     @Override
     public Tokenizer create() {
-        if (version.onOrAfter(Version.LUCENE_4_7)) {
-            UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer();
-            tokenizer.setMaxTokenLength(maxTokenLength);
-            return tokenizer;
-        } else {
-            UAX29URLEmailTokenizer40 tokenizer = new UAX29URLEmailTokenizer40();
-            tokenizer.setMaxTokenLength(maxTokenLength);
-            return tokenizer;
-        }
+        UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer();
+        tokenizer.setMaxTokenLength(maxTokenLength);
+        return tokenizer;
     }
 }
@@ -20,11 +20,9 @@
 package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.miscellaneous.Lucene47WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
 import org.apache.lucene.analysis.util.CharArraySet;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -96,17 +94,10 @@ public class WordDelimiterTokenFilterFactory extends AbstractTokenFilterFactory

     @Override
     public TokenStream create(TokenStream tokenStream) {
-        if (version.onOrAfter(Version.LUCENE_4_8)) {
-            return new WordDelimiterFilter(tokenStream,
+        return new WordDelimiterFilter(tokenStream,
                         charTypeTable,
                         flags,
                         protoWords);
-        } else {
-            return new Lucene47WordDelimiterFilter(tokenStream,
-                        charTypeTable,
-                        flags,
-                        protoWords);
-        }
     }

     public int getFlag(int flag, Settings settings, String key, boolean defaultValue) {

@@ -21,8 +21,6 @@ package org.elasticsearch.index.analysis.compound;

 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;
-import org.apache.lucene.analysis.compound.Lucene43DictionaryCompoundWordTokenFilter;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -41,12 +39,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends AbstractCompoundWo

     @Override
     public TokenStream create(TokenStream tokenStream) {
-        if (version.onOrAfter(Version.LUCENE_4_4_0)) {
-            return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
-                    minSubwordSize, maxSubwordSize, onlyLongestMatch);
-        } else {
-            return new Lucene43DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
-                    minSubwordSize, maxSubwordSize, onlyLongestMatch);
-        }
+        return new DictionaryCompoundWordTokenFilter(tokenStream, wordList, minWordSize,
+                minSubwordSize, maxSubwordSize, onlyLongestMatch);
     }
 }

@@ -21,9 +21,7 @@ package org.elasticsearch.index.analysis.compound;

 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter;
-import org.apache.lucene.analysis.compound.Lucene43HyphenationCompoundWordTokenFilter;
 import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -60,12 +58,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW

     @Override
     public TokenStream create(TokenStream tokenStream) {
-        if (version.onOrAfter(Version.LUCENE_4_4_0)) {
-            return new HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize,
-                    minSubwordSize, maxSubwordSize, onlyLongestMatch);
-        } else {
-            return new Lucene43HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize,
-                    minSubwordSize, maxSubwordSize, onlyLongestMatch);
-        }
+        return new HyphenationCompoundWordTokenFilter(tokenStream, hyphenationTree, wordList, minWordSize,
+                minSubwordSize, maxSubwordSize, onlyLongestMatch);
     }
 }

@@ -22,6 +22,7 @@ package org.elasticsearch.index.codec;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
 import org.apache.lucene.codecs.lucene54.Lucene54Codec;
+import org.apache.lucene.codecs.lucene60.Lucene60Codec;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.logging.ESLogger;
@@ -47,8 +48,8 @@ public class CodecService {
     public CodecService(@Nullable MapperService mapperService, ESLogger logger) {
         final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
         if (mapperService == null) {
-            codecs.put(DEFAULT_CODEC, new Lucene54Codec());
-            codecs.put(BEST_COMPRESSION_CODEC, new Lucene54Codec(Mode.BEST_COMPRESSION));
+            codecs.put(DEFAULT_CODEC, new Lucene60Codec());
+            codecs.put(BEST_COMPRESSION_CODEC, new Lucene60Codec(Mode.BEST_COMPRESSION));
         } else {
             codecs.put(DEFAULT_CODEC,
                 new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));

@@ -22,7 +22,7 @@ package org.elasticsearch.index.codec;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
-import org.apache.lucene.codecs.lucene54.Lucene54Codec;
+import org.apache.lucene.codecs.lucene60.Lucene60Codec;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
  * configured for a specific field the default postings format is used.
  */
 // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
-public class PerFieldMappingPostingFormatCodec extends Lucene54Codec {
+public class PerFieldMappingPostingFormatCodec extends Lucene60Codec {
     private final ESLogger logger;
     private final MapperService mapperService;

@@ -19,7 +19,6 @@

 package org.elasticsearch.index.engine;

-import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.index.translog.Translog;

 /** Holds a deleted version, which just adds a timestamp to {@link VersionValue} so we know when we can expire the deletion. */
@@ -44,6 +43,6 @@ class DeleteVersionValue extends VersionValue {

     @Override
     public long ramBytesUsed() {
-        return super.ramBytesUsed() + RamUsageEstimator.NUM_BYTES_LONG;
+        return super.ramBytesUsed() + Long.BYTES;
     }
 }

@@ -275,7 +275,7 @@ public class InternalEngine extends Engine {
         SearcherManager searcherManager = null;
         try {
             try {
-                final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId);
+                final DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
                 searcherManager = new SearcherManager(directoryReader, searcherFactory);
                 lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
                 success = true;

@@ -64,7 +64,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
      *
      * NUM_BYTES_OBJECT_HEADER + 2*NUM_BYTES_INT + NUM_BYTES_OBJECT_REF + NUM_BYTES_ARRAY_HEADER [ + bytes.length] */
     private static final int BASE_BYTES_PER_BYTESREF = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER +
-        2*RamUsageEstimator.NUM_BYTES_INT +
+        2*Integer.BYTES +
         RamUsageEstimator.NUM_BYTES_OBJECT_REF +
         RamUsageEstimator.NUM_BYTES_ARRAY_HEADER;

@@ -76,7 +76,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
      * CHM's pointer to CHM.Entry, double for approx load factor:
      * + 2*NUM_BYTES_OBJECT_REF */
    private static final int BASE_BYTES_PER_CHM_ENTRY = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER +
-        RamUsageEstimator.NUM_BYTES_INT +
+        Integer.BYTES +
         5*RamUsageEstimator.NUM_BYTES_OBJECT_REF;

     /** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account

@@ -54,7 +54,7 @@ class VersionValue implements Accountable {

     @Override
     public long ramBytesUsed() {
-        return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed();
+        return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + Long.BYTES + RamUsageEstimator.NUM_BYTES_OBJECT_REF + translogLocation.ramBytesUsed();
     }

     @Override

@@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef;

 /**
  * A list of per-document binary values, sorted
- * according to {@link BytesRef#getUTF8SortedAsUnicodeComparator()}.
+ * according to {@link BytesRef#compareTo(BytesRef)}.
  * There might be dups however.
  */
 public abstract class SortedBinaryDocValues {

@@ -30,8 +30,8 @@ import org.apache.lucene.util.BitSet;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
 import org.apache.lucene.util.FixedBitSet;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.LongsRef;
-import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.packed.GrowableWriter;
 import org.apache.lucene.util.packed.PackedInts;
 import org.apache.lucene.util.packed.PagedGrowableWriter;
@@ -459,7 +459,7 @@ public final class OrdinalsBuilder implements Closeable {
             @Override
             protected AcceptStatus accept(BytesRef term) throws IOException {
                 // we stop accepting terms once we moved across the prefix codec terms - redundant values!
-                return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+                return LegacyNumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
             }
         };
     }
@@ -475,7 +475,7 @@ public final class OrdinalsBuilder implements Closeable {
             @Override
             protected AcceptStatus accept(BytesRef term) throws IOException {
                 // we stop accepting terms once we moved across the prefix codec terms - redundant values!
-                return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
+                return LegacyNumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
             }
         };
     }

@@ -24,7 +24,7 @@ import org.apache.lucene.spatial.util.GeoEncodingUtils;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.geo.GeoPoint;
@@ -62,7 +62,7 @@ abstract class AbstractIndexGeoPointFieldData extends AbstractIndexFieldData<Ato
         if (termEncoding == GeoPointField.TermEncoding.PREFIX) {
             return GeoEncodingUtils.prefixCodedToGeoCoded(term);
         } else if (termEncoding == GeoPointField.TermEncoding.NUMERIC) {
-            return NumericUtils.prefixCodedToLong(term);
+            return LegacyNumericUtils.prefixCodedToLong(term);
         }
         throw new IllegalArgumentException("GeoPoint.TermEncoding should be one of: " + GeoPointField.TermEncoding.PREFIX
             + " or " + GeoPointField.TermEncoding.NUMERIC + " found: " + termEncoding);

@@ -24,7 +24,6 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.BitSet;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.util.LongArray;
 import org.elasticsearch.index.fielddata.FieldData;
@@ -59,7 +58,7 @@ public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPoin

         @Override
         public long ramBytesUsed() {
-            return RamUsageEstimator.NUM_BYTES_INT + indexedPoints.ramBytesUsed();
+            return Integer.BYTES + indexedPoints.ramBytesUsed();
         }

         @Override
@@ -117,7 +116,7 @@ public abstract class GeoPointArrayAtomicFieldData extends AbstractAtomicGeoPoin

         @Override
         public long ramBytesUsed() {
-            return RamUsageEstimator.NUM_BYTES_INT + indexedPoint.ramBytesUsed()
+            return Integer.BYTES + indexedPoint.ramBytesUsed()
                 + (set == null ? 0 : set.ramBytesUsed());
         }

@@ -24,7 +24,6 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.BitSet;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.util.DoubleArray;
 import org.elasticsearch.index.fielddata.FieldData;
@@ -61,7 +60,7 @@ public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicG

         @Override
         public long ramBytesUsed() {
-            return RamUsageEstimator.NUM_BYTES_INT/*size*/ + lon.ramBytesUsed() + lat.ramBytesUsed();
+            return Integer.BYTES/*size*/ + lon.ramBytesUsed() + lat.ramBytesUsed();
         }

         @Override
@@ -132,7 +131,7 @@ public abstract class GeoPointArrayLegacyAtomicFieldData extends AbstractAtomicG

         @Override
         public long ramBytesUsed() {
-            return RamUsageEstimator.NUM_BYTES_INT + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed());
+            return Integer.BYTES + lon.ramBytesUsed() + lat.ramBytesUsed() + (set == null ? 0 : set.ramBytesUsed());
         }

         @Override

@@ -22,6 +22,10 @@ package org.elasticsearch.index.mapper;
 import com.carrotsearch.hppc.ObjectObjectHashMap;
 import com.carrotsearch.hppc.ObjectObjectMap;
 import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LegacyIntField;
+import org.apache.lucene.document.LegacyLongField;
+import org.apache.lucene.document.LegacyFloatField;
+import org.apache.lucene.document.LegacyDoubleField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.util.BytesRef;
@@ -128,8 +132,8 @@ public abstract class ParseContext {
      * Returns an array of values of the field specified as the method parameter.
      * This method returns an empty array when there are no
      * matching fields. It never returns null.
-     * For {@link org.apache.lucene.document.IntField}, {@link org.apache.lucene.document.LongField}, {@link
-     * org.apache.lucene.document.FloatField} and {@link org.apache.lucene.document.DoubleField} it returns the string value of the number.
+     * For {@link org.apache.lucene.document.LegacyIntField}, {@link org.apache.lucene.document.LegacyLongField}, {@link
+     * org.apache.lucene.document.LegacyFloatField} and {@link org.apache.lucene.document.LegacyDoubleField} it returns the string value of the number.
      * If you want the actual numeric field instances back, use {@link #getFields}.
      * @param name the name of the field
      * @return a <code>String[]</code> of field values

@@ -23,11 +23,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
@@ -116,7 +116,7 @@ public class ByteFieldMapper extends NumberFieldMapper {

     static final class ByteFieldType extends NumberFieldType {
         public ByteFieldType() {
-            super(NumericType.INT);
+            super(LegacyNumericType.INT);
         }

         protected ByteFieldType(ByteFieldType ref) {
@@ -155,13 +155,13 @@ public class ByteFieldMapper extends NumberFieldMapper {
         @Override
         public BytesRef indexedValueForSearch(Object value) {
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : (int)parseValue(lowerTerm),
                 upperTerm == null ? null : (int)parseValue(upperTerm),
                 includeLower, includeUpper);
@@ -171,7 +171,7 @@ public class ByteFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             byte iValue = parseValue(value);
             byte iSim = fuzziness.asByte();
-            return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -179,8 +179,8 @@ public class ByteFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            long minValue = NumericUtils.getMinInt(terms);
-            long maxValue = NumericUtils.getMaxInt(terms);
+            long minValue = LegacyNumericUtils.getMinInt(terms);
+            long maxValue = LegacyNumericUtils.getMaxInt(terms);
             return new FieldStats.Long(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );

@@ -23,12 +23,11 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
-import org.apache.lucene.util.ToStringUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
@@ -243,7 +242,6 @@ public class DateFieldMapper extends NumberFieldMapper {
                 .append(" TO ")
                 .append((upperTerm == null) ? "*" : upperTerm.toString())
                 .append(includeUpper ? ']' : '}')
-                .append(ToStringUtils.boost(getBoost()))
                 .toString();
         }
     }
@@ -253,7 +251,7 @@ public class DateFieldMapper extends NumberFieldMapper {
         protected DateMathParser dateMathParser = new DateMathParser(dateTimeFormatter);

         public DateFieldType() {
-            super(NumericType.LONG);
+            super(LegacyNumericType.LONG);
             setFieldDataType(new FieldDataType("long"));
         }

@@ -360,7 +358,7 @@ public class DateFieldMapper extends NumberFieldMapper {
         @Override
         public BytesRef indexedValueForSearch(Object value) {
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

@@ -392,7 +390,7 @@ public class DateFieldMapper extends NumberFieldMapper {
                 // not a time format
                 iSim = fuzziness.asLong();
             }
-            return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -400,8 +398,8 @@ public class DateFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            long minValue = NumericUtils.getMinLong(terms);
-            long maxValue = NumericUtils.getMaxLong(terms);
+            long minValue = LegacyNumericUtils.getMinLong(terms);
+            long maxValue = LegacyNumericUtils.getMaxLong(terms);
             return new FieldStats.Date(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue, dateTimeFormatter()
             );
@@ -412,7 +410,7 @@ public class DateFieldMapper extends NumberFieldMapper {
         }

         private Query innerRangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser) {
-            return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : parseToMilliseconds(lowerTerm, !includeLower, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser),
                 upperTerm == null ? null : parseToMilliseconds(upperTerm, includeUpper, timeZone, forcedDateParser == null ? dateMathParser : forcedDateParser),
                 includeLower, includeUpper);

@@ -24,10 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
@@ -49,7 +50,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.apache.lucene.util.NumericUtils.doubleToSortableLong;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeDoubleValue;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

@@ -118,7 +118,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
     public static final class DoubleFieldType extends NumberFieldType {

         public DoubleFieldType() {
-            super(NumericType.DOUBLE);
+            super(LegacyNumericType.DOUBLE);
         }

         protected DoubleFieldType(DoubleFieldType ref) {
@@ -158,13 +158,13 @@ public class DoubleFieldMapper extends NumberFieldMapper {
         public BytesRef indexedValueForSearch(Object value) {
             long longValue = NumericUtils.doubleToSortableLong(parseDoubleValue(value));
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.longToPrefixCoded(longValue, 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : parseDoubleValue(lowerTerm),
                 upperTerm == null ? null : parseDoubleValue(upperTerm),
                 includeLower, includeUpper);
@@ -174,7 +174,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             double iValue = parseDoubleValue(value);
             double iSim = fuzziness.asDouble();
-            return NumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newDoubleRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -182,8 +182,8 @@ public class DoubleFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            double minValue = NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms));
-            double maxValue = NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms));
+            double minValue = NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms));
+            double maxValue = NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms));
             return new FieldStats.Double(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );
@@ -284,7 +284,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
                 fields.add(field);
             }
             if (fieldType().hasDocValues()) {
-                addDocValue(context, fields, doubleToSortableLong(value));
+                addDocValue(context, fields, NumericUtils.doubleToSortableLong(value));
             }
         }

@@ -24,10 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
@@ -50,7 +51,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import static org.apache.lucene.util.NumericUtils.floatToSortableInt;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeFloatValue;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;

@@ -119,7 +119,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
     static final class FloatFieldType extends NumberFieldType {

         public FloatFieldType() {
-            super(NumericType.FLOAT);
+            super(LegacyNumericType.FLOAT);
         }

         protected FloatFieldType(FloatFieldType ref) {
@@ -159,13 +159,13 @@ public class FloatFieldMapper extends NumberFieldMapper {
         public BytesRef indexedValueForSearch(Object value) {
             int intValue = NumericUtils.floatToSortableInt(parseValue(value));
             BytesRefBuilder bytesRef = new BytesRefBuilder();
-            NumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match
+            LegacyNumericUtils.intToPrefixCoded(intValue, 0, bytesRef); // 0 because of exact match
             return bytesRef.get();
         }

         @Override
         public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-            return NumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper);
@@ -175,7 +175,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
         public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
             float iValue = parseValue(value);
             final float iSim = fuzziness.asFloat();
-            return NumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
+            return LegacyNumericRangeQuery.newFloatRange(name(), numericPrecisionStep(),
                 iValue - iSim,
                 iValue + iSim,
                 true, true);
@@ -183,8 +183,8 @@ public class FloatFieldMapper extends NumberFieldMapper {

         @Override
         public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-            float minValue = NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms));
-            float maxValue = NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms));
+            float minValue = NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms));
+            float maxValue = NumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms));
             return new FieldStats.Float(
                 maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
             );
@@ -296,7 +296,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
                 fields.add(field);
             }
             if (fieldType().hasDocValues()) {
-                addDocValue(context, fields, floatToSortableInt(value));
+                addDocValue(context, fields, NumericUtils.floatToSortableInt(value));
             }
         }

@@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;

@@ -124,7 +124,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     public static final class IntegerFieldType extends NumberFieldType {

         public IntegerFieldType() {
-            super(NumericType.INT);
+            super(LegacyNumericType.INT);
         }

         protected IntegerFieldType(IntegerFieldType ref) {

@@ -164,13 +164,13 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     @Override
     public BytesRef indexedValueForSearch(Object value) {
         BytesRefBuilder bytesRef = new BytesRefBuilder();
-        NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+        LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
         return bytesRef.get();
     }

     @Override
     public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-        return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
             lowerTerm == null ? null : parseValue(lowerTerm),
             upperTerm == null ? null : parseValue(upperTerm),
             includeLower, includeUpper);

@@ -180,7 +180,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
         int iValue = parseValue(value);
         int iSim = fuzziness.asInt();
-        return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
             iValue - iSim,
             iValue + iSim,
             true, true);

@@ -188,8 +188,8 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     @Override
     public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-        long minValue = NumericUtils.getMinInt(terms);
-        long maxValue = NumericUtils.getMaxInt(terms);
+        long minValue = LegacyNumericUtils.getMinInt(terms);
+        long maxValue = LegacyNumericUtils.getMaxInt(terms);
         return new FieldStats.Long(
             maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
         );
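Aside: the `0` shift in `intToPrefixCoded` encodes the value at full precision, which is exactly what an exact-match term lookup needs. A small illustrative sketch (the value is hard-coded, not from the diff):

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefBuilder;
    import org.apache.lucene.util.LegacyNumericUtils;

    // Encode the int 42 at shift 0 (full precision) so the resulting
    // term matches exactly one indexed trie term.
    BytesRefBuilder builder = new BytesRefBuilder();
    LegacyNumericUtils.intToPrefixCoded(42, 0, builder);
    BytesRef exactTerm = builder.get();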
@@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;

@@ -123,7 +123,7 @@ public class LongFieldMapper extends NumberFieldMapper {
     public static class LongFieldType extends NumberFieldType {

         public LongFieldType() {
-            super(NumericType.LONG);
+            super(LegacyNumericType.LONG);
         }

         protected LongFieldType(LongFieldType ref) {

@@ -162,13 +162,13 @@ public class LongFieldMapper extends NumberFieldMapper {
     @Override
     public BytesRef indexedValueForSearch(Object value) {
         BytesRefBuilder bytesRef = new BytesRefBuilder();
-        NumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match
+        LegacyNumericUtils.longToPrefixCoded(parseLongValue(value), 0, bytesRef); // 0 because of exact match
         return bytesRef.get();
     }

     @Override
     public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-        return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
             lowerTerm == null ? null : parseLongValue(lowerTerm),
             upperTerm == null ? null : parseLongValue(upperTerm),
             includeLower, includeUpper);

@@ -178,7 +178,7 @@ public class LongFieldMapper extends NumberFieldMapper {
     public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
         long iValue = parseLongValue(value);
         final long iSim = fuzziness.asLong();
-        return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
             iValue - iSim,
             iValue + iSim,
             true, true);

@@ -186,8 +186,8 @@ public class LongFieldMapper extends NumberFieldMapper {
     @Override
     public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-        long minValue = NumericUtils.getMinLong(terms);
-        long maxValue = NumericUtils.getMaxLong(terms);
+        long minValue = LegacyNumericUtils.getMinLong(terms);
+        long maxValue = LegacyNumericUtils.getMaxLong(terms);
         return new FieldStats.Long(
             maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
         );
@@ -20,7 +20,7 @@
 package org.elasticsearch.index.mapper.core;

 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;

@@ -129,7 +129,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM

     public static abstract class NumberFieldType extends MappedFieldType {

-        public NumberFieldType(NumericType numericType) {
+        public NumberFieldType(LegacyNumericType numericType) {
             setTokenized(false);
             setOmitNorms(true);
             setIndexOptions(IndexOptions.DOCS);

@@ -295,38 +295,38 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
     // used to we can use a numeric field in a document that is then parsed twice!
     public abstract static class CustomNumericField extends Field {

-        private ThreadLocal<NumericTokenStream> tokenStream = new ThreadLocal<NumericTokenStream>() {
+        private ThreadLocal<LegacyNumericTokenStream> tokenStream = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(fieldType().numericPrecisionStep());
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(fieldType().numericPrecisionStep());
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStream4 = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStream4 = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(4);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(4);
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStream8 = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStream8 = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(8);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(8);
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStream16 = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStream16 = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(16);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(16);
             }
         };

-        private static ThreadLocal<NumericTokenStream> tokenStreamMax = new ThreadLocal<NumericTokenStream>() {
+        private static ThreadLocal<LegacyNumericTokenStream> tokenStreamMax = new ThreadLocal<LegacyNumericTokenStream>() {
             @Override
-            protected NumericTokenStream initialValue() {
-                return new NumericTokenStream(Integer.MAX_VALUE);
+            protected LegacyNumericTokenStream initialValue() {
+                return new LegacyNumericTokenStream(Integer.MAX_VALUE);
             }
         };

@@ -337,7 +337,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM
         }
     }

-    protected NumericTokenStream getCachedStream() {
+    protected LegacyNumericTokenStream getCachedStream() {
         if (fieldType().numericPrecisionStep() == 4) {
             return tokenStream4.get();
         } else if (fieldType().numericPrecisionStep() == 8) {
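The ThreadLocal fields above cache one token stream per thread for each common precision step, so indexing does not allocate a new stream per value. A minimal, self-contained sketch of the same idea (the step and helper name are illustrative, not from the diff):

    import org.apache.lucene.analysis.LegacyNumericTokenStream;

    class CachedStreams {
        // One reusable stream per thread; reset the value per document
        // instead of allocating a new stream each time.
        private static final ThreadLocal<LegacyNumericTokenStream> STREAM_16 =
                ThreadLocal.withInitial(() -> new LegacyNumericTokenStream(16));

        static LegacyNumericTokenStream streamFor(long value) {
            LegacyNumericTokenStream stream = STREAM_16.get();
            stream.setLongValue(value); // re-initializes the cached stream
            return stream;
        }
    }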
@@ -24,11 +24,11 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.Terms;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;

@@ -121,7 +121,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
     static final class ShortFieldType extends NumberFieldType {

         public ShortFieldType() {
-            super(NumericType.INT);
+            super(LegacyNumericType.INT);
         }

         protected ShortFieldType(ShortFieldType ref) {

@@ -160,13 +160,13 @@ public class ShortFieldMapper extends NumberFieldMapper {
     @Override
     public BytesRef indexedValueForSearch(Object value) {
         BytesRefBuilder bytesRef = new BytesRefBuilder();
-        NumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+        LegacyNumericUtils.intToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
         return bytesRef.get();
     }

     @Override
     public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-        return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
             lowerTerm == null ? null : (int)parseValue(lowerTerm),
             upperTerm == null ? null : (int)parseValue(upperTerm),
             includeLower, includeUpper);

@@ -176,7 +176,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
     public Query fuzzyQuery(Object value, Fuzziness fuzziness, int prefixLength, int maxExpansions, boolean transpositions) {
         short iValue = parseValue(value);
         short iSim = fuzziness.asShort();
-        return NumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newIntRange(name(), numericPrecisionStep(),
             iValue - iSim,
             iValue + iSim,
             true, true);

@@ -184,8 +184,8 @@ public class ShortFieldMapper extends NumberFieldMapper {
     @Override
     public FieldStats stats(Terms terms, int maxDoc) throws IOException {
-        long minValue = NumericUtils.getMinInt(terms);
-        long maxValue = NumericUtils.getMaxInt(terms);
+        long minValue = LegacyNumericUtils.getMinInt(terms);
+        long maxValue = LegacyNumericUtils.getMaxInt(terms);
         return new FieldStats.Long(
             maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
         );
@@ -21,7 +21,7 @@ package org.elasticsearch.index.mapper.geo;

 import org.apache.lucene.document.Field;
 import org.apache.lucene.spatial.util.GeoHashUtils;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Strings;

@@ -483,7 +483,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr
         if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) {
             builder.field("lat_lon", fieldType().isLatLonEnabled());
         }
-        if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != NumericUtils.PRECISION_STEP_DEFAULT)) {
+        if (fieldType().isLatLonEnabled() && (includeDefaults || fieldType().latFieldType().numericPrecisionStep() != LegacyNumericUtils.PRECISION_STEP_DEFAULT)) {
             builder.field("precision_step", fieldType().latFieldType().numericPrecisionStep());
         }
         if (includeDefaults || fieldType().isGeoHashEnabled() != Defaults.ENABLE_GEOHASH) {
@@ -84,7 +84,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
         fieldType.setTokenized(false);
         if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
             fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
-            fieldType.setNumericType(FieldType.NumericType.LONG);
+            fieldType.setNumericType(FieldType.LegacyNumericType.LONG);
         }
         setupFieldType(context);
         return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper,

@@ -95,7 +95,7 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper {
     public GeoPointFieldMapper build(BuilderContext context) {
         if (context.indexCreatedVersion().before(Version.V_2_3_0)) {
             fieldType.setNumericPrecisionStep(GeoPointField.PRECISION_STEP);
-            fieldType.setNumericType(FieldType.LegacyNumericType.LONG);
+            fieldType.setNumericType(FieldType.LegacyNumericType.LONG);
         }
         return super.build(context);
     }
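The nested enum moved with the rename as well: `FieldType.NumericType` is now `FieldType.LegacyNumericType`. A short sketch of configuring a legacy numeric field type under Lucene 6 (the precision step value is illustrative):

    import org.apache.lucene.document.FieldType;

    FieldType fieldType = new FieldType();
    fieldType.setTokenized(false);
    // Lucene 5.x: FieldType.NumericType.LONG
    // Lucene 6.x: FieldType.LegacyNumericType.LONG
    fieldType.setNumericType(FieldType.LegacyNumericType.LONG);
    fieldType.setNumericPrecisionStep(9);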
@@ -18,9 +18,9 @@
  */
 package org.elasticsearch.index.mapper.geo;

-import com.spatial4j.core.shape.Point;
-import com.spatial4j.core.shape.Shape;
-import com.spatial4j.core.shape.jts.JtsGeometry;
+import org.locationtech.spatial4j.shape.Point;
+import org.locationtech.spatial4j.shape.Shape;
+import org.locationtech.spatial4j.shape.jts.JtsGeometry;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;

@@ -58,7 +58,7 @@ import static org.elasticsearch.common.xcontent.support.XContentMapValues.lenien

 /**
- * FieldMapper for indexing {@link com.spatial4j.core.shape.Shape}s.
+ * FieldMapper for indexing {@link org.locationtech.spatial4j.shape.Shape}s.
  * <p>
  * Currently Shapes can only be indexed and can only be queried using
  * {@link org.elasticsearch.index.query.GeoShapeQueryParser}, consequently
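Spatial4j moved to the LocationTech project, so only the package prefix changes: `com.spatial4j.core` becomes `org.locationtech.spatial4j`. A tiny sketch under that assumption (coordinates are illustrative):

    // Before: import com.spatial4j.core.context.SpatialContext;
    import org.locationtech.spatial4j.context.SpatialContext;
    import org.locationtech.spatial4j.shape.Point;

    // Build a geographic point (x = longitude, y = latitude).
    Point point = SpatialContext.GEO.makePoint(-71.06, 42.36);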
@@ -19,14 +19,14 @@

 package org.elasticsearch.index.mapper.ip;

-import org.apache.lucene.analysis.NumericTokenStream;
+import org.apache.lucene.analysis.LegacyNumericTokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.search.NumericRangeQuery;
+import org.apache.lucene.search.LegacyNumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
+import org.apache.lucene.util.LegacyNumericUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;

@@ -206,7 +206,7 @@ public class IpFieldMapper extends NumberFieldMapper {
     @Override
     public BytesRef indexedValueForSearch(Object value) {
         BytesRefBuilder bytesRef = new BytesRefBuilder();
-        NumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
+        LegacyNumericUtils.longToPrefixCoded(parseValue(value), 0, bytesRef); // 0 because of exact match
         return bytesRef.get();
     }

@@ -242,7 +242,7 @@ public class IpFieldMapper extends NumberFieldMapper {

     @Override
     public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
-        return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
             lowerTerm == null ? null : parseValue(lowerTerm),
             upperTerm == null ? null : parseValue(upperTerm),
             includeLower, includeUpper);

@@ -257,7 +257,7 @@ public class IpFieldMapper extends NumberFieldMapper {
         } catch (IllegalArgumentException e) {
             iSim = fuzziness.asLong();
         }
-        return NumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
+        return LegacyNumericRangeQuery.newLongRange(name(), numericPrecisionStep(),
             iValue - iSim,
             iValue + iSim,
             true, true);

@@ -356,11 +356,11 @@ public class IpFieldMapper extends NumberFieldMapper {
     public static class NumericIpTokenizer extends NumericTokenizer {

         public NumericIpTokenizer(int precisionStep, char[] buffer) throws IOException {
-            super(new NumericTokenStream(precisionStep), buffer, null);
+            super(new LegacyNumericTokenStream(precisionStep), buffer, null);
         }

         @Override
-        protected void setValue(NumericTokenStream tokenStream, String value) {
+        protected void setValue(LegacyNumericTokenStream tokenStream, String value) {
             tokenStream.setLongValue(ipToLong(value));
         }
     }
@@ -69,8 +69,6 @@ public final class ElasticsearchMergePolicy extends MergePolicy {

     /** Return an "upgraded" view of the reader. */
     static CodecReader filter(CodecReader reader) throws IOException {
-        // convert 0.90.x _uid payloads to _version docvalues if needed
-        reader = VersionFieldUpgrader.wrap(reader);
         // TODO: remove 0.90.x/1.x freqs/prox/payloads from _uid?
         // the previous code never did this, so some indexes carry around trash.
         return reader;
@@ -1,172 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.shard;
-
-import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.index.CodecReader;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.FilterCodecReader;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.packed.GrowableWriter;
-import org.apache.lucene.util.packed.PackedInts;
-import org.elasticsearch.common.Numbers;
-import org.elasticsearch.index.mapper.internal.UidFieldMapper;
-import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-
-/**
- * Converts 0.90.x _uid payloads to _version docvalues
- */
-class VersionFieldUpgrader extends FilterCodecReader {
-    final FieldInfos infos;
-
-    VersionFieldUpgrader(CodecReader in) {
-        super(in);
-
-        // Find a free field number
-        int fieldNumber = 0;
-        for (FieldInfo fi : in.getFieldInfos()) {
-            fieldNumber = Math.max(fieldNumber, fi.number + 1);
-        }
-
-        // TODO: lots of things can wrong here...
-        FieldInfo newInfo = new FieldInfo(VersionFieldMapper.NAME, // field name
-                fieldNumber,                           // field number
-                false,                                 // store term vectors
-                false,                                 // omit norms
-                false,                                 // store payloads
-                IndexOptions.NONE,                     // index options
-                DocValuesType.NUMERIC,                 // docvalues
-                -1,                                    // docvalues generation
-                Collections.<String, String>emptyMap() // attributes
-                );
-        newInfo.checkConsistency(); // fail merge immediately if above code is wrong
-
-        final ArrayList<FieldInfo> fieldInfoList = new ArrayList<>();
-        for (FieldInfo info : in.getFieldInfos()) {
-            if (!info.name.equals(VersionFieldMapper.NAME)) {
-                fieldInfoList.add(info);
-            }
-        }
-        fieldInfoList.add(newInfo);
-        infos = new FieldInfos(fieldInfoList.toArray(new FieldInfo[fieldInfoList.size()]));
-    }
-
-    static CodecReader wrap(CodecReader reader) throws IOException {
-        final FieldInfos fieldInfos = reader.getFieldInfos();
-        final FieldInfo versionInfo = fieldInfos.fieldInfo(VersionFieldMapper.NAME);
-        if (versionInfo != null && versionInfo.getDocValuesType() != DocValuesType.NONE) {
-            // the reader is a recent one, it has versions and they are stored
-            // in a numeric doc values field
-            return reader;
-        }
-        // The segment is an old one, look at the _uid field
-        final Terms terms = reader.terms(UidFieldMapper.NAME);
-        if (terms == null || !terms.hasPayloads()) {
-            // The segment doesn't have an _uid field or doesn't have payloads
-            // don't try to do anything clever. If any other segment has versions
-            // all versions of this segment will be initialized to 0
-            return reader;
-        }
-        // convert _uid payloads -> _version docvalues
-        return new VersionFieldUpgrader(reader);
-    }
-
-    @Override
-    public FieldInfos getFieldInfos() {
-        return infos;
-    }
-
-    @Override
-    public DocValuesProducer getDocValuesReader() {
-        DocValuesProducer producer = in.getDocValuesReader();
-        // TODO: move this nullness stuff out
-        if (producer == null) {
-            producer = FilterDocValuesProducer.EMPTY;
-        }
-        return new UninvertedVersions(producer, this);
-    }
-
-    static class UninvertedVersions extends FilterDocValuesProducer {
-        final CodecReader reader;
-
-        UninvertedVersions(DocValuesProducer in, CodecReader reader) {
-            super(in);
-            this.reader = reader;
-        }
-
-        @Override
-        public NumericDocValues getNumeric(FieldInfo field) throws IOException {
-            if (VersionFieldMapper.NAME.equals(field.name)) {
-                // uninvert into a packed ints and expose as docvalues
-                final Terms terms = reader.terms(UidFieldMapper.NAME);
-                final TermsEnum uids = terms.iterator();
-                final GrowableWriter versions = new GrowableWriter(2, reader.maxDoc(), PackedInts.COMPACT);
-                PostingsEnum dpe = null;
-                for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) {
-                    dpe = uids.postings(dpe, PostingsEnum.PAYLOADS);
-                    assert terms.hasPayloads() : "field has payloads";
-                    final Bits liveDocs = reader.getLiveDocs();
-                    for (int doc = dpe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dpe.nextDoc()) {
-                        if (liveDocs != null && liveDocs.get(doc) == false) {
-                            continue;
-                        }
-                        dpe.nextPosition();
-                        final BytesRef payload = dpe.getPayload();
-                        if (payload != null && payload.length == 8) {
-                            final long version = Numbers.bytesToLong(payload);
-                            versions.set(doc, version);
-                            break;
-                        }
-                    }
-                }
-                return versions;
-            } else {
-                return in.getNumeric(field);
-            }
-        }
-
-        @Override
-        public Bits getDocsWithField(FieldInfo field) throws IOException {
-            if (VersionFieldMapper.NAME.equals(field.name)) {
-                return new Bits.MatchAllBits(reader.maxDoc());
-            } else {
-                return in.getDocsWithField(field);
-            }
-        }
-
-        @Override
-        public DocValuesProducer getMergeInstance() throws IOException {
-            return new UninvertedVersions(in.getMergeInstance(), reader);
-        }
-    }
-}
@@ -36,7 +36,7 @@ import java.util.Objects;
  */
 public class StoreFileMetaData implements Writeable {

-    public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;
+    public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_5_0_0;

     private final String name;

@@ -22,7 +22,6 @@ import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.DataOutput;
 import org.apache.lucene.store.InputStreamDataInput;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.common.io.Channels;

 import java.io.IOException;

@@ -36,9 +35,9 @@ import java.nio.file.Path;
  */
 class Checkpoint {

-    static final int BUFFER_SIZE = RamUsageEstimator.NUM_BYTES_INT  // ops
-        + RamUsageEstimator.NUM_BYTES_LONG  // offset
-        + RamUsageEstimator.NUM_BYTES_LONG; // generation
+    static final int BUFFER_SIZE = Integer.BYTES  // ops
+        + Long.BYTES  // offset
+        + Long.BYTES; // generation
     final long offset;
     final int numOps;
     final long generation;
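Lucene 6 drops the primitive-size constants from RamUsageEstimator in favor of the JDK's Java 8 constants; the values are identical, so these edits are mechanical. A quick illustration (the layout mirrors the checkpoint above):

    // Old Lucene constant                  JDK equivalent (Java 8+)
    // RamUsageEstimator.NUM_BYTES_INT   -> Integer.BYTES (4)
    // RamUsageEstimator.NUM_BYTES_LONG  -> Long.BYTES    (8)
    int checkpointSize = Integer.BYTES   // number of operations
                       + Long.BYTES      // file offset
                       + Long.BYTES;     // generation
    assert checkpointSize == 20;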
@@ -418,10 +418,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         try {
             final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);
             final long start = out.position();
-            out.skip(RamUsageEstimator.NUM_BYTES_INT);
+            out.skip(Integer.BYTES);
             writeOperationNoSize(checksumStreamOutput, operation);
             final long end = out.position();
-            final int operationSize = (int) (end - RamUsageEstimator.NUM_BYTES_INT - start);
+            final int operationSize = (int) (end - Integer.BYTES - start);
             out.seek(start);
             out.writeInt(operationSize);
             out.seek(end);

@@ -636,7 +636,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC

     @Override
     public long ramBytesUsed() {
-        return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * RamUsageEstimator.NUM_BYTES_LONG + RamUsageEstimator.NUM_BYTES_INT;
+        return RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + 2 * Long.BYTES + Integer.BYTES;
     }

     @Override

@@ -1144,10 +1144,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         for (Operation op : toWrite) {
             out.reset();
             final long start = out.position();
-            out.skip(RamUsageEstimator.NUM_BYTES_INT);
+            out.skip(Integer.BYTES);
             writeOperationNoSize(checksumStreamOutput, op);
             long end = out.position();
-            int operationSize = (int) (out.position() - RamUsageEstimator.NUM_BYTES_INT - start);
+            int operationSize = (int) (out.position() - Integer.BYTES - start);
             out.seek(start);
             out.writeInt(operationSize);
             out.seek(end);
@@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.InputStreamDataInput;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.common.io.Channels;
 import org.elasticsearch.common.io.stream.InputStreamStreamInput;

@@ -116,7 +115,7 @@ public class TranslogReader extends BaseTranslogReader implements Closeable {
                 if (uuidBytes.bytesEquals(ref) == false) {
                     throw new TranslogCorruptedException("expected shard UUID [" + uuidBytes + "] but got: [" + ref + "] this translog file belongs to a different translog. path:" + path);
                 }
-                return new TranslogReader(checkpoint.generation, channel, path, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + RamUsageEstimator.NUM_BYTES_INT, checkpoint.offset, checkpoint.numOps);
+                return new TranslogReader(checkpoint.generation, channel, path, ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES, checkpoint.offset, checkpoint.numOps);
             default:
                 throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path);
         }
|
|||||||
import org.apache.lucene.store.OutputStreamDataOutput;
|
import org.apache.lucene.store.OutputStreamDataOutput;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.apache.lucene.util.IOUtils;
|
import org.apache.lucene.util.IOUtils;
|
||||||
import org.apache.lucene.util.RamUsageEstimator;
|
|
||||||
import org.elasticsearch.common.bytes.BytesReference;
|
import org.elasticsearch.common.bytes.BytesReference;
|
||||||
import org.elasticsearch.common.io.Channels;
|
import org.elasticsearch.common.io.Channels;
|
||||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||||
@ -76,7 +75,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private static int getHeaderLength(int uuidLength) {
|
private static int getHeaderLength(int uuidLength) {
|
||||||
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + RamUsageEstimator.NUM_BYTES_INT;
|
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + Integer.BYTES;
|
||||||
}
|
}
|
||||||
|
|
||||||
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException {
|
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory, ByteSizeValue bufferSize) throws IOException {
|
||||||
|
@@ -228,7 +228,7 @@ public final class IndicesRequestCache extends AbstractComponent implements Remo

     @Override
     public long ramBytesUsed() {
-        return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length();
+        return RamUsageEstimator.NUM_BYTES_OBJECT_REF + Long.BYTES + value.length();
     }

     @Override
|
|||||||
package org.elasticsearch.indices.analysis;
|
package org.elasticsearch.indices.analysis;
|
||||||
|
|
||||||
import org.apache.lucene.analysis.hunspell.Dictionary;
|
import org.apache.lucene.analysis.hunspell.Dictionary;
|
||||||
|
import org.apache.lucene.store.Directory;
|
||||||
|
import org.apache.lucene.store.SimpleFSDirectory;
|
||||||
import org.apache.lucene.util.IOUtils;
|
import org.apache.lucene.util.IOUtils;
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
import org.elasticsearch.common.component.AbstractComponent;
|
import org.elasticsearch.common.component.AbstractComponent;
|
||||||
@ -183,7 +185,9 @@ public class HunspellService extends AbstractComponent {
|
|||||||
|
|
||||||
affixStream = Files.newInputStream(affixFiles[0]);
|
affixStream = Files.newInputStream(affixFiles[0]);
|
||||||
|
|
||||||
return new Dictionary(affixStream, dicStreams, ignoreCase);
|
try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
|
||||||
|
return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
|
||||||
|
}
|
||||||
|
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
logger.error("Could not load hunspell dictionary [{}]", e, locale);
|
logger.error("Could not load hunspell dictionary [{}]", e, locale);
|
||||||
|
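Lucene 6's Dictionary constructor needs a scratch Directory (plus a file-name prefix) where it can stage temporary files while building the dictionary's internal structures; that is what the fix above supplies. A hedged, self-contained sketch of loading a dictionary under the new signature (the paths and method name are illustrative):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.Collections;

    import org.apache.lucene.analysis.hunspell.Dictionary;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.SimpleFSDirectory;

    static Dictionary load() throws Exception {
        Path affix = Paths.get("en_US.aff"); // illustrative paths
        Path dic = Paths.get("en_US.dic");
        try (Directory tmp = new SimpleFSDirectory(Files.createTempDirectory("hunspell"));
             InputStream affixStream = Files.newInputStream(affix);
             InputStream dicStream = Files.newInputStream(dic)) {
            // Lucene 6: the temp Directory and a name prefix come first.
            return new Dictionary(tmp, "hunspell",
                    affixStream, Collections.singletonList(dicStream), false);
        }
    }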
@@ -115,10 +115,6 @@ final class PercolatorQuery extends Query {

     @Override
     public Query rewrite(IndexReader reader) throws IOException {
-        if (getBoost() != 1f) {
-            return super.rewrite(reader);
-        }
-
         Query rewritten = percolatorQueriesQuery.rewrite(reader);
         if (rewritten != percolatorQueriesQuery) {
             return new PercolatorQuery(rewritten, percolatorIndexSearcher, percolatorQueries);
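Lucene 6 moves away from mutable per-query boosts (Query.setBoost/getBoost) toward immutable queries, which is presumably why the boost guard in rewrite() is dropped here. Boosts are instead expressed by wrapping, as in this sketch (field and value are illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BoostQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    // Instead of query.setBoost(2.0f), wrap the query:
    Query base = new TermQuery(new Term("field", "value"));
    Query boosted = new BoostQuery(base, 2.0f);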
@@ -167,7 +167,7 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
         BytesReference bytes = write(obj);
         try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
             final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")";
-            try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, byteArrayOutputStream, BUFFER_SIZE)) {
+            try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, byteArrayOutputStream, BUFFER_SIZE)) {
                 CodecUtil.writeHeader(indexOutput, codec, VERSION);
                 try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) {
                     @Override
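Lucene 6 adds a logical `name` parameter to OutputStreamIndexOutput, inserted between the resource description and the stream; the diff passes the blob name for it. A small sketch under that assumption (the strings are illustrative):

    import java.io.ByteArrayOutputStream;
    import org.apache.lucene.store.OutputStreamIndexOutput;

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    // Lucene 5.x: new OutputStreamIndexOutput(desc, out, 4096)
    // Lucene 6.x: an extra `name` argument identifies the output.
    OutputStreamIndexOutput indexOutput =
            new OutputStreamIndexOutput("example resource", "example-blob", out, 4096);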
@@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;

@@ -108,7 +109,7 @@ public class SignificantStringTerms extends InternalSignificantTerms<Significant

     @Override
     int compareTerm(SignificantTerms.Bucket other) {
-        return BytesRef.getUTF8SortedAsUnicodeComparator().compare(termBytes, ((Bucket) other).termBytes);
+        return termBytes.compareTo(((Bucket) other).termBytes);
     }

     @Override
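BytesRef's natural ordering now compares the bytes as unsigned values, which is the same order the old UTF8-sorted-as-Unicode comparator produced, so the explicit comparator goes away. A short sketch:

    import org.apache.lucene.util.BytesRef;

    BytesRef a = new BytesRef("apple");
    BytesRef b = new BytesRef("banana");
    // Lucene 5.x: BytesRef.getUTF8SortedAsUnicodeComparator().compare(a, b)
    // Lucene 6.x: the natural ordering is equivalent.
    int cmp = a.compareTo(b); // negative: "apple" sorts before "banana"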
@@ -26,7 +26,6 @@ import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LongBitSet;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lease.Releasables;

@@ -136,7 +135,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr

     protected static void copy(BytesRef from, BytesRef to) {
         if (to.bytes.length < from.length) {
-            to.bytes = new byte[ArrayUtil.oversize(from.length, RamUsageEstimator.NUM_BYTES_BYTE)];
+            to.bytes = new byte[ArrayUtil.oversize(from.length, 1)];
         }
         to.offset = 0;
         to.length = from.length;
@@ -105,7 +105,7 @@ public class StringTerms extends InternalTerms<StringTerms, StringTerms.Bucket>

     @Override
     int compareTerm(Terms.Bucket other) {
-        return BytesRef.getUTF8SortedAsUnicodeComparator().compare(termBytes, ((Bucket) other).termBytes);
+        return termBytes.compareTo(((Bucket) other).termBytes);
     }

     @Override
Some files were not shown because too many files have changed in this diff.