Merge remote-tracking branch 'origin/master' into feature/client_aggs_parsing

# Conflicts:
#	core/src/main/java/org/elasticsearch/search/DocValueFormat.java
#	core/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationTestCase.java
#	core/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalMaxTests.java
#	core/src/test/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvgTests.java
#	core/src/test/java/org/elasticsearch/search/aggregations/metrics/min/InternalMinTests.java
Tanguy Leroux 2017-04-19 10:12:11 +02:00
commit 5717ac3cc6
402 changed files with 5969 additions and 4960 deletions

@@ -345,7 +345,6 @@ VM running trusty by running
These are the linux flavors the Vagrantfile currently supports:
* ubuntu-1204 aka precise
* ubuntu-1404 aka trusty
* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
@@ -431,7 +430,7 @@ gradle vagrantFedora24#up
-------------------------------------------------
Or any of vagrantCentos6#up, vagrantDebian8#up, vagrantFedora24#up, vagrantOel6#up,
vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1204#up,
vagrantOel7#up, vagrantOpensuse13#up, vagrantSles12#up, vagrantUbuntu1404#up,
vagrantUbuntu1604#up.
Once up, you can then connect to the VM using SSH from the elasticsearch directory:

Vagrantfile (vendored)

@@ -22,10 +22,6 @@
# under the License.
Vagrant.configure(2) do |config|
config.vm.define "ubuntu-1204" do |config|
config.vm.box = "elastic/ubuntu-12.04-x86_64"
ubuntu_common config
end
config.vm.define "ubuntu-1404" do |config|
config.vm.box = "elastic/ubuntu-14.04-x86_64"
ubuntu_common config

@@ -63,13 +63,13 @@ public class PluginBuildPlugin extends BuildPlugin {
project.ext.set("nebulaPublish.maven.jar", false)
}
project.integTest.dependsOn(project.bundlePlugin)
project.integTestCluster.dependsOn(project.bundlePlugin)
project.tasks.run.dependsOn(project.bundlePlugin)
if (isModule) {
project.integTest.clusterConfig.module(project)
project.integTestCluster.module(project)
project.tasks.run.clusterConfig.module(project)
} else {
project.integTest.clusterConfig.plugin(project.path)
project.integTestCluster.plugin(project.path)
project.tasks.run.clusterConfig.plugin(project.path)
addZipPomGeneration(project)
addNoticeGeneration(project)

@@ -22,7 +22,6 @@ class VagrantTestPlugin implements Plugin<Project> {
'oel-7',
'opensuse-13',
'sles-12',
'ubuntu-1204',
'ubuntu-1404',
'ubuntu-1604'
]

@@ -26,12 +26,6 @@ java.util.concurrent.ThreadLocalRandom
java.security.MessageDigest#clone() @ use org.elasticsearch.common.hash.MessageDigests
@defaultMessage this should not have been added to lucene in the first place
org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
@defaultMessage Soon to be removed
org.apache.lucene.document.FieldType#numericType()
@defaultMessage Don't use MethodHandles in slow ways, don't be lenient in tests.
java.lang.invoke.MethodHandle#invoke(java.lang.Object[])
java.lang.invoke.MethodHandle#invokeWithArguments(java.lang.Object[])

@@ -36,16 +36,6 @@ org.apache.lucene.index.IndexReader#decRef()
org.apache.lucene.index.IndexReader#incRef()
org.apache.lucene.index.IndexReader#tryIncRef()
@defaultMessage Close listeners can only installed via ElasticsearchDirectoryReader#addReaderCloseListener
org.apache.lucene.index.IndexReader#addReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)
org.apache.lucene.index.IndexReader#removeReaderClosedListener(org.apache.lucene.index.IndexReader$ReaderClosedListener)
@defaultMessage Pass the precision step from the mappings explicitly instead
org.apache.lucene.search.LegacyNumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean)
org.apache.lucene.search.LegacyNumericRangeQuery#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean)
@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead.
java.lang.Object#wait()
java.lang.Object#wait(long)

@@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha1
lucene = 6.5.0
lucene = 7.0.0-snapshot-89f6d17
# optional dependencies
spatial4j = 0.6

@@ -1 +0,0 @@
3989779b05ecd0ace6affe19223b1c27156604f1

@@ -0,0 +1 @@
e69234c2e898d86a53edbe8d22e33bebc45286cd

@@ -1 +0,0 @@
6a8660e7133f357ef40d9cac26316ccd9937a2eb

@@ -0,0 +1 @@
48172a8e1fe6562f55ab671d42af53652794d5df

@@ -1 +0,0 @@
ff176c9bde4228b43827849f5d2ff2e2717e3297

@@ -0,0 +1 @@
3dab251d4c7ab4ff5095e5f1d1e127ec2cf3c07d

@@ -1 +0,0 @@
10d2e5b36f460527ac9b948be0ec3077bde5b0ca

@@ -0,0 +1 @@
c01ae8a23b733d75d058a76bd85fcb49b9fd06fd

@@ -1 +0,0 @@
0019bb6a631ea0123e8e553b0510fa81c9d3c3eb

@@ -0,0 +1 @@
c53df048b97946fe66035505306b5651b702adb1

@@ -1 +0,0 @@
dad85baba266793b9ceb80a9b08c4ee9838e09df

@@ -0,0 +1 @@
1ecb349ba29abab75359e5125ac8a94fc81441d5

@@ -1 +0,0 @@
938f9f7efe8a403fd57c99aedd75d040d9caa896

@@ -0,0 +1 @@
e5f53b38652b1284ff254fba39e624ec117aef7d

@@ -1 +0,0 @@
afdff39ecb30f6e2c6f056a5bdfcb13d928a25af

@@ -0,0 +1 @@
2f340ed3f46d6b4c89fa31975b675c19028c15eb

@@ -1 +0,0 @@
8e3971a008070712d57b59cf1f7b44c0d9d3df25

@@ -0,0 +1 @@
a13862fb62cc1e516d16d6b6bb3cdb906c4925f6

@@ -1 +0,0 @@
225b904edf91ccdffffa398e1924ebadd5677c09

@@ -0,0 +1 @@
4e014f72a588453bae7dd1a555d741cf3bf39032

@@ -1 +0,0 @@
5c994fc5dc4f37133a861571211303d81c5d51ff

@@ -0,0 +1 @@
5e87d61c604d6b1c0ee5c38f09441d1b8b9c8c2b

@@ -1 +0,0 @@
553b7b13bef994f14076a85557df03cad67322e9

@@ -0,0 +1 @@
be14aa163b339403d8ec904493c1be5dfa9baeaf

@@ -1 +0,0 @@
73deae791d861820974600705ba06e9f801cbe56

@@ -0,0 +1 @@
a2c13be0fe4c5a98a30ec6ae673be1442409817c

@@ -1 +0,0 @@
c2aad69500dac79338ef45f570cab47bec3d2724

@@ -0,0 +1 @@
92b8282e474845fdae31f9f239f953bc7164401f

@@ -1 +0,0 @@
acf211f2bf901dfc8155a46c5a42c5650edf74ef

@@ -0,0 +1 @@
1c4aaea267ed41657ebf01769bfddbcab5b27414

@@ -296,16 +296,15 @@ public abstract class BlendedTermQuery extends Query {
return Objects.hash(classHash(), Arrays.hashCode(equalsTerms()));
}
public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) {
return booleanBlendedQuery(terms, null, disableCoord);
public static BlendedTermQuery booleanBlendedQuery(Term[] terms) {
return booleanBlendedQuery(terms, null);
}
public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final float[] boosts, final boolean disableCoord) {
public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final float[] boosts) {
return new BlendedTermQuery(terms, boosts) {
@Override
protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
BooleanQuery.Builder booleanQueryBuilder = new BooleanQuery.Builder();
booleanQueryBuilder.setDisableCoord(disableCoord);
for (int i = 0; i < terms.length; i++) {
Query query = new TermQuery(terms[i], ctx[i]);
if (boosts != null && boosts[i] != 1f) {
@@ -318,14 +317,12 @@ public abstract class BlendedTermQuery extends Query {
};
}
public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final boolean disableCoord, final float maxTermFrequency) {
public static BlendedTermQuery commonTermsBlendedQuery(Term[] terms, final float[] boosts, final float maxTermFrequency) {
return new BlendedTermQuery(terms, boosts) {
@Override
protected Query topLevelQuery(Term[] terms, TermContext[] ctx, int[] docFreqs, int maxDoc) {
BooleanQuery.Builder highBuilder = new BooleanQuery.Builder();
highBuilder.setDisableCoord(disableCoord);
BooleanQuery.Builder lowBuilder = new BooleanQuery.Builder();
lowBuilder.setDisableCoord(disableCoord);
for (int i = 0; i < terms.length; i++) {
Query query = new TermQuery(terms[i], ctx[i]);
if (boosts != null && boosts[i] != 1f) {
@@ -343,7 +340,6 @@ public abstract class BlendedTermQuery extends Query {
BooleanQuery low = lowBuilder.build();
if (low.clauses().isEmpty()) {
BooleanQuery.Builder queryBuilder = new BooleanQuery.Builder();
queryBuilder.setDisableCoord(disableCoord);
for (BooleanClause booleanClause : high) {
queryBuilder.add(booleanClause.getQuery(), Occur.MUST);
}
@@ -352,7 +348,6 @@ public abstract class BlendedTermQuery extends Query {
return low;
} else {
return new BooleanQuery.Builder()
.setDisableCoord(true)
.add(high, BooleanClause.Occur.SHOULD)
.add(low, BooleanClause.Occur.MUST)
.build();
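
A quick aside on why every disableCoord flag disappears in this file: Lucene 7.0 removed the coord scoring factor, so BooleanQuery.Builder no longer has setDisableCoord(...) and callers simply drop the argument. A minimal sketch of the resulting pattern, assuming Lucene 7.x (class and method names below are illustrative):

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class CoordRemovalSketch {
    // Blend a set of terms into one boolean query; in Lucene 7 there is no
    // coord factor to disable, so the builder is used as-is.
    static Query blend(Term[] terms) {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (Term term : terms) {
            builder.add(new TermQuery(term), Occur.SHOULD); // each term optional
        }
        return builder.build(); // scoring comes from the similarity alone
    }
}
```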

@@ -35,8 +35,8 @@ public class ExtendedCommonTermsQuery extends CommonTermsQuery {
private final MappedFieldType fieldType;
public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency, boolean disableCoord, MappedFieldType fieldType) {
super(highFreqOccur, lowFreqOccur, maxTermFrequency, disableCoord);
public ExtendedCommonTermsQuery(Occur highFreqOccur, Occur lowFreqOccur, float maxTermFrequency, MappedFieldType fieldType) {
super(highFreqOccur, lowFreqOccur, maxTermFrequency);
this.fieldType = fieldType;
}

@@ -57,8 +57,8 @@ public final class MinDocQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new ConstantScoreWeight(this, boost) {
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final int maxDoc = context.reader().maxDoc();
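
The change above tracks the Lucene 7.0 Weight API: the boost is now a createWeight parameter and is handed straight to ConstantScoreWeight instead of being applied later through normalize(...). A hypothetical skeleton of the same contract:

```java
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Hypothetical constant-scoring query; only the createWeight shape matters here.
abstract class BoostAwareQuery extends Query {
    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
        // The boost travels down as a constructor argument; there is no
        // getValueForNormalization()/normalize(...) pass anymore.
        return new ConstantScoreWeight(this, boost) {
            @Override
            public Scorer scorer(LeafReaderContext context) throws IOException {
                return null; // "match nothing" placeholder for the sketch
            }
        };
    }
}
```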

@@ -25,9 +25,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.analyzing.AnalyzingQueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
@@ -70,7 +68,7 @@ import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded
* Also breaks fields with [type].[name] into a boolean query that must include the type
* as well as the query on the name.
*/
public class MapperQueryParser extends AnalyzingQueryParser {
public class MapperQueryParser extends QueryParser {
public static final Map<String, FieldQueryExtension> FIELD_QUERY_EXTENSIONS;
@@ -103,14 +101,13 @@ public class MapperQueryParser extends AnalyzingQueryParser {
setAnalyzer(settings.analyzer());
setMultiTermRewriteMethod(settings.rewriteMethod());
setEnablePositionIncrements(settings.enablePositionIncrements());
setSplitOnWhitespace(settings.splitOnWhitespace());
setAutoGeneratePhraseQueries(settings.autoGeneratePhraseQueries());
setMaxDeterminizedStates(settings.maxDeterminizedStates());
setAllowLeadingWildcard(settings.allowLeadingWildcard());
setLowercaseExpandedTerms(false);
setPhraseSlop(settings.phraseSlop());
setDefaultOperator(settings.defaultOperator());
setFuzzyPrefixLength(settings.fuzzyPrefixLength());
setSplitOnWhitespace(settings.splitOnWhitespace());
}
/**
@@ -175,7 +172,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
if (clauses.isEmpty()) return null; // happens for stopwords
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
} else {
return getFieldQuerySingle(field, queryText, quoted);
@@ -277,7 +274,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
if (clauses.isEmpty()) return null; // happens for stopwords
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
} else {
return super.getFieldQuery(field, queryText, slop);
@@ -328,7 +325,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
if (clauses.isEmpty()) return null; // happens for stopwords
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
}
@@ -386,7 +383,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
clauses.add(new BooleanClause(applyBoost(mField, q), BooleanClause.Occur.SHOULD));
}
}
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
} else {
return getFuzzyQuerySingle(field, termStr, minSimilarity);
@@ -450,7 +447,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
if (clauses.isEmpty()) return null; // happens for stopwords
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
} else {
return getPrefixQuerySingle(field, termStr);
@@ -559,7 +556,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
innerClauses.add(new BooleanClause(super.getPrefixQuery(field, token),
BooleanClause.Occur.SHOULD));
}
posQuery = getBooleanQueryCoordDisabled(innerClauses);
posQuery = getBooleanQuery(innerClauses);
}
clauses.add(new BooleanClause(posQuery,
getDefaultOperator() == Operator.AND ? BooleanClause.Occur.MUST : BooleanClause.Occur.SHOULD));
@@ -612,7 +609,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
if (clauses.isEmpty()) return null; // happens for stopwords
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
} else {
return getWildcardQuerySingle(field, termStr);
@@ -676,7 +673,7 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
if (clauses.isEmpty()) return null; // happens for stopwords
return getBooleanQueryCoordDisabled(clauses);
return getBooleanQuery(clauses);
}
} else {
return getRegexpQuerySingle(field, termStr);
@@ -713,19 +710,6 @@ public class MapperQueryParser extends AnalyzingQueryParser {
}
}
/**
* @deprecated review all use of this, don't rely on coord
*/
@Deprecated
protected Query getBooleanQueryCoordDisabled(List<BooleanClause> clauses) throws ParseException {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.setDisableCoord(true);
for (BooleanClause clause : clauses) {
builder.add(clause);
}
return fixNegativeQueryIfNeeded(builder.build());
}
@Override
protected Query getBooleanQuery(List<BooleanClause> clauses) throws ParseException {

@@ -22,31 +22,32 @@ import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.fielddata.AbstractNumericDocValues;
import org.elasticsearch.index.fielddata.AbstractSortedDocValues;
import java.io.IOException;
import java.util.Collection;
/**
* Utility class that ensures that a single collapse key is extracted per document.
*/
abstract class CollapsingDocValuesSource<T> {
abstract class CollapsingDocValuesSource<T> extends GroupSelector<T> {
protected final String field;
CollapsingDocValuesSource(String field) throws IOException {
this.field = field;
}
abstract T get(int doc);
abstract T copy(T value, T reuse);
abstract void setNextReader(LeafReader reader) throws IOException;
@Override
public void setGroups(Collection<SearchGroup<T>> groups) {
throw new UnsupportedOperationException();
}
/**
* Implementation for {@link NumericDocValues} and {@link SortedNumericDocValues}.
@@ -54,35 +55,43 @@ abstract class CollapsingDocValuesSource<T> {
*/
static class Numeric extends CollapsingDocValuesSource<Long> {
private NumericDocValues values;
private Bits docsWithField;
private long value;
private boolean hasValue;
Numeric(String field) throws IOException {
super(field);
}
@Override
public Long get(int doc) {
if (docsWithField.get(doc)) {
return values.get(doc);
public State advanceTo(int doc) throws IOException {
if (values.advanceExact(doc)) {
hasValue = true;
value = values.longValue();
return State.ACCEPT;
} else {
return null;
hasValue = false;
return State.SKIP;
}
}
@Override
public Long copy(Long value, Long reuse) {
return value;
public Long currentValue() {
return hasValue ? value : null;
}
@Override
public void setNextReader(LeafReader reader) throws IOException {
public Long copyValue() {
return currentValue();
}
@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
LeafReader reader = readerContext.reader();
DocValuesType type = getDocValuesType(reader, field);
if (type == null || type == DocValuesType.NONE) {
values = DocValues.emptyNumeric();
docsWithField = new Bits.MatchNoBits(reader.maxDoc());
return ;
}
docsWithField = DocValues.getDocsWithField(reader, field);
switch (type) {
case NUMERIC:
values = DocValues.getNumeric(reader, field);
@@ -92,17 +101,34 @@ abstract class CollapsingDocValuesSource<T> {
final SortedNumericDocValues sorted = DocValues.getSortedNumeric(reader, field);
values = DocValues.unwrapSingleton(sorted);
if (values == null) {
values = new NumericDocValues() {
values = new AbstractNumericDocValues() {
private long value;
@Override
public long get(int docID) {
sorted.setDocument(docID);
assert sorted.count() > 0;
if (sorted.count() > 1) {
throw new IllegalStateException("failed to collapse " + docID +
", the collapse field must be single valued");
public boolean advanceExact(int target) throws IOException {
if (sorted.advanceExact(target)) {
if (sorted.docValueCount() > 1) {
throw new IllegalStateException("failed to collapse " + target +
", the collapse field must be single valued");
}
value = sorted.nextValue();
return true;
} else {
return false;
}
return sorted.valueAt(0);
}
@Override
public int docID() {
return sorted.docID();
}
@Override
public long longValue() throws IOException {
return value;
}
};
}
break;
@@ -119,47 +145,56 @@ abstract class CollapsingDocValuesSource<T> {
* Fails with an {@link IllegalStateException} if a document contains multiple values for the specified field.
*/
static class Keyword extends CollapsingDocValuesSource<BytesRef> {
private Bits docsWithField;
private SortedDocValues values;
private int ord;
Keyword(String field) throws IOException {
super(field);
}
@Override
public BytesRef get(int doc) {
if (docsWithField.get(doc)) {
return values.get(doc);
public org.apache.lucene.search.grouping.GroupSelector.State advanceTo(int doc)
throws IOException {
if (values.advanceExact(doc)) {
ord = values.ordValue();
return State.ACCEPT;
} else {
return null;
ord = -1;
return State.SKIP;
}
}
@Override
public BytesRef copy(BytesRef value, BytesRef reuse) {
public BytesRef currentValue() {
if (ord == -1) {
return null;
} else {
try {
return values.lookupOrd(ord);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
@Override
public BytesRef copyValue() {
BytesRef value = currentValue();
if (value == null) {
return null;
}
if (reuse != null) {
reuse.bytes = ArrayUtil.grow(reuse.bytes, value.length);
reuse.offset = 0;
reuse.length = value.length;
System.arraycopy(value.bytes, value.offset, reuse.bytes, 0, value.length);
return reuse;
} else {
return BytesRef.deepCopyOf(value);
}
}
@Override
public void setNextReader(LeafReader reader) throws IOException {
public void setNextReader(LeafReaderContext readerContext) throws IOException {
LeafReader reader = readerContext.reader();
DocValuesType type = getDocValuesType(reader, field);
if (type == null || type == DocValuesType.NONE) {
values = DocValues.emptySorted();
docsWithField = new Bits.MatchNoBits(reader.maxDoc());
return ;
}
docsWithField = DocValues.getDocsWithField(reader, field);
switch (type) {
case SORTED:
values = DocValues.getSorted(reader, field);
@@ -169,20 +204,36 @@ abstract class CollapsingDocValuesSource<T> {
final SortedSetDocValues sorted = DocValues.getSortedSet(reader, field);
values = DocValues.unwrapSingleton(sorted);
if (values == null) {
values = new SortedDocValues() {
values = new AbstractSortedDocValues() {
private int ord;
@Override
public int getOrd(int docID) {
sorted.setDocument(docID);
int ord = (int) sorted.nextOrd();
if (sorted.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) {
throw new IllegalStateException("failed to collapse " + docID +
", the collapse field must be single valued");
public boolean advanceExact(int target) throws IOException {
if (sorted.advanceExact(target)) {
ord = (int) sorted.nextOrd();
if (sorted.nextOrd() != SortedSetDocValues.NO_MORE_ORDS) {
throw new IllegalStateException("failed to collapse " + target +
", the collapse field must be single valued");
}
return true;
} else {
return false;
}
}
@Override
public int docID() {
return sorted.docID();
}
@Override
public int ordValue() {
return ord;
}
@Override
public BytesRef lookupOrd(int ord) {
public BytesRef lookupOrd(int ord) throws IOException {
return sorted.lookupOrd(ord);
}

@@ -18,13 +18,11 @@
*/
package org.apache.lucene.search.grouping;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
import java.util.Collection;
@@ -37,7 +35,7 @@ import static org.apache.lucene.search.SortField.Type.SCORE;
* output. The collapsing is done in a single pass by selecting only the top sorted document per collapse key.
* The value used for the collapse key of each group can be found in {@link CollapseTopFieldDocs#collapseValues}.
*/
public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector<T> {
public final class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector<T> {
protected final String collapseField;
protected final Sort sort;
@@ -47,9 +45,9 @@ public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector
private float maxScore;
private final boolean trackMaxScore;
private CollapsingTopDocsCollector(String collapseField, Sort sort,
CollapsingTopDocsCollector(GroupSelector<T> groupSelector, String collapseField, Sort sort,
int topN, boolean trackMaxScore) throws IOException {
super(sort, topN);
super(groupSelector, sort, topN);
this.collapseField = collapseField;
this.trackMaxScore = trackMaxScore;
if (trackMaxScore) {
@@ -65,7 +63,7 @@ public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector
* {@link CollapseTopFieldDocs}. The collapsing needs only one pass so we can create the final top docs at the end
* of the first pass.
*/
public CollapseTopFieldDocs getTopDocs() {
public CollapseTopFieldDocs getTopDocs() throws IOException {
Collection<SearchGroup<T>> groups = super.getTopGroups(0, true);
if (groups == null) {
return new CollapseTopFieldDocs(collapseField, totalHitCount, new ScoreDoc[0],
@@ -121,57 +119,6 @@ public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector
totalHitCount++;
}
private static class Numeric extends CollapsingTopDocsCollector<Long> {
private final CollapsingDocValuesSource.Numeric source;
private Numeric(String collapseField, Sort sort, int topN, boolean trackMaxScore) throws IOException {
super(collapseField, sort, topN, trackMaxScore);
source = new CollapsingDocValuesSource.Numeric(collapseField);
}
@Override
protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
super.doSetNextReader(readerContext);
source.setNextReader(readerContext.reader());
}
@Override
protected Long getDocGroupValue(int doc) {
return source.get(doc);
}
@Override
protected Long copyDocGroupValue(Long groupValue, Long reuse) {
return source.copy(groupValue, reuse);
}
}
private static class Keyword extends CollapsingTopDocsCollector<BytesRef> {
private final CollapsingDocValuesSource.Keyword source;
private Keyword(String collapseField, Sort sort, int topN, boolean trackMaxScore) throws IOException {
super(collapseField, sort, topN, trackMaxScore);
source = new CollapsingDocValuesSource.Keyword(collapseField);
}
@Override
protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
super.doSetNextReader(readerContext);
source.setNextReader(readerContext.reader());
}
@Override
protected BytesRef getDocGroupValue(int doc) {
return source.get(doc);
}
@Override
protected BytesRef copyDocGroupValue(BytesRef groupValue, BytesRef reuse) {
return source.copy(groupValue, reuse);
}
}
/**
* Create a collapsing top docs collector on a {@link org.apache.lucene.index.NumericDocValues} field.
* It accepts also {@link org.apache.lucene.index.SortedNumericDocValues} field but
@@ -189,7 +136,8 @@ public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector
*/
public static CollapsingTopDocsCollector<?> createNumeric(String collapseField, Sort sort,
int topN, boolean trackMaxScore) throws IOException {
return new Numeric(collapseField, sort, topN, trackMaxScore);
return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Numeric(collapseField),
collapseField, sort, topN, trackMaxScore);
}
/**
@@ -208,7 +156,8 @@ public abstract class CollapsingTopDocsCollector<T> extends FirstPassGroupingCollector
*/
public static CollapsingTopDocsCollector<?> createKeyword(String collapseField, Sort sort,
int topN, boolean trackMaxScore) throws IOException {
return new Keyword(collapseField, sort, topN, trackMaxScore);
return new CollapsingTopDocsCollector<>(new CollapsingDocValuesSource.Keyword(collapseField),
collapseField, sort, topN, trackMaxScore);
}
}
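
With the Numeric and Keyword subclasses replaced by GroupSelector-based construction, callers go through the unchanged factory methods; only getTopDocs() now declares IOException. A hedged usage sketch ("user_id" is an illustrative collapse field):

```java
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
import org.apache.lucene.search.grouping.CollapsingTopDocsCollector;

class CollapseUsageSketch {
    // Keep only the best-scoring document per distinct value of "user_id".
    static CollapseTopFieldDocs collapse(IndexSearcher searcher, Query query) throws IOException {
        CollapsingTopDocsCollector<?> collector =
            CollapsingTopDocsCollector.createKeyword("user_id", Sort.RELEVANCE, 10, true);
        searcher.search(query, collector);
        return collector.getTopDocs(); // single pass; may now throw IOException
    }
}
```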

@@ -82,7 +82,7 @@ public class Version implements Comparable<Version> {
public static final Version V_5_5_0_UNRELEASED = new Version(V_5_5_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
public static final Version V_6_0_0_alpha1_UNRELEASED =
new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_6_0_0_alpha1_UNRELEASED;
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)

@@ -38,7 +38,8 @@ public class BulkItemRequest implements Streamable {
}
protected BulkItemRequest(int id, DocWriteRequest request) {
// NOTE: public for testing only
public BulkItemRequest(int id, DocWriteRequest request) {
this.id = id;
this.request = request;
}
@@ -56,13 +57,11 @@ public class BulkItemRequest implements Streamable {
return request.indices()[0];
}
// NOTE: protected for testing only
protected BulkItemResponse getPrimaryResponse() {
BulkItemResponse getPrimaryResponse() {
return primaryResponse;
}
// NOTE: protected for testing only
protected void setPrimaryResponse(BulkItemResponse primaryResponse) {
void setPrimaryResponse(BulkItemResponse primaryResponse) {
this.primaryResponse = primaryResponse;
}

@@ -37,6 +37,8 @@ import org.elasticsearch.common.xcontent.StatusToXContentObject;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
@@ -171,17 +173,34 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
private final String id;
private final Exception cause;
private final RestStatus status;
private final long seqNo;
Failure(String index, String type, String id, Exception cause, RestStatus status) {
/**
* For write failures before operation was assigned a sequence number.
*
* use @{link {@link #Failure(String, String, String, Exception, long)}}
* to record operation sequence no with failure
*/
public Failure(String index, String type, String id, Exception cause) {
this(index, type, id, cause, ExceptionsHelper.status(cause), SequenceNumbersService.UNASSIGNED_SEQ_NO);
}
public Failure(String index, String type, String id, Exception cause, RestStatus status) {
this(index, type, id, cause, status, SequenceNumbersService.UNASSIGNED_SEQ_NO);
}
/** For write failures after operation was assigned a sequence number. */
public Failure(String index, String type, String id, Exception cause, long seqNo) {
this(index, type, id, cause, ExceptionsHelper.status(cause), seqNo);
}
public Failure(String index, String type, String id, Exception cause, RestStatus status, long seqNo) {
this.index = index;
this.type = type;
this.id = id;
this.cause = cause;
this.status = status;
}
public Failure(String index, String type, String id, Exception cause) {
this(index, type, id, cause, ExceptionsHelper.status(cause));
this.seqNo = seqNo;
}
/**
@@ -193,6 +212,11 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
id = in.readOptionalString();
cause = in.readException();
status = ExceptionsHelper.status(cause);
if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
seqNo = in.readZLong();
} else {
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
}
@Override
@@ -201,6 +225,9 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
out.writeString(getType());
out.writeOptionalString(getId());
out.writeException(getCause());
if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
out.writeZLong(getSeqNo());
}
}
@@ -246,6 +273,15 @@ public class BulkItemResponse implements Streamable, StatusToXContentObject {
return cause;
}
/**
* The operation sequence number generated by primary
* NOTE: {@link SequenceNumbersService#UNASSIGNED_SEQ_NO}
* indicates sequence number was not generated by primary
*/
public long getSeqNo() {
return seqNo;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(INDEX_FIELD, index);
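
The new seqNo field is version-gated on the wire: it is only serialized when the remote node understands it, and readers fall back to UNASSIGNED_SEQ_NO otherwise. A small sketch restating the read-side rule from the hunk above:

```java
import java.io.IOException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.seqno.SequenceNumbersService;

class SeqNoWireSketch {
    // Mirror of the deserialization rule: absent on pre-6.0 streams means "unassigned".
    static long readSeqNo(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
            return in.readZLong(); // zig-zag varlong keeps small negatives compact
        }
        return SequenceNumbersService.UNASSIGNED_SEQ_NO; // old sender: no seq no on the wire
    }
}
```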

@@ -23,6 +23,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.delete.DeleteRequest;
@@ -43,7 +44,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
@@ -65,13 +65,9 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.index.translog.Translog.Location;
import org.elasticsearch.action.bulk.BulkItemResultHolder;
import org.elasticsearch.action.bulk.BulkItemResponse;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.function.LongSupplier;
/** Performs shard-level bulk (index, delete or update) operations */
@@ -113,12 +109,20 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
@Override
public WritePrimaryResult<BulkShardRequest, BulkShardResponse> shardOperationOnPrimary(
BulkShardRequest request, IndexShard primary) throws Exception {
return performOnPrimary(request, primary, updateHelper, threadPool::absoluteTimeInMillis, new ConcreteMappingUpdatePerformer());
}
public static WritePrimaryResult<BulkShardRequest, BulkShardResponse> performOnPrimary(
BulkShardRequest request,
IndexShard primary,
UpdateHelper updateHelper,
LongSupplier nowInMillisSupplier,
MappingUpdatePerformer mappingUpdater) throws Exception {
final IndexMetaData metaData = primary.indexSettings().getIndexMetaData();
Translog.Location location = null;
final MappingUpdatePerformer mappingUpdater = new ConcreteMappingUpdatePerformer();
for (int requestIndex = 0; requestIndex < request.items().length; requestIndex++) {
location = executeBulkItemRequest(metaData, primary, request, location, requestIndex,
updateHelper, threadPool::absoluteTimeInMillis, mappingUpdater);
updateHelper, nowInMillisSupplier, mappingUpdater);
}
BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
BulkItemRequest[] items = request.items();
@@ -129,7 +133,6 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
return new WritePrimaryResult<>(request, response, location, null, primary, logger);
}
private static BulkItemResultHolder executeIndexRequest(final IndexRequest indexRequest,
final BulkItemRequest bulkItemRequest,
final IndexShard primary,
@@ -208,7 +211,8 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
// Make sure to use request.index() here, if you
// use docWriteRequest.index() it will use the
// concrete index instead of an alias if used!
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(), failure));
new BulkItemResponse.Failure(request.index(), docWriteRequest.type(), docWriteRequest.id(),
failure, operationResult.getSeqNo()));
} else {
assert replicaRequest.getPrimaryResponse() != null : "replica request must have a primary response";
return null;
@@ -221,7 +225,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
BulkShardRequest request, Translog.Location location,
int requestIndex, UpdateHelper updateHelper,
LongSupplier nowInMillisSupplier,
final MappingUpdatePerformer mappingUpdater) throws Exception {
final MappingUpdatePerformer mappingUpdater) throws Exception {
final DocWriteRequest itemRequest = request.items()[requestIndex].request();
final DocWriteRequest.OpType opType = itemRequest.opType();
final BulkItemResultHolder responseHolder;
@@ -358,58 +362,129 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
return new BulkItemResultHolder(updateResponse, updateOperationResult, replicaRequest);
}
static boolean shouldExecuteReplicaItem(final BulkItemRequest request, final int index) {
/** Modes for executing item request on replica depending on corresponding primary execution result */
public enum ReplicaItemExecutionMode {
/**
* When primary execution succeeded
*/
NORMAL,
/**
* When primary execution failed before sequence no was generated
* or primary execution was a noop (only possible when request is originating from pre-6.0 nodes)
*/
NOOP,
/**
* When primary execution failed after sequence no was generated
*/
FAILURE
}
static {
assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_5_0_0) == false:
"Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" +
" as the current minimum compatible version [" +
Version.CURRENT.minimumCompatibilityVersion() + "] is after 5.0";
}
/**
* Determines whether a bulk item request should be executed on the replica.
* @return {@link ReplicaItemExecutionMode#NORMAL} upon normal primary execution with no failures
* {@link ReplicaItemExecutionMode#FAILURE} upon primary execution failure after sequence no generation
* {@link ReplicaItemExecutionMode#NOOP} upon primary execution failure before sequence no generation or
* when primary execution resulted in noop (only possible for write requests from pre-6.0 nodes)
*/
static ReplicaItemExecutionMode replicaItemExecutionMode(final BulkItemRequest request, final int index) {
final BulkItemResponse primaryResponse = request.getPrimaryResponse();
assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request ["+ request.request() +"]";
return primaryResponse.isFailed() == false &&
primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP;
assert primaryResponse != null : "expected primary response to be set for item [" + index + "] request [" + request.request() + "]";
if (primaryResponse.isFailed()) {
return primaryResponse.getFailure().getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO
? ReplicaItemExecutionMode.FAILURE // we have a seq no generated with the failure, replicate as no-op
: ReplicaItemExecutionMode.NOOP; // no seq no generated, ignore replication
} else {
// NOTE: write requests originating from pre-6.0 nodes can send a no-op operation to
// the replica; we ignore replication
// TODO: remove noOp result check from primary response, when pre-6.0 nodes are not supported
// we should return ReplicationItemExecutionMode.NORMAL instead
return primaryResponse.getResponse().getResult() != DocWriteResponse.Result.NOOP
? ReplicaItemExecutionMode.NORMAL // execution successful on primary
: ReplicaItemExecutionMode.NOOP; // ignore replication
}
}
@Override
public WriteReplicaResult<BulkShardRequest> shardOperationOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
final Translog.Location location = performOnReplica(request, replica);
return new WriteReplicaResult<>(request, location, null, replica, logger);
}
public static Translog.Location performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
Translog.Location location = null;
for (int i = 0; i < request.items().length; i++) {
BulkItemRequest item = request.items()[i];
if (shouldExecuteReplicaItem(item, i)) {
DocWriteRequest docWriteRequest = item.request();
DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
final Engine.Result operationResult;
try {
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica);
break;
case DELETE:
operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica);
break;
default:
throw new IllegalStateException("Unexpected request operation type on replica: "
+ docWriteRequest.opType().getLowercase());
}
if (operationResult.hasFailure()) {
// check if any transient write operation failures should be bubbled up
Exception failure = operationResult.getFailure();
assert failure instanceof VersionConflictEngineException
|| failure instanceof MapperParsingException
: "expected any one of [version conflict, mapper parsing, engine closed, index shard closed]" +
" failures. got " + failure;
if (!TransportActions.isShardNotAvailableException(failure)) {
throw failure;
final Engine.Result operationResult;
DocWriteRequest docWriteRequest = item.request();
try {
switch (replicaItemExecutionMode(item, i)) {
case NORMAL:
final DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operationResult = executeIndexRequestOnReplica(primaryResponse, (IndexRequest) docWriteRequest, replica);
break;
case DELETE:
operationResult = executeDeleteRequestOnReplica(primaryResponse, (DeleteRequest) docWriteRequest, replica);
break;
default:
throw new IllegalStateException("Unexpected request operation type on replica: "
+ docWriteRequest.opType().getLowercase());
}
} else {
location = locationToSync(location, operationResult.getTranslogLocation());
}
} catch (Exception e) {
// if its not an ignore replica failure, we need to make sure to bubble up the failure
// so we will fail the shard
if (!TransportActions.isShardNotAvailableException(e)) {
throw e;
}
assert operationResult != null : "operation result must never be null when primary response has no failure";
location = syncOperationResultOrThrow(operationResult, location);
break;
case NOOP:
break;
case FAILURE:
final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure();
assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned";
operationResult = executeFailureNoOpOnReplica(failure, replica);
assert operationResult != null : "operation result must never be null when primary response has no failure";
location = syncOperationResultOrThrow(operationResult, location);
break;
default:
throw new IllegalStateException("illegal replica item execution mode for: " + item.request());
}
} catch (Exception e) {
// if its not an ignore replica failure, we need to make sure to bubble up the failure
// so we will fail the shard
if (!TransportActions.isShardNotAvailableException(e)) {
throw e;
}
}
}
return new WriteReplicaResult<>(request, location, null, replica, logger);
return location;
}
/** Syncs operation result to the translog or throws a shard not available failure */
private static Translog.Location syncOperationResultOrThrow(final Engine.Result operationResult,
final Translog.Location currentLocation) throws Exception {
final Translog.Location location;
if (operationResult.hasFailure()) {
// check if any transient write operation failures should be bubbled up
Exception failure = operationResult.getFailure();
assert failure instanceof MapperParsingException : "expected mapper parsing failures. got " + failure;
if (!TransportActions.isShardNotAvailableException(failure)) {
throw failure;
} else {
location = currentLocation;
}
} else {
location = locationToSync(currentLocation, operationResult.getTranslogLocation());
}
return location;
}
private static Translog.Location locationToSync(Translog.Location current,
@@ -429,7 +504,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
* Execute the given {@link IndexRequest} on a replica shard, throwing a
* {@link RetryOnReplicaException} if the operation needs to be re-tried.
*/
public static Engine.IndexResult executeIndexRequestOnReplica(
private static Engine.IndexResult executeIndexRequestOnReplica(
DocWriteResponse primaryResponse,
IndexRequest request,
IndexShard replica) throws IOException {
@@ -472,7 +547,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
}
/** Utility method to prepare an index operation on primary shards */
static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
final SourceToParse sourceToParse =
SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(),
request.id(), request.source(), request.getContentType())
@@ -482,8 +557,8 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
}
/** Executes index operation on primary shard after updates mapping if dynamic mappings are found */
public static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
MappingUpdatePerformer mappingUpdater) throws Exception {
static Engine.IndexResult executeIndexRequestOnPrimary(IndexRequest request, IndexShard primary,
MappingUpdatePerformer mappingUpdater) throws Exception {
// Update the mappings if parsing the documents includes new dynamic updates
final Engine.Index preUpdateOperation;
final Mapping mappingUpdate;
@@ -533,6 +608,12 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequest
return replica.delete(delete);
}
private static Engine.NoOpResult executeFailureNoOpOnReplica(BulkItemResponse.Failure primaryFailure, IndexShard replica) throws IOException {
final Engine.NoOp noOp = replica.prepareMarkingSeqNoAsNoOp(
primaryFailure.getSeqNo(), primaryFailure.getMessage());
return replica.markSeqNoAsNoOp(noOp);
}
class ConcreteMappingUpdatePerformer implements MappingUpdatePerformer {
public void updateMappings(final Mapping update, final ShardId shardId,

@@ -328,15 +328,15 @@ public final class SearchPhaseController extends AbstractComponent {
continue;
}
FetchSearchResult fetchResult = searchResultProvider.fetchResult();
int fetchResultIndex = fetchResult.counterGetAndIncrement();
if (fetchResultIndex < fetchResult.hits().internalHits().length) {
SearchHit hit = fetchResult.hits().internalHits()[fetchResultIndex];
CompletionSuggestion.Entry.Option suggestOption =
suggestionOptions.get(scoreDocIndex - currentOffset);
hit.score(shardDoc.score);
hit.shard(fetchResult.getSearchShardTarget());
suggestOption.setHit(hit);
}
final int index = fetchResult.counterGetAndIncrement();
assert index < fetchResult.hits().internalHits().length : "not enough hits fetched. index [" + index + "] length: "
+ fetchResult.hits().internalHits().length;
SearchHit hit = fetchResult.hits().internalHits()[index];
CompletionSuggestion.Entry.Option suggestOption =
suggestionOptions.get(scoreDocIndex - currentOffset);
hit.score(shardDoc.score);
hit.shard(fetchResult.getSearchShardTarget());
suggestOption.setHit(hit);
}
currentOffset += suggestionOptions.size();
}
@@ -380,20 +380,20 @@ public final class SearchPhaseController extends AbstractComponent {
continue;
}
FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
int index = fetchResult.counterGetAndIncrement();
if (index < fetchResult.hits().internalHits().length) {
SearchHit searchHit = fetchResult.hits().internalHits()[index];
searchHit.score(shardDoc.score);
searchHit.shard(fetchResult.getSearchShardTarget());
if (sorted) {
FieldDoc fieldDoc = (FieldDoc) shardDoc;
searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.sortValueFormats);
if (sortScoreIndex != -1) {
searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
}
final int index = fetchResult.counterGetAndIncrement();
assert index < fetchResult.hits().internalHits().length : "not enough hits fetched. index [" + index + "] length: "
+ fetchResult.hits().internalHits().length;
SearchHit searchHit = fetchResult.hits().internalHits()[index];
searchHit.score(shardDoc.score);
searchHit.shard(fetchResult.getSearchShardTarget());
if (sorted) {
FieldDoc fieldDoc = (FieldDoc) shardDoc;
searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.sortValueFormats);
if (sortScoreIndex != -1) {
searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
}
hits.add(searchHit);
}
hits.add(searchHit);
}
}
return new SearchHits(hits.toArray(new SearchHit[hits.size()]), reducedQueryPhase.totalHits,

@@ -94,8 +94,10 @@ public abstract class TransportWriteAction<
/**
* Result of taking the action on the primary.
*
* NOTE: public for testing
*/
protected static class WritePrimaryResult<ReplicaRequest extends ReplicatedWriteRequest<ReplicaRequest>,
public static class WritePrimaryResult<ReplicaRequest extends ReplicatedWriteRequest<ReplicaRequest>,
Response extends ReplicationResponse & WriteResponse> extends PrimaryResult<ReplicaRequest, Response>
implements RespondingWriteResult {
boolean finishedAsyncActions;

@@ -18,13 +18,13 @@
*/
package org.elasticsearch.common.geo;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.spatial.util.MortonEncoder;
import org.apache.lucene.util.BitUtil;
import java.util.ArrayList;
import java.util.Collection;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.util.BitUtil;
/**
* Utilities for converting to/from the GeoHash standard
*
@@ -42,19 +42,35 @@ public class GeoHashUtils {
/** maximum precision for geohash strings */
public static final int PRECISION = 12;
private static final short MORTON_OFFSET = (GeoPointField.BITS<<1) - (PRECISION*5);
/** number of bits used for quantizing latitude and longitude values */
public static final short BITS = 31;
/** scaling factors to convert lat/lon into unsigned space */
private static final double LAT_SCALE = (0x1L<<BITS)/180.0D;
private static final double LON_SCALE = (0x1L<<BITS)/360.0D;
private static final short MORTON_OFFSET = (BITS<<1) - (PRECISION*5);
// No instance:
private GeoHashUtils() {
}
/*************************
* 31 bit encoding utils *
*************************/
public static long encodeLatLon(final double lat, final double lon) {
long result = MortonEncoder.encode(lat, lon);
if (result == 0xFFFFFFFFFFFFFFFFL) {
return result & 0xC000000000000000L;
}
return result >>> 2;
}
/**
* Encode lon/lat to the geohash based long format (lon/lat interleaved, 4 least significant bits = level)
*/
public static final long longEncode(final double lon, final double lat, final int level) {
// shift to appropriate level
final short msf = (short)(((12 - level) * 5) + MORTON_OFFSET);
return ((BitUtil.flipFlop(GeoPointField.encodeLatLon(lat, lon)) >>> msf) << 4) | level;
return ((BitUtil.flipFlop(encodeLatLon(lat, lon)) >>> msf) << 4) | level;
}
/**
@@ -120,7 +136,7 @@ public class GeoHashUtils {
*/
public static final String stringEncode(final double lon, final double lat, final int level) {
// convert to geohashlong
final long ghLong = fromMorton(GeoPointField.encodeLatLon(lat, lon), level);
final long ghLong = fromMorton(encodeLatLon(lat, lon), level);
return stringEncode(ghLong);
}
@@ -141,7 +157,7 @@ public class GeoHashUtils {
StringBuilder geoHash = new StringBuilder();
short precision = 0;
final short msf = (GeoPointField.BITS<<1)-5;
final short msf = (BITS<<1)-5;
long mask = 31L<<msf;
do {
geoHash.append(BASE_32[(int)((mask & hashedVal)>>>(msf-(precision*5)))]);
@@ -303,13 +319,31 @@ public class GeoHashUtils {
return neighbors;
}
/** decode longitude value from morton encoded geo point */
public static final double decodeLongitude(final long hash) {
return unscaleLon(BitUtil.deinterleave(hash));
}
/** decode latitude value from morton encoded geo point */
public static final double decodeLatitude(final long hash) {
return unscaleLat(BitUtil.deinterleave(hash >>> 1));
}
private static double unscaleLon(final long val) {
return (val / LON_SCALE) - 180;
}
private static double unscaleLat(final long val) {
return (val / LAT_SCALE) - 90;
}
/** returns the latitude value from the string based geohash */
public static final double decodeLatitude(final String geohash) {
return GeoPointField.decodeLatitude(mortonEncode(geohash));
return decodeLatitude(mortonEncode(geohash));
}
/** returns the latitude value from the string based geohash */
public static final double decodeLongitude(final String geohash) {
return GeoPointField.decodeLongitude(mortonEncode(geohash));
return decodeLongitude(mortonEncode(geohash));
}
}
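
With GeoPointField gone from Lucene 7, encoding is delegated to Lucene's MortonEncoder and decoding moves into this class. A round-trip sketch under those assumptions (the coordinates are illustrative; 31-bit quantization makes the round trip approximate):

```java
import org.elasticsearch.common.geo.GeoHashUtils;

class GeoHashRoundTripSketch {
    public static void main(String[] args) {
        long hash = GeoHashUtils.encodeLatLon(48.8566, 2.3522); // morton-interleaved lat/lon
        double lat = GeoHashUtils.decodeLatitude(hash);
        double lon = GeoHashUtils.decodeLongitude(hash);
        System.out.println(lat + "," + lon); // ~48.8566,2.3522 within quantization error
    }
}
```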

@@ -23,7 +23,6 @@ import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;
@@ -87,8 +86,8 @@ public final class GeoPoint {
}
public GeoPoint resetFromIndexHash(long hash) {
lon = GeoPointField.decodeLongitude(hash);
lat = GeoPointField.decodeLatitude(hash);
lon = GeoHashUtils.decodeLongitude(hash);
lat = GeoHashUtils.decodeLatitude(hash);
return this;
}
@@ -112,7 +111,7 @@ public final class GeoPoint {
public GeoPoint resetFromGeoHash(String geohash) {
final long hash = mortonEncode(geohash);
return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));
return this.reset(GeoHashUtils.decodeLatitude(hash), GeoHashUtils.decodeLongitude(hash));
}
public GeoPoint resetFromGeoHash(long geohashLong) {

@@ -22,7 +22,6 @@ package org.elasticsearch.common.geo;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.SloppyMath;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.unit.DistanceUnit;
@@ -511,35 +510,40 @@ public class GeoUtils {
final GeoPoint... fromPoints) {
final GeoPointValues singleValues = FieldData.unwrapSingleton(geoPointValues);
if (singleValues != null && fromPoints.length == 1) {
final Bits docsWithField = FieldData.unwrapSingletonBits(geoPointValues);
return FieldData.singleton(new NumericDoubleValues() {
@Override
public double get(int docID) {
if (docsWithField != null && !docsWithField.get(docID)) {
return 0d;
}
final GeoPoint to = singleValues.get(docID);
public boolean advanceExact(int doc) throws IOException {
return singleValues.advanceExact(doc);
}
@Override
public double doubleValue() throws IOException {
final GeoPoint from = fromPoints[0];
final GeoPoint to = singleValues.geoPointValue();
return distance.calculate(from.lat(), from.lon(), to.lat(), to.lon(), unit);
}
}, docsWithField);
});
} else {
return new SortingNumericDoubleValues() {
@Override
public void setDocument(int doc) {
geoPointValues.setDocument(doc);
resize(geoPointValues.count() * fromPoints.length);
int v = 0;
for (GeoPoint from : fromPoints) {
for (int i = 0; i < geoPointValues.count(); ++i) {
final GeoPoint point = geoPointValues.valueAt(i);
values[v] = distance.calculate(from.lat(), from.lon(), point.lat(), point.lon(), unit);
v++;
public boolean advanceExact(int target) throws IOException {
if (geoPointValues.advanceExact(target)) {
resize(geoPointValues.docValueCount() * fromPoints.length);
int v = 0;
for (int i = 0; i < geoPointValues.docValueCount(); ++i) {
final GeoPoint point = geoPointValues.nextValue();
for (GeoPoint from : fromPoints) {
values[v] = distance.calculate(from.lat(), from.lon(), point.lat(), point.lon(), unit);
v++;
}
}
sort();
return true;
} else {
return false;
}
sort();
}
};
}
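
The multi-valued branch above follows the same Lucene 7 iterator idiom: advanceExact, then docValueCount and nextValue in place of setDocument/count/valueAt. A self-contained sketch with a made-up field name:

```java
import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedNumericDocValues;

class MultiValuedIterationSketch {
    // Collect all values of a multi-valued numeric field for one document.
    static long[] readAll(LeafReader reader, int doc) throws IOException {
        SortedNumericDocValues values = DocValues.getSortedNumeric(reader, "points");
        if (values.advanceExact(doc) == false) {
            return new long[0];                   // no values for this document
        }
        long[] result = new long[values.docValueCount()];
        for (int i = 0; i < result.length; i++) {
            result[i] = values.nextValue();       // returned in sorted order
        }
        return result;
    }
}
```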

@@ -51,14 +51,14 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
@@ -89,9 +89,9 @@ import java.util.Map;
import java.util.Objects;
public class Lucene {
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene54";
public static final String LATEST_DOC_VALUES_FORMAT = "Lucene70";
public static final String LATEST_POSTINGS_FORMAT = "Lucene50";
public static final String LATEST_CODEC = "Lucene62";
public static final String LATEST_CODEC = "Lucene70";
static {
Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class);

@@ -19,8 +19,8 @@
package org.elasticsearch.common.lucene;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
@@ -46,8 +46,8 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public final class ShardCoreKeyMap {
private final Map<Object, ShardId> coreKeyToShard;
private final Map<String, Set<Object>> indexToCoreKey;
private final Map<IndexReader.CacheKey, ShardId> coreKeyToShard;
private final Map<String, Set<IndexReader.CacheKey>> indexToCoreKey;
public ShardCoreKeyMap() {
coreKeyToShard = new ConcurrentHashMap<>();
@@ -63,7 +63,11 @@ public final class ShardCoreKeyMap {
if (shardId == null) {
throw new IllegalArgumentException("Could not extract shard id from " + reader);
}
final Object coreKey = reader.getCoreCacheKey();
final IndexReader.CacheHelper cacheHelper = reader.getCoreCacheHelper();
if (cacheHelper == null) {
throw new IllegalArgumentException("Reader " + reader + " does not support caching");
}
final IndexReader.CacheKey coreKey = cacheHelper.getKey();
if (coreKeyToShard.containsKey(coreKey)) {
// Do this check before entering the synchronized block in order to
@ -75,18 +79,18 @@ public final class ShardCoreKeyMap {
final String index = shardId.getIndexName();
synchronized (this) {
if (coreKeyToShard.containsKey(coreKey) == false) {
Set<Object> objects = indexToCoreKey.get(index);
Set<IndexReader.CacheKey> objects = indexToCoreKey.get(index);
if (objects == null) {
objects = new HashSet<>();
indexToCoreKey.put(index, objects);
}
final boolean added = objects.add(coreKey);
assert added;
CoreClosedListener listener = ownerCoreCacheKey -> {
IndexReader.ClosedListener listener = ownerCoreCacheKey -> {
assert coreKey == ownerCoreCacheKey;
synchronized (ShardCoreKeyMap.this) {
coreKeyToShard.remove(ownerCoreCacheKey);
final Set<Object> coreKeys = indexToCoreKey.get(index);
final Set<IndexReader.CacheKey> coreKeys = indexToCoreKey.get(index);
final boolean removed = coreKeys.remove(coreKey);
assert removed;
if (coreKeys.isEmpty()) {
@ -96,7 +100,7 @@ public final class ShardCoreKeyMap {
};
boolean addedListener = false;
try {
reader.addCoreClosedListener(listener);
cacheHelper.addClosedListener(listener);
addedListener = true;
// Only add the core key to the map as a last operation so that
@ -131,7 +135,7 @@ public final class ShardCoreKeyMap {
* Get the set of core cache keys associated with the given index.
*/
public synchronized Set<Object> getCoreKeysForIndex(String index) {
final Set<Object> objects = indexToCoreKey.get(index);
final Set<IndexReader.CacheKey> objects = indexToCoreKey.get(index);
if (objects == null) {
return Collections.emptySet();
}
@ -154,9 +158,9 @@ public final class ShardCoreKeyMap {
if (assertionsEnabled == false) {
throw new AssertionError("only run this if assertions are enabled");
}
Collection<Set<Object>> values = indexToCoreKey.values();
Collection<Set<IndexReader.CacheKey>> values = indexToCoreKey.values();
int size = 0;
for (Set<Object> value : values) {
for (Set<IndexReader.CacheKey> value : values) {
size += value.size();
}
return size == coreKeyToShard.size();
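This rewrite swaps the untyped `getCoreCacheKey()` object for Lucene 7's `IndexReader.CacheHelper`, which hands out a typed `CacheKey` and owns close-listener registration. A sketch of the registration idiom in isolation, assuming a `LeafReader reader` and a hypothetical `cache` keyed on the core cache key:

IndexReader.CacheHelper cacheHelper = reader.getCoreCacheHelper();
if (cacheHelper == null) {
    // readers that cannot be cached return null instead of a key
    throw new IllegalArgumentException("Reader " + reader + " does not support caching");
}
IndexReader.CacheKey key = cacheHelper.getKey();
cache.put(key, value);
// evict the entry once the segment core is closed
cacheHelper.addClosedListener(ownerKey -> cache.remove(ownerKey));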


@ -105,27 +105,17 @@ public final class AllTermQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
if (needsScores == false) {
return new TermQuery(term).createWeight(searcher, needsScores);
return new TermQuery(term).createWeight(searcher, needsScores, boost);
}
final TermContext termStates = TermContext.build(searcher.getTopReaderContext(), term);
final CollectionStatistics collectionStats = searcher.collectionStatistics(term.field());
final TermStatistics termStats = searcher.termStatistics(term, termStates);
final Similarity similarity = searcher.getSimilarity(needsScores);
final SimWeight stats = similarity.computeWeight(collectionStats, termStats);
final SimWeight stats = similarity.computeWeight(boost, collectionStats, termStats);
return new Weight(this) {
@Override
public float getValueForNormalization() throws IOException {
return stats.getValueForNormalization();
}
@Override
public void normalize(float norm, float topLevelBoost) {
stats.normalize(norm, topLevelBoost);
}
@Override
public void extractTerms(Set<Term> terms) {
terms.add(term);


@ -49,6 +49,12 @@ public final class ElasticsearchDirectoryReader extends FilterDirectoryReader {
return this.shardId;
}
@Override
public CacheHelper getReaderCacheHelper() {
// safe to delegate since this reader does not alter the index
return in.getReaderCacheHelper();
}
@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return new ElasticsearchDirectoryReader(in, wrapper, shardId);
@ -84,14 +90,17 @@ public final class ElasticsearchDirectoryReader extends FilterDirectoryReader {
* @throws IllegalArgumentException if the reader doesn't contain an {@link ElasticsearchDirectoryReader} in its hierarchy
*/
@SuppressForbidden(reason = "This is the only sane way to add a ReaderClosedListener")
public static void addReaderCloseListener(DirectoryReader reader, IndexReader.ReaderClosedListener listener) {
public static void addReaderCloseListener(DirectoryReader reader, IndexReader.ClosedListener listener) {
ElasticsearchDirectoryReader elasticsearchDirectoryReader = getElasticsearchDirectoryReader(reader);
if (elasticsearchDirectoryReader != null) {
assert reader.getCoreCacheKey() == elasticsearchDirectoryReader.getCoreCacheKey();
elasticsearchDirectoryReader.addReaderClosedListener(listener);
return;
if (elasticsearchDirectoryReader == null) {
throw new IllegalArgumentException("Can't install close listener reader is not an ElasticsearchDirectoryReader/ElasticsearchLeafReader");
}
throw new IllegalArgumentException("Can't install close listener reader is not an ElasticsearchDirectoryReader/ElasticsearchLeafReader");
IndexReader.CacheHelper cacheHelper = elasticsearchDirectoryReader.getReaderCacheHelper();
if (cacheHelper == null) {
throw new IllegalArgumentException("Reader " + elasticsearchDirectoryReader + " does not support caching");
}
assert cacheHelper.getKey() == reader.getReaderCacheHelper().getKey();
cacheHelper.addClosedListener(listener);
}
/**


@ -49,8 +49,13 @@ public final class ElasticsearchLeafReader extends FilterLeafReader {
}
@Override
public Object getCoreCacheKey() {
return in.getCoreCacheKey();
public CacheHelper getCoreCacheHelper() {
return in.getCoreCacheHelper();
}
@Override
public CacheHelper getReaderCacheHelper() {
return in.getReaderCacheHelper();
}
public static ElasticsearchLeafReader getElasticsearchLeafReader(LeafReader reader) {


@ -121,7 +121,6 @@ public class Queries {
if (isNegativeQuery(q)) {
BooleanQuery bq = (BooleanQuery) q;
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.setDisableCoord(bq.isCoordDisabled());
for (BooleanClause clause : bq) {
builder.add(clause);
}
@ -154,7 +153,6 @@ public class Queries {
int msm = calculateMinShouldMatch(optionalClauses, minimumShouldMatch);
if (0 < msm) {
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.setDisableCoord(query.isCoordDisabled());
for (BooleanClause clause : query) {
builder.add(clause);
}
@ -170,10 +168,7 @@ public class Queries {
* otherwise return the original query.
*/
public static Query maybeApplyMinimumShouldMatch(Query query, @Nullable String minimumShouldMatch) {
// If the coordination factor is disabled on a boolean query we don't apply the minimum should match.
// This is done to make sure that the minimum_should_match doesn't get applied when there is only one word
// and multiple variations of the same word in the query (synonyms for instance).
if (query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) {
if (query instanceof BooleanQuery) {
return applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
} else if (query instanceof ExtendedCommonTermsQuery) {
((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch);
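Lucene 7 drops the coordination factor entirely, so `isCoordDisabled()`/`setDisableCoord` disappear and, with them, the special case that skipped `minimum_should_match` for coord-disabled queries. The rewrite now applies to every `BooleanQuery`; a condensed sketch of the rebuild, with `msm` standing for the value computed by `calculateMinShouldMatch`:

BooleanQuery bq = (BooleanQuery) query;
BooleanQuery.Builder builder = new BooleanQuery.Builder();
for (BooleanClause clause : bq) {
    builder.add(clause); // clauses carry over unchanged; there is no coord flag to copy
}
builder.setMinimumNumberShouldMatch(msm);
Query rewritten = builder.build();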


@ -62,7 +62,7 @@ public class FieldValueFactorFunction extends ScoreFunction {
public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) {
final SortedNumericDoubleValues values;
if(indexFieldData == null) {
values = FieldData.emptySortedNumericDoubles(ctx.reader().maxDoc());
values = FieldData.emptySortedNumericDoubles();
} else {
values = this.indexFieldData.load(ctx).getDoubleValues();
}
@ -70,16 +70,16 @@ public class FieldValueFactorFunction extends ScoreFunction {
return new LeafScoreFunction() {
@Override
public double score(int docId, float subQueryScore) {
values.setDocument(docId);
final int numValues = values.count();
public double score(int docId, float subQueryScore) throws IOException {
double value;
if (numValues > 0) {
value = values.valueAt(0);
} else if (missing != null) {
value = missing;
if (values.advanceExact(docId)) {
value = values.nextValue();
} else {
throw new ElasticsearchException("Missing value for field [" + field + "]");
if (missing != null) {
value = missing;
} else {
throw new ElasticsearchException("Missing value for field [" + field + "]");
}
}
double val = value * boostFactor;
double result = modifier.apply(val);
@ -91,7 +91,7 @@ public class FieldValueFactorFunction extends ScoreFunction {
}
@Override
public Explanation explainScore(int docId, Explanation subQueryScore) {
public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException {
String modifierStr = modifier != null ? modifier.toString() : "";
String defaultStr = missing != null ? "?:" + missing : "";
double score = score(docId, subQueryScore.getValue());
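Because the values are now consumed through `advanceExact`, both score methods gain a `throws IOException` clause and the presence check collapses into the boolean return value. The resulting fallback chain, condensed from the hunk above with the same `values`, `missing` and `field` names:

double value;
if (values.advanceExact(docId)) {
    value = values.nextValue(); // smallest value when the field is multi-valued
} else if (missing != null) {
    value = missing; // configured fallback for documents without a value
} else {
    throw new ElasticsearchException("Missing value for field [" + field + "]");
}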


@ -135,9 +135,9 @@ public class FiltersFunctionScoreQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
if (needsScores == false && minScore == null) {
return subQuery.createWeight(searcher, needsScores);
return subQuery.createWeight(searcher, needsScores, boost);
}
boolean subQueryNeedsScores = combineFunction != CombineFunction.REPLACE;
@ -146,7 +146,7 @@ public class FiltersFunctionScoreQuery extends Query {
subQueryNeedsScores |= filterFunctions[i].function.needsScores();
filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false);
}
Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores);
Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores, boost);
return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryNeedsScores);
}
@ -168,16 +168,6 @@ public class FiltersFunctionScoreQuery extends Query {
subQueryWeight.extractTerms(terms);
}
@Override
public float getValueForNormalization() throws IOException {
return subQueryWeight.getValueForNormalization();
}
@Override
public void normalize(float norm, float boost) {
subQueryWeight.normalize(norm, boost);
}
private FiltersFunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
Scorer subQueryScorer = subQueryWeight.scorer(context);
if (subQueryScorer == null) {
@ -281,7 +271,7 @@ public class FiltersFunctionScoreQuery extends Query {
return scoreCombiner.combine(subQueryScore, factor, maxBoost);
}
protected double computeScore(int docId, float subQueryScore) {
protected double computeScore(int docId, float subQueryScore) throws IOException {
double factor = 1d;
switch(scoreMode) {
case FIRST:


@ -91,16 +91,16 @@ public class FunctionScoreQuery extends Query {
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
if (needsScores == false && minScore == null) {
return subQuery.createWeight(searcher, needsScores);
return subQuery.createWeight(searcher, needsScores, boost);
}
boolean subQueryNeedsScores =
combineFunction != CombineFunction.REPLACE // if we don't replace we need the original score
|| function == null // when the function is null, we just multiply the score, so we need it
|| function.needsScores(); // some scripts can replace with a script that returns eg. 1/_score
Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores);
Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores, boost);
return new CustomBoostFactorWeight(this, subQueryWeight, subQueryNeedsScores);
}
@ -120,16 +120,6 @@ public class FunctionScoreQuery extends Query {
subQueryWeight.extractTerms(terms);
}
@Override
public float getValueForNormalization() throws IOException {
return subQueryWeight.getValueForNormalization();
}
@Override
public void normalize(float norm, float boost) {
subQueryWeight.normalize(norm, boost);
}
private FunctionFactorScorer functionScorer(LeafReaderContext context) throws IOException {
Scorer subQueryScorer = subQueryWeight.scorer(context);
if (subQueryScorer == null) {


@ -26,7 +26,7 @@ import java.io.IOException;
/** Per-leaf {@link ScoreFunction}. */
public abstract class LeafScoreFunction {
public abstract double score(int docId, float subQueryScore);
public abstract double score(int docId, float subQueryScore) throws IOException;
public abstract Explanation explainScore(int docId, Explanation subQueryScore) throws IOException;


@ -25,6 +25,7 @@ import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import java.io.IOException;
import java.util.Objects;
/**
@ -68,14 +69,16 @@ public class RandomScoreFunction extends ScoreFunction {
return new LeafScoreFunction() {
@Override
public double score(int docId, float subQueryScore) {
uidByteData.setDocument(docId);
int hash = StringHelper.murmurhash3_x86_32(uidByteData.valueAt(0), saltedSeed);
public double score(int docId, float subQueryScore) throws IOException {
if (uidByteData.advanceExact(docId) == false) {
throw new AssertionError("Document without a _uid");
}
int hash = StringHelper.murmurhash3_x86_32(uidByteData.nextValue(), saltedSeed);
return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0
}
@Override
public Explanation explainScore(int docId, Explanation subQueryScore) {
public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException {
return Explanation.match(
CombineFunction.toFloat(score(docId, subQueryScore.getValue())),
"random score function (seed: " + originalSeed + ")");


@ -52,7 +52,7 @@ public class WeightFactorFunction extends ScoreFunction {
final LeafScoreFunction leafFunction = scoreFunction.getLeafScoreFunction(ctx);
return new LeafScoreFunction() {
@Override
public double score(int docId, float subQueryScore) {
public double score(int docId, float subQueryScore) throws IOException {
return leafFunction.score(docId, subQueryScore) * getWeight();
}


@ -52,12 +52,7 @@ final class PerThreadIDVersionAndSeqNoLookup {
/** terms enum for uid field */
private final TermsEnum termsEnum;
/** _version data */
private final NumericDocValues versions;
/** _seq_no data */
private final NumericDocValues seqNos;
/** _primary_term data */
private final NumericDocValues primaryTerms;
/** Reused for iteration (when the term exists) */
private PostingsEnum docsEnum;
@ -72,30 +67,33 @@ final class PerThreadIDVersionAndSeqNoLookup {
Terms terms = fields.terms(UidFieldMapper.NAME);
termsEnum = terms.iterator();
if (termsEnum == null) {
throw new IllegalArgumentException("reader misses the [" + UidFieldMapper.NAME +
"] field");
throw new IllegalArgumentException("reader misses the [" + UidFieldMapper.NAME + "] field");
}
versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
if (versions == null) {
throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME +
"] field");
if (reader.getNumericDocValues(VersionFieldMapper.NAME) == null) {
throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field");
}
seqNos = reader.getNumericDocValues(SeqNoFieldMapper.NAME);
primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
Object readerKey = null;
assert (readerKey = reader.getCoreCacheKey()) != null;
assert (readerKey = reader.getCoreCacheHelper().getKey()) != null;
this.readerKey = readerKey;
}
/** Return null if id is not found. */
public DocIdAndVersion lookupVersion(BytesRef id, Bits liveDocs, LeafReaderContext context)
throws IOException {
assert context.reader().getCoreCacheKey().equals(readerKey) :
assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) :
"context's reader is not the same as the reader class was initialized on.";
int docID = getDocID(id, liveDocs);
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
return new DocIdAndVersion(docID, versions.get(docID), context);
final NumericDocValues versions = context.reader().getNumericDocValues(VersionFieldMapper.NAME);
if (versions == null) {
throw new IllegalArgumentException("reader misses the [" + VersionFieldMapper.NAME + "] field");
}
if (versions.advanceExact(docID) == false) {
throw new IllegalArgumentException("Document [" + docID + "] misses the [" + VersionFieldMapper.NAME + "] field");
}
return new DocIdAndVersion(docID, versions.longValue(), context);
} else {
return null;
}
@ -124,11 +122,18 @@ final class PerThreadIDVersionAndSeqNoLookup {
/** Return null if id is not found. */
DocIdAndSeqNo lookupSeqNo(BytesRef id, Bits liveDocs, LeafReaderContext context) throws IOException {
assert context.reader().getCoreCacheKey().equals(readerKey) :
assert context.reader().getCoreCacheHelper().getKey().equals(readerKey) :
"context's reader is not the same as the reader class was initialized on.";
int docID = getDocID(id, liveDocs);
if (docID != DocIdSetIterator.NO_MORE_DOCS) {
return new DocIdAndSeqNo(docID, seqNos == null ? SequenceNumbersService.UNASSIGNED_SEQ_NO : seqNos.get(docID), context);
NumericDocValues seqNos = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
long seqNo;
if (seqNos != null && seqNos.advanceExact(docID)) {
seqNo = seqNos.longValue();
} else {
seqNo = SequenceNumbersService.UNASSIGNED_SEQ_NO;
}
return new DocIdAndSeqNo(docID, seqNo, context);
} else {
return null;
}
@ -139,7 +144,12 @@ final class PerThreadIDVersionAndSeqNoLookup {
*
* Note that 0 is an illegal primary term. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
**/
long lookUpPrimaryTerm(int docID) throws IOException {
return primaryTerms == null ? 0 : primaryTerms.get(docID);
long lookUpPrimaryTerm(int docID, LeafReader reader) throws IOException {
NumericDocValues primaryTerms = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
if (primaryTerms != null && primaryTerms.advanceExact(docID)) {
return primaryTerms.longValue();
} else {
return 0;
}
}
}
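The cached `NumericDocValues` fields are removed because Lucene 7 doc values are forward-only iterators: an instance held across lookups cannot be advanced to a smaller doc id later. A short illustration of the hazard the per-call re-acquisition avoids:

NumericDocValues versions = reader.getNumericDocValues(VersionFieldMapper.NAME);
versions.advanceExact(42); // fine: first positioning
versions.advanceExact(7);  // undefined: targets must be non-decreasing
// hence lookupVersion, lookupSeqNo and lookUpPrimaryTerm now fetch a
// fresh iterator from the reader on every call instead of caching one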


@ -21,7 +21,6 @@ package org.elasticsearch.common.lucene.uid;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.CloseableThreadLocal;
@ -41,7 +40,7 @@ public final class VersionsAndSeqNoResolver {
ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
// Evict this reader from lookupStates once it's closed:
private static final CoreClosedListener removeLookupState = key -> {
private static final IndexReader.ClosedListener removeLookupState = key -> {
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.remove(key);
if (ctl != null) {
ctl.close();
@ -49,15 +48,15 @@ public final class VersionsAndSeqNoResolver {
};
private static PerThreadIDVersionAndSeqNoLookup getLookupState(LeafReader reader) throws IOException {
Object key = reader.getCoreCacheKey();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.get(key);
IndexReader.CacheHelper cacheHelper = reader.getCoreCacheHelper();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> ctl = lookupStates.get(cacheHelper.getKey());
if (ctl == null) {
// First time we are seeing this reader's core; make a new CTL:
ctl = new CloseableThreadLocal<>();
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> other = lookupStates.putIfAbsent(key, ctl);
CloseableThreadLocal<PerThreadIDVersionAndSeqNoLookup> other = lookupStates.putIfAbsent(cacheHelper.getKey(), ctl);
if (other == null) {
// Our CTL won, we must remove it when the core is closed:
reader.addCoreClosedListener(removeLookupState);
cacheHelper.addClosedListener(removeLookupState);
} else {
// Another thread beat us to it: just use their CTL:
ctl = other;
@ -161,7 +160,7 @@ public final class VersionsAndSeqNoResolver {
public static long loadPrimaryTerm(DocIdAndSeqNo docIdAndSeqNo) throws IOException {
LeafReader leaf = docIdAndSeqNo.context.reader();
PerThreadIDVersionAndSeqNoLookup lookup = getLookupState(leaf);
long result = lookup.lookUpPrimaryTerm(docIdAndSeqNo.docId);
long result = lookup.lookUpPrimaryTerm(docIdAndSeqNo.docId, leaf);
assert result > 0 : "should always resolve a primary term for a resolved sequence number. primary_term [" + result + "]"
+ " docId [" + docIdAndSeqNo.docId + "] seqNo [" + docIdAndSeqNo.seqNo + "]";
return result;


@ -150,7 +150,8 @@ public class ElectMasterService extends AbstractComponent {
}
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
return minimumMasterNodes < 1 || countMasterNodes(nodes) >= minimumMasterNodes;
final int count = countMasterNodes(nodes);
return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes);
}
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {


@ -21,8 +21,8 @@ package org.elasticsearch.index.cache.bitset;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
@ -71,13 +71,13 @@ import java.util.concurrent.Executor;
* and require that it should always be around should use this cache, otherwise the
* {@link org.elasticsearch.index.cache.query.QueryCache} should be used instead.
*/
public final class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener<Object, Cache<Query, BitsetFilterCache.Value>>, Closeable {
public final class BitsetFilterCache extends AbstractIndexComponent implements IndexReader.ClosedListener, RemovalListener<IndexReader.CacheKey, Cache<Query, BitsetFilterCache.Value>>, Closeable {
public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING =
Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope);
private final boolean loadRandomAccessFiltersEagerly;
private final Cache<Object, Cache<Query, Value>> loadedFilters;
private final Cache<IndexReader.CacheKey, Cache<Query, Value>> loadedFilters;
private final Listener listener;
public BitsetFilterCache(IndexSettings indexSettings, Listener listener) {
@ -86,7 +86,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
throw new IllegalArgumentException("listener must not be null");
}
this.loadRandomAccessFiltersEagerly = this.indexSettings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING);
this.loadedFilters = CacheBuilder.<Object, Cache<Query, Value>>builder().removalListener(this).build();
this.loadedFilters = CacheBuilder.<IndexReader.CacheKey, Cache<Query, Value>>builder().removalListener(this).build();
this.listener = listener;
}
@ -100,7 +100,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
}
@Override
public void onClose(Object ownerCoreCacheKey) {
public void onClose(IndexReader.CacheKey ownerCoreCacheKey) {
loadedFilters.invalidate(ownerCoreCacheKey);
}
@ -115,7 +115,11 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
}
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
final Object coreCacheReader = context.reader().getCoreCacheKey();
final IndexReader.CacheHelper cacheHelper = context.reader().getCoreCacheHelper();
if (cacheHelper == null) {
throw new IllegalArgumentException("Reader " + context.reader() + " does not support caching");
}
final IndexReader.CacheKey coreCacheReader = cacheHelper.getKey();
final ShardId shardId = ShardUtils.extractShardId(context.reader());
if (shardId != null // can't require it because of the percolator
&& indexSettings.getIndex().equals(shardId.getIndex()) == false) {
@ -124,7 +128,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
+ " with cache of index " + indexSettings.getIndex());
}
Cache<Query, Value> filterToFbs = loadedFilters.computeIfAbsent(coreCacheReader, key -> {
context.reader().addCoreClosedListener(BitsetFilterCache.this);
cacheHelper.addClosedListener(BitsetFilterCache.this);
return CacheBuilder.<Query, Value>builder().build();
});
@ -148,7 +152,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
}
@Override
public void onRemoval(RemovalNotification<Object, Cache<Query, Value>> notification) {
public void onRemoval(RemovalNotification<IndexReader.CacheKey, Cache<Query, Value>> notification) {
if (notification.getKey() == null) {
return;
}
@ -272,7 +276,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
}
Cache<Object, Cache<Query, Value>> getLoadedFilters() {
Cache<IndexReader.CacheKey, Cache<Query, Value>> getLoadedFilters() {
return loadedFilters;
}
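The same `CacheHelper` pattern drives the bitset cache: the per-segment inner cache is created lazily under the typed core key, and cleanup is registered so entries vanish with the segment. A reduced sketch of the get-or-create-with-eviction idiom (names are illustrative; the real class registers itself as the listener):

Cache<Query, Value> perSegment = loadedFilters.computeIfAbsent(cacheHelper.getKey(), key -> {
    // first time this segment core is seen: arrange for cleanup on close
    cacheHelper.addClosedListener(coreKey -> loadedFilters.invalidate(coreKey));
    return CacheBuilder.<Query, Value>builder().build();
});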


@ -22,7 +22,7 @@ package org.elasticsearch.index.codec;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
import org.apache.lucene.codecs.lucene62.Lucene62Codec;
import org.apache.lucene.codecs.lucene70.Lucene70Codec;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.index.mapper.MapperService;
@ -47,8 +47,8 @@ public class CodecService {
public CodecService(@Nullable MapperService mapperService, Logger logger) {
final MapBuilder<String, Codec> codecs = MapBuilder.<String, Codec>newMapBuilder();
if (mapperService == null) {
codecs.put(DEFAULT_CODEC, new Lucene62Codec());
codecs.put(BEST_COMPRESSION_CODEC, new Lucene62Codec(Mode.BEST_COMPRESSION));
codecs.put(DEFAULT_CODEC, new Lucene70Codec());
codecs.put(BEST_COMPRESSION_CODEC, new Lucene70Codec(Mode.BEST_COMPRESSION));
} else {
codecs.put(DEFAULT_CODEC,
new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));


@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
import org.apache.lucene.codecs.lucene62.Lucene62Codec;
import org.apache.lucene.codecs.lucene70.Lucene70Codec;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.mapper.CompletionFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
@ -38,7 +38,7 @@ import org.elasticsearch.index.mapper.MapperService;
* configured for a specific field the default postings format is used.
*/
// LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
public class PerFieldMappingPostingFormatCodec extends Lucene62Codec {
public class PerFieldMappingPostingFormatCodec extends Lucene70Codec {
private final Logger logger;
private final MapperService mapperService;


@ -363,7 +363,6 @@ public abstract class Engine implements Closeable {
void setTranslogLocation(Translog.Location translogLocation) {
if (freeze.get() == null) {
assert failure == null : "failure has to be null to set translog location";
this.translogLocation = translogLocation;
} else {
throw new IllegalStateException("result is already frozen");
@ -432,7 +431,7 @@ public abstract class Engine implements Closeable {
}
static class NoOpResult extends Result {
public static class NoOpResult extends Result {
NoOpResult(long seqNo) {
super(Operation.TYPE.NO_OP, 0, seqNo);
@ -1154,24 +1153,31 @@ public abstract class Engine implements Closeable {
return reason;
}
public NoOp(
final Term uid,
final long seqNo,
final long primaryTerm,
final long version,
final VersionType versionType,
final Origin origin,
final long startTime,
final String reason) {
super(uid, seqNo, primaryTerm, version, versionType, origin, startTime);
public NoOp(final long seqNo, final long primaryTerm, final Origin origin, final long startTime, final String reason) {
super(null, seqNo, primaryTerm, Versions.NOT_FOUND, null, origin, startTime);
this.reason = reason;
}
@Override
public Term uid() {
throw new UnsupportedOperationException();
}
@Override
public String type() {
throw new UnsupportedOperationException();
}
@Override
public long version() {
throw new UnsupportedOperationException();
}
@Override
public VersionType versionType() {
throw new UnsupportedOperationException();
}
@Override
String id() {
throw new UnsupportedOperationException();


@ -128,6 +128,7 @@ public class InternalEngine extends Engine {
private final AtomicInteger throttleRequestCount = new AtomicInteger();
private final EngineConfig.OpenMode openMode;
private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false);
private static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp";
private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);
private final CounterMetric numVersionLookups = new CounterMetric();
private final CounterMetric numIndexVersionsLookups = new CounterMetric();
@ -178,6 +179,7 @@ public class InternalEngine extends Engine {
}
logger.trace("recovered [{}]", seqNoStats);
seqNoService = sequenceNumberService(shardId, engineConfig.getIndexSettings(), seqNoStats);
updateMaxUnsafeAutoIdTimestampFromWriter(writer);
// norelease
/*
* We have no guarantees that all operations above the local checkpoint are in the Lucene commit or the translog. This means
@ -226,6 +228,17 @@ public class InternalEngine extends Engine {
logger.trace("created new InternalEngine");
}
private void updateMaxUnsafeAutoIdTimestampFromWriter(IndexWriter writer) {
long commitMaxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
for (Map.Entry<String, String> entry : writer.getLiveCommitData()) {
if (entry.getKey().equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) {
commitMaxUnsafeAutoIdTimestamp = Long.parseLong(entry.getValue());
break;
}
}
maxUnsafeAutoIdTimestamp.set(Math.max(maxUnsafeAutoIdTimestamp.get(), commitMaxUnsafeAutoIdTimestamp));
}
private static SequenceNumbersService sequenceNumberService(
final ShardId shardId,
final IndexSettings indexSettings,
@ -500,7 +513,7 @@ public class InternalEngine extends Engine {
return true;
case LOCAL_TRANSLOG_RECOVERY:
assert index.isRetry();
return false; // even if retry is set we never optimize local recovery
return true; // allow to optimize in order to update the max safe time stamp
default:
throw new IllegalArgumentException("unknown origin " + index.origin());
}
@ -601,10 +614,16 @@ public class InternalEngine extends Engine {
indexResult = new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing,
plan.currentNotFoundOrDeleted);
}
if (indexResult.hasFailure() == false &&
index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
Translog.Location location =
translog.add(new Translog.Index(index, indexResult));
if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location location;
if (indexResult.hasFailure() == false) {
location = translog.add(new Translog.Index(index, indexResult));
} else if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
// if we have a document failure, record it as a no-op in the translog with the generated seq_no
location = translog.add(new Translog.NoOp(indexResult.getSeqNo(), index.primaryTerm(), indexResult.getFailure().getMessage()));
} else {
location = null;
}
indexResult.setTranslogLocation(location);
}
if (indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
@ -736,7 +755,7 @@ public class InternalEngine extends Engine {
* we return a `MATCH_ANY` version to indicate no document was indexed. The value is
* not used anyway
*/
return new IndexResult(ex, Versions.MATCH_ANY, index.seqNo());
return new IndexResult(ex, Versions.MATCH_ANY, plan.seqNoForIndexing);
} else {
throw ex;
}
@ -887,10 +906,16 @@ public class InternalEngine extends Engine {
deleteResult = new DeleteResult(plan.versionOfDeletion, plan.seqNoOfDeletion,
plan.currentlyDeleted == false);
}
if (!deleteResult.hasFailure() &&
delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
Translog.Location location =
translog.add(new Translog.Delete(delete, deleteResult));
if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
final Translog.Location location;
if (deleteResult.hasFailure() == false) {
location = translog.add(new Translog.Delete(delete, deleteResult));
} else if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
location = translog.add(new Translog.NoOp(deleteResult.getSeqNo(),
delete.primaryTerm(), deleteResult.getFailure().getMessage()));
} else {
location = null;
}
deleteResult.setTranslogLocation(location);
}
if (deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
@ -1770,6 +1795,7 @@ public class InternalEngine extends Engine {
commitData.put(Engine.SYNC_COMMIT_ID, syncId);
}
commitData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(seqNoService().getMaxSeqNo()));
commitData.put(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, Long.toString(maxUnsafeAutoIdTimestamp.get()));
logger.trace("committing writer with commit data [{}]", commitData);
return commitData.entrySet().iterator();
});
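The new `max_unsafe_auto_id_timestamp` key rides along in the commit user data: written through the live-commit-data iterator at commit time and read back via `IndexWriter.getLiveCommitData()` when the engine reopens. A small sketch of both directions, assuming an `IndexWriter writer` and a `timestamp` value:

// write side: attach the timestamp to the next commit
Map<String, String> commitData = new HashMap<>();
commitData.put("max_unsafe_auto_id_timestamp", Long.toString(timestamp));
writer.setLiveCommitData(commitData.entrySet());
writer.commit();

// read side: restore it after reopening the writer
long restored = Long.MIN_VALUE;
for (Map.Entry<String, String> entry : writer.getLiveCommitData()) {
    if ("max_unsafe_auto_id_timestamp".equals(entry.getKey())) {
        restored = Long.parseLong(entry.getValue());
        break;
    }
}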


@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
* Base implementation that throws an {@link UnsupportedOperationException} for the
* {@link DocIdSetIterator} APIs. This impl is safe to use for sorting and
* aggregations, which only use {@link #advanceExact(int)} and
* {@link #binaryValue()}.
*/
public abstract class AbstractBinaryDocValues extends BinaryDocValues {
@Override
public int docID() {
throw new UnsupportedOperationException();
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
throw new UnsupportedOperationException();
}
}


@ -0,0 +1,50 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
* Base implementation that throws an {@link UnsupportedOperationException} for the
* {@link DocIdSetIterator} APIs. This impl is safe to use for sorting and
* aggregations, which only use {@link #advanceExact(int)} and
* {@link #longValue()}.
*/
public abstract class AbstractNumericDocValues extends NumericDocValues {
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
throw new UnsupportedOperationException();
}
}
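A concrete subclass only has to supply `advanceExact`, `docID` and `longValue`, which is everything sorting and aggregations touch. A hypothetical minimal implementation on top of the new base class:

import java.io.IOException;

public final class ConstantValueDocValues extends AbstractNumericDocValues {
    private int doc = -1;

    @Override
    public boolean advanceExact(int target) throws IOException {
        doc = target;
        return true; // pretend every document has a value
    }

    @Override
    public int docID() {
        return doc;
    }

    @Override
    public long longValue() throws IOException {
        return 42L; // constant value, purely for illustration
    }
}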


@ -19,30 +19,32 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
* Base implementation of a {@link RandomAccessOrds} instance.
* Base implementation that throws an {@link UnsupportedOperationException} for the
* {@link DocIdSetIterator} APIs. This impl is safe to use for sorting and
* aggregations, which only use {@link #advanceExact(int)} and
* {@link #ordValue()}.
*/
public abstract class AbstractRandomAccessOrds extends RandomAccessOrds {
int i = 0;
protected abstract void doSetDocument(int docID);
public abstract class AbstractSortedDocValues extends SortedDocValues {
@Override
public final void setDocument(int docID) {
doSetDocument(docID);
i = 0;
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long nextOrd() {
if (i < cardinality()) {
return ordAt(i++);
} else {
return NO_MORE_ORDS;
}
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
throw new UnsupportedOperationException();
}
}


@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
* Base implementation that throws an {@link UnsupportedOperationException} for the
* {@link DocIdSetIterator} APIs. This impl is safe to use for sorting and
* aggregations, which only use {@link #advanceExact(int)} and
* {@link #docValueCount()} and {@link #nextValue()}.
*/
public abstract class AbstractSortedNumericDocValues extends SortedNumericDocValues {
@Override
public int docID() {
throw new UnsupportedOperationException();
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
throw new UnsupportedOperationException();
}
}


@ -0,0 +1,56 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
* Base implementation that throws an {@link UnsupportedOperationException} for the
* {@link DocIdSetIterator} APIs. This impl is safe to use for sorting and
* aggregations, which only use {@link #advanceExact(int)} and
* {@link #getValueCount()} and {@link #nextOrd()} and {@link #lookupOrd(long)}.
*/
public abstract class AbstractSortedSetDocValues extends SortedSetDocValues {
@Override
public int docID() {
throw new UnsupportedOperationException();
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
throw new UnsupportedOperationException();
}
}


@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.fielddata;
import org.apache.lucene.search.DocIdSetIterator;
import java.io.IOException;
/**
* Base implementation that throws an {@link UnsupportedOperationException} for the
* {@link DocIdSetIterator} APIs. This impl is safe to use for sorting and
* aggregations, which only use {@link #advanceExact(int)} and
* {@link #docValueCount()} and {@link #nextValue()}.
*/
public abstract class AbstractSortingNumericDocValues extends SortingNumericDocValues {
@Override
public int docID() {
throw new UnsupportedOperationException();
}
@Override
public int nextDoc() throws IOException {
throw new UnsupportedOperationException();
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
throw new UnsupportedOperationException();
}
}


@ -19,7 +19,7 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedSetDocValues;
/**
* Specialization of {@link AtomicFieldData} for data that is indexed with
@ -30,6 +30,6 @@ public interface AtomicOrdinalsFieldData extends AtomicFieldData {
/**
* Return the ordinals values for the current atomic reader.
*/
RandomAccessOrds getOrdinalsValues();
SortedSetDocValues getOrdinalsValues();
}


@ -22,15 +22,13 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.common.geo.GeoPoint;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@ -43,8 +41,8 @@ public enum FieldData {
/**
* Return a {@link SortedBinaryDocValues} that doesn't contain any value.
*/
public static SortedBinaryDocValues emptySortedBinary(int maxDoc) {
return singleton(DocValues.emptyBinary(), new Bits.MatchNoBits(maxDoc));
public static SortedBinaryDocValues emptySortedBinary() {
return singleton(DocValues.emptyBinary());
}
/**
@ -53,8 +51,13 @@ public enum FieldData {
public static NumericDoubleValues emptyNumericDouble() {
return new NumericDoubleValues() {
@Override
public double get(int docID) {
return 0;
public boolean advanceExact(int doc) throws IOException {
return false;
}
@Override
public double doubleValue() throws IOException {
throw new UnsupportedOperationException();
}
};
@ -63,16 +66,20 @@ public enum FieldData {
/**
* Return a {@link SortedNumericDoubleValues} that doesn't contain any value.
*/
public static SortedNumericDoubleValues emptySortedNumericDoubles(int maxDoc) {
return singleton(emptyNumericDouble(), new Bits.MatchNoBits(maxDoc));
public static SortedNumericDoubleValues emptySortedNumericDoubles() {
return singleton(emptyNumericDouble());
}
public static GeoPointValues emptyGeoPoint() {
final GeoPoint point = new GeoPoint();
return new GeoPointValues() {
@Override
public GeoPoint get(int docID) {
return point;
public boolean advanceExact(int doc) throws IOException {
return false;
}
@Override
public GeoPoint geoPointValue() {
throw new UnsupportedOperationException();
}
};
}
@ -80,68 +87,123 @@ public enum FieldData {
/**
* Return a {@link SortedNumericDoubleValues} that doesn't contain any value.
*/
public static MultiGeoPointValues emptyMultiGeoPoints(int maxDoc) {
return singleton(emptyGeoPoint(), new Bits.MatchNoBits(maxDoc));
public static MultiGeoPointValues emptyMultiGeoPoints() {
return singleton(emptyGeoPoint());
}
/**
* Returns a {@link Bits} representing all documents from <code>dv</code> that have a value.
*/
public static Bits docsWithValue(final SortedBinaryDocValues dv, final int maxDoc) {
return new Bits() {
@Override
public boolean get(int index) {
dv.setDocument(index);
return dv.count() != 0;
}
return new Bits() {
@Override
public boolean get(int index) {
try {
return dv.advanceExact(index);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int length() {
return maxDoc;
}
};
@Override
public int length() {
return maxDoc;
}
};
}
/**
* Returns a Bits representing all documents from <code>dv</code> that have a value.
* Returns a {@link Bits} representing all documents from <code>dv</code>
* that have a value.
*/
public static Bits docsWithValue(final SortedSetDocValues dv, final int maxDoc) {
return new Bits() {
@Override
public boolean get(int index) {
try {
return dv.advanceExact(index);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int length() {
return maxDoc;
}
};
}
/**
* Returns a Bits representing all documents from <code>dv</code> that have
* a value.
*/
public static Bits docsWithValue(final MultiGeoPointValues dv, final int maxDoc) {
return new Bits() {
@Override
public boolean get(int index) {
dv.setDocument(index);
return dv.count() != 0;
}
return new Bits() {
@Override
public boolean get(int index) {
try {
return dv.advanceExact(index);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int length() {
return maxDoc;
}
};
@Override
public int length() {
return maxDoc;
}
};
}
/**
* Returns a Bits representing all documents from <code>dv</code> that have a value.
*/
public static Bits docsWithValue(final SortedNumericDoubleValues dv, final int maxDoc) {
return new Bits() {
@Override
public boolean get(int index) {
dv.setDocument(index);
return dv.count() != 0;
}
return new Bits() {
@Override
public boolean get(int index) {
try {
return dv.advanceExact(index);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int length() {
return maxDoc;
}
};
@Override
public int length() {
return maxDoc;
}
};
}
/**
* Given a {@link SortedNumericDoubleValues}, return a {@link SortedNumericDocValues}
* instance that will translate double values to sortable long bits using
* {@link NumericUtils#doubleToSortableLong(double)}.
* Returns a Bits representing all documents from <code>dv</code> that have
* a value.
*/
public static Bits docsWithValue(final SortedNumericDocValues dv, final int maxDoc) {
return new Bits() {
@Override
public boolean get(int index) {
try {
return dv.advanceExact(index);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
@Override
public int length() {
return maxDoc;
}
};
}
/**
* Given a {@link SortedNumericDoubleValues}, return a
* {@link SortedNumericDocValues} instance that will translate double values
* to sortable long bits using
* {@link org.apache.lucene.util.NumericUtils#doubleToSortableLong(double)}.
*/
public static SortedNumericDocValues toSortableLongBits(SortedNumericDoubleValues values) {
final NumericDoubleValues singleton = unwrapSingleton(values);
@ -152,8 +214,7 @@ public enum FieldData {
} else {
longBits = new SortableLongBitsNumericDocValues(singleton);
}
final Bits docsWithField = unwrapSingletonBits(values);
return DocValues.singleton(longBits, docsWithField);
return DocValues.singleton(longBits);
} else {
if (values instanceof SortableLongBitsToSortedNumericDoubleValues) {
return ((SortableLongBitsToSortedNumericDoubleValues) values).getLongValues();
@ -166,7 +227,7 @@ public enum FieldData {
/**
* Given a {@link SortedNumericDocValues}, return a {@link SortedNumericDoubleValues}
* instance that will translate long values to doubles using
* {@link NumericUtils#sortableLongToDouble(long)}.
* {@link org.apache.lucene.util.NumericUtils#sortableLongToDouble(long)}.
*/
public static SortedNumericDoubleValues sortableLongBitsToDoubles(SortedNumericDocValues values) {
final NumericDocValues singleton = DocValues.unwrapSingleton(values);
@ -177,8 +238,7 @@ public enum FieldData {
} else {
doubles = new SortableLongBitsToNumericDoubleValues(singleton);
}
final Bits docsWithField = DocValues.unwrapSingletonBits(values);
return singleton(doubles, docsWithField);
return singleton(doubles);
} else {
if (values instanceof SortableLongBitsSortedNumericDocValues) {
return ((SortableLongBitsSortedNumericDocValues) values).getDoubleValues();
@ -194,8 +254,7 @@ public enum FieldData {
public static SortedNumericDoubleValues castToDouble(final SortedNumericDocValues values) {
final NumericDocValues singleton = DocValues.unwrapSingleton(values);
if (singleton != null) {
final Bits docsWithField = DocValues.unwrapSingletonBits(values);
return singleton(new DoubleCastedValues(singleton), docsWithField);
return singleton(new DoubleCastedValues(singleton));
} else {
return new SortedDoubleCastedValues(values);
}
@ -207,8 +266,7 @@ public enum FieldData {
public static SortedNumericDocValues castToLong(final SortedNumericDoubleValues values) {
final NumericDoubleValues singleton = unwrapSingleton(values);
if (singleton != null) {
final Bits docsWithField = unwrapSingletonBits(values);
return DocValues.singleton(new LongCastedValues(singleton), docsWithField);
return DocValues.singleton(new LongCastedValues(singleton));
} else {
return new SortedLongCastedValues(values);
}
@ -217,15 +275,14 @@ public enum FieldData {
/**
* Returns a multi-valued view over the provided {@link NumericDoubleValues}.
*/
public static SortedNumericDoubleValues singleton(NumericDoubleValues values, Bits docsWithField) {
return new SingletonSortedNumericDoubleValues(values, docsWithField);
public static SortedNumericDoubleValues singleton(NumericDoubleValues values) {
return new SingletonSortedNumericDoubleValues(values);
}
/**
* Returns a single-valued view of the {@link SortedNumericDoubleValues},
* if it was previously wrapped with {@link DocValues#singleton(NumericDocValues, Bits)},
* if it was previously wrapped with {@link DocValues#singleton(NumericDocValues)},
* or null.
* @see DocValues#unwrapSingletonBits(SortedNumericDocValues)
*/
public static NumericDoubleValues unwrapSingleton(SortedNumericDoubleValues values) {
if (values instanceof SingletonSortedNumericDoubleValues) {
@ -234,31 +291,17 @@ public enum FieldData {
return null;
}
/**
* Returns the documents with a value for the {@link SortedNumericDoubleValues},
* if it was previously wrapped with {@link #singleton(NumericDoubleValues, Bits)},
* or null.
*/
public static Bits unwrapSingletonBits(SortedNumericDoubleValues dv) {
if (dv instanceof SingletonSortedNumericDoubleValues) {
return ((SingletonSortedNumericDoubleValues)dv).getDocsWithField();
} else {
return null;
}
}
/**
* Returns a multi-valued view over the provided {@link GeoPointValues}.
*/
public static MultiGeoPointValues singleton(GeoPointValues values, Bits docsWithField) {
return new SingletonMultiGeoPointValues(values, docsWithField);
public static MultiGeoPointValues singleton(GeoPointValues values) {
return new SingletonMultiGeoPointValues(values);
}
/**
* Returns a single-valued view of the {@link MultiGeoPointValues},
* if it was previously wrapped with {@link #singleton(GeoPointValues, Bits)},
* if it was previously wrapped with {@link #singleton(GeoPointValues)},
* or null.
* @see #unwrapSingletonBits(MultiGeoPointValues)
*/
public static GeoPointValues unwrapSingleton(MultiGeoPointValues values) {
if (values instanceof SingletonMultiGeoPointValues) {
@ -267,30 +310,17 @@ public enum FieldData {
return null;
}
/**
* Returns the documents with a value for the {@link MultiGeoPointValues},
* if it was previously wrapped with {@link #singleton(GeoPointValues, Bits)},
* or null.
*/
public static Bits unwrapSingletonBits(MultiGeoPointValues values) {
if (values instanceof SingletonMultiGeoPointValues) {
return ((SingletonMultiGeoPointValues) values).getDocsWithField();
}
return null;
}
/**
* Returns a multi-valued view over the provided {@link BinaryDocValues}.
*/
public static SortedBinaryDocValues singleton(BinaryDocValues values, Bits docsWithField) {
return new SingletonSortedBinaryDocValues(values, docsWithField);
public static SortedBinaryDocValues singleton(BinaryDocValues values) {
return new SingletonSortedBinaryDocValues(values);
}
/**
* Returns a single-valued view of the {@link SortedBinaryDocValues},
* if it was previously wrapped with {@link #singleton(BinaryDocValues, Bits)},
* if it was previously wrapped with {@link #singleton(BinaryDocValues)},
* or null.
* @see #unwrapSingletonBits(SortedBinaryDocValues)
*/
public static BinaryDocValues unwrapSingleton(SortedBinaryDocValues values) {
if (values instanceof SingletonSortedBinaryDocValues) {
@ -299,18 +329,6 @@ public enum FieldData {
return null;
}
/**
* Returns the documents with a value for the {@link SortedBinaryDocValues},
* if it was previously wrapped with {@link #singleton(BinaryDocValues, Bits)},
* or null.
*/
public static Bits unwrapSingletonBits(SortedBinaryDocValues values) {
if (values instanceof SingletonSortedBinaryDocValues) {
return ((SingletonSortedBinaryDocValues) values).getDocsWithField();
}
return null;
}
/**
* Returns whether the provided values *might* be multi-valued. There is no
* guarantee that this method will return <tt>false</tt> in the single-valued case.
@ -359,10 +377,13 @@ public enum FieldData {
public static SortedBinaryDocValues toString(final SortedNumericDocValues values) {
return toString(new ToStringValues() {
@Override
public void get(int docID, List<CharSequence> list) {
values.setDocument(docID);
for (int i = 0, count = values.count(); i < count; ++i) {
list.add(Long.toString(values.valueAt(i)));
public boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}
@Override
public void get(List<CharSequence> list) throws IOException {
for (int i = 0, count = values.docValueCount(); i < count; ++i) {
list.add(Long.toString(values.nextValue()));
}
}
});
@ -376,10 +397,13 @@ public enum FieldData {
public static SortedBinaryDocValues toString(final SortedNumericDoubleValues values) {
return toString(new ToStringValues() {
@Override
public void get(int docID, List<CharSequence> list) {
values.setDocument(docID);
for (int i = 0, count = values.count(); i < count; ++i) {
list.add(Double.toString(values.valueAt(i)));
public boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}
@Override
public void get(List<CharSequence> list) throws IOException {
for (int i = 0, count = values.docValueCount(); i < count; ++i) {
list.add(Double.toString(values.nextValue()));
}
}
});
@ -390,23 +414,37 @@ public enum FieldData {
* typically used for scripts or for the `map` execution mode of terms aggs.
* NOTE: this is slow!
*/
public static SortedBinaryDocValues toString(final RandomAccessOrds values) {
public static SortedBinaryDocValues toString(final SortedSetDocValues values) {
return new SortedBinaryDocValues() {
private int count = 0;
@Override
public BytesRef valueAt(int index) {
return values.lookupOrd(values.ordAt(index));
public boolean advanceExact(int doc) throws IOException {
if (values.advanceExact(doc) == false) {
return false;
}
for (int i = 0; ; ++i) {
if (values.nextOrd() == SortedSetDocValues.NO_MORE_ORDS) {
count = i;
break;
}
}
// reset the iterator on the current doc
boolean advanced = values.advanceExact(doc);
assert advanced;
return true;
}
@Override
public void setDocument(int docId) {
values.setDocument(docId);
public int docValueCount() {
return count;
}
@Override
public int count() {
return values.cardinality();
public BytesRef nextValue() throws IOException {
return values.lookupOrd(values.nextOrd());
}
};
}
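The counting pass above has to exhaust the iterator and then re-seek it with a second advanceExact, because the iterator API no longer exposes cardinality(). Consumers that only need a single pass can use the plain NO_MORE_ORDS loop; a sketch, assuming the caller advances in document order (names are illustrative):
import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;

class OrdIteration {
    // Collects deep copies of all terms of one document.
    static void collectTerms(SortedSetDocValues values, int doc, List<BytesRef> out) throws IOException {
        if (values.advanceExact(doc) == false) {
            return; // document has no values
        }
        for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
            out.add(BytesRef.deepCopyOf(values.lookupOrd(ord))); // lookupOrd may reuse its BytesRef
        }
    }
}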
@ -418,78 +456,30 @@ public enum FieldData {
public static SortedBinaryDocValues toString(final MultiGeoPointValues values) {
return toString(new ToStringValues() {
@Override
public void get(int docID, List<CharSequence> list) {
values.setDocument(docID);
for (int i = 0, count = values.count(); i < count; ++i) {
list.add(values.valueAt(i).toString());
public boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}
@Override
public void get(List<CharSequence> list) throws IOException {
for (int i = 0, count = values.docValueCount(); i < count; ++i) {
list.add(values.nextValue().toString());
}
}
});
}
/**
* If <code>dv</code> is an instance of {@link RandomAccessOrds}, then return
* it, otherwise wrap it into a slow wrapper that implements random access.
*/
public static RandomAccessOrds maybeSlowRandomAccessOrds(final SortedSetDocValues dv) {
if (dv instanceof RandomAccessOrds) {
return (RandomAccessOrds) dv;
} else {
assert DocValues.unwrapSingleton(dv) == null : "this method expect singleton to return random-access ords";
return new RandomAccessOrds() {
int cardinality;
long[] ords = new long[0];
int ord;
@Override
public void setDocument(int docID) {
cardinality = 0;
dv.setDocument(docID);
for (long ord = dv.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = dv.nextOrd()) {
ords = ArrayUtil.grow(ords, cardinality + 1);
ords[cardinality++] = ord;
}
ord = 0;
}
@Override
public long nextOrd() {
return ords[ord++];
}
@Override
public BytesRef lookupOrd(long ord) {
return dv.lookupOrd(ord);
}
@Override
public long getValueCount() {
return dv.getValueCount();
}
@Override
public long ordAt(int index) {
return ords[index];
}
@Override
public int cardinality() {
return cardinality;
}
};
}
}
private static SortedBinaryDocValues toString(final ToStringValues toStringValues) {
return new SortingBinaryDocValues() {
final List<CharSequence> list = new ArrayList<>();
@Override
public void setDocument(int docID) {
public boolean advanceExact(int docID) throws IOException {
if (toStringValues.advanceExact(docID) == false) {
return false;
}
list.clear();
toStringValues.get(docID, list);
toStringValues.get(list);
count = list.size();
grow();
for (int i = 0; i < count; ++i) {
@ -497,6 +487,7 @@ public enum FieldData {
values[i].copyChars(s);
}
sort();
return true;
}
};
@ -504,7 +495,14 @@ public enum FieldData {
private interface ToStringValues {
void get(int docID, List<CharSequence> values);
/**
* Advance this instance to the given document id
* @return true if there is a value for this document
*/
boolean advanceExact(int doc) throws IOException;
/** Fill the list of {@link CharSequence}s with the values for the current document. */
void get(List<CharSequence> values) throws IOException;
}
@ -517,8 +515,13 @@ public enum FieldData {
}
@Override
public double get(int docID) {
return values.get(docID);
public double doubleValue() throws IOException {
return values.longValue();
}
@Override
public boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}
}
@ -532,38 +535,49 @@ public enum FieldData {
}
@Override
public double valueAt(int index) {
return values.valueAt(index);
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public void setDocument(int doc) {
values.setDocument(doc);
public double nextValue() throws IOException {
return values.nextValue();
}
@Override
public int count() {
return values.count();
public int docValueCount() {
return values.docValueCount();
}
}
private static class LongCastedValues extends NumericDocValues {
private static class LongCastedValues extends AbstractNumericDocValues {
private final NumericDoubleValues values;
private int docID = -1;
LongCastedValues(NumericDoubleValues values) {
this.values = values;
}
@Override
public long get(int docID) {
return (long) values.get(docID);
public boolean advanceExact(int target) throws IOException {
docID = target;
return values.advanceExact(target);
}
@Override
public long longValue() throws IOException {
return (long) values.doubleValue();
}
@Override
public int docID() {
return docID;
}
}
private static class SortedLongCastedValues extends SortedNumericDocValues {
private static class SortedLongCastedValues extends AbstractSortedNumericDocValues {
private final SortedNumericDoubleValues values;
@ -572,18 +586,18 @@ public enum FieldData {
}
@Override
public long valueAt(int index) {
return (long) values.valueAt(index);
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public void setDocument(int doc) {
values.setDocument(doc);
public int docValueCount() {
return values.docValueCount();
}
@Override
public int count() {
return values.count();
public long nextValue() throws IOException {
return (long) values.nextValue();
}
}

View File

@ -21,17 +21,23 @@ package org.elasticsearch.index.fielddata;
import org.elasticsearch.common.geo.GeoPoint;
import java.io.IOException;
/**
* Per-document geo-point values.
*/
public abstract class GeoPointValues {
/**
* Get the {@link GeoPoint} associated with <code>docID</code>.
* The returned {@link GeoPoint} might be reused across calls.
* If the given <code>docID</code> does not have a value then the returned
* geo point might have both latitude and longitude set to 0.
* Advance this instance to the given document id
* @return true if there is a value for this document
*/
public abstract GeoPoint get(int docID);
public abstract boolean advanceExact(int doc) throws IOException;
/**
* Get the {@link GeoPoint} associated with the current document.
* The returned {@link GeoPoint} might be reused across calls.
*/
public abstract GeoPoint geoPointValue();
}

View File

@ -20,6 +20,8 @@ package org.elasticsearch.index.fielddata;
import org.elasticsearch.common.geo.GeoPoint;
import java.io.IOException;
/**
* A stateful lightweight per document set of {@link GeoPoint} values.
* To iterate over values in a document use the following pattern:
@ -44,28 +46,24 @@ public abstract class MultiGeoPointValues {
}
/**
* Sets iteration to the specified docID.
* @param docId document ID
*
* @see #valueAt(int)
* @see #count()
* Advance this instance to the given document id
* @return true if there is a value for this document
*/
public abstract void setDocument(int docId);
public abstract boolean advanceExact(int doc) throws IOException;
/**
* Return the number of geo points the current document has.
*/
public abstract int count();
public abstract int docValueCount();
/**
* Return the <code>i-th</code> value associated with the current document.
* Behavior is undefined when <code>i</code> is undefined or greater than
* or equal to {@link #count()}.
* Return the next value associated with the current document. This must not be
* called more than {@link #docValueCount()} times.
*
* Note: the returned {@link GeoPoint} might be shared across invocations.
*
* @return the next value for the current docID set to {@link #setDocument(int)}.
* @return the next value for the current docID set via {@link #advanceExact(int)}.
*/
public abstract GeoPoint valueAt(int i);
public abstract GeoPoint nextValue() throws IOException;
}
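A consumer-side sketch of the iteration pattern this javadoc describes (the aggregation itself is hypothetical):
import java.io.IOException;

class GeoPointIteration {
    // Averages the latitudes of one document's points, NaN when the doc has none.
    static double averageLat(MultiGeoPointValues values, int doc) throws IOException {
        if (values.advanceExact(doc) == false) {
            return Double.NaN;
        }
        double sum = 0;
        final int count = values.docValueCount();
        for (int i = 0; i < count; i++) {
            sum += values.nextValue().lat(); // the returned GeoPoint may be reused
        }
        return sum / count;
    }
}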

View File

@ -20,43 +20,58 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DoubleValues;
import java.io.IOException;
/**
* A per-document numeric value.
*/
public abstract class NumericDoubleValues {
public abstract class NumericDoubleValues extends DoubleValues {
/** Sole constructor. (For invocation by subclass
* constructors, typically implicit.) */
protected NumericDoubleValues() {}
/**
* Returns the numeric value for the specified document ID. This must return
* <tt>0d</tt> if the given doc ID has no value.
* @param docID document ID to lookup
* @return numeric value
*/
public abstract double get(int docID);
// TODO: this interaction with sort comparators is really ugly...
/** Returns numeric docvalues view of raw double bits */
public NumericDocValues getRawDoubleValues() {
return new NumericDocValues() {
@Override
public long get(int docID) {
return Double.doubleToRawLongBits(NumericDoubleValues.this.get(docID));
}
return new AbstractNumericDocValues() {
private int docID = -1;
@Override
public boolean advanceExact(int target) throws IOException {
docID = target;
return NumericDoubleValues.this.advanceExact(target);
}
@Override
public long longValue() throws IOException {
return Double.doubleToRawLongBits(NumericDoubleValues.this.doubleValue());
}
@Override
public int docID() {
return docID;
}
};
}
// yes... this is doing what the previous code was doing...
/** Returns numeric docvalues view of raw float bits */
public NumericDocValues getRawFloatValues() {
return new NumericDocValues() {
@Override
public long get(int docID) {
return Float.floatToRawIntBits((float)NumericDoubleValues.this.get(docID));
}
return new AbstractNumericDocValues() {
private int docID = -1;
@Override
public boolean advanceExact(int target) throws IOException {
docID = target;
return NumericDoubleValues.this.advanceExact(target);
}
@Override
public long longValue() throws IOException {
return Float.floatToRawIntBits((float)NumericDoubleValues.this.doubleValue());
}
@Override
public int docID() {
return docID;
}
};
}
}
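The raw-bits views exist so double-valued sources can be handed to comparators that insist on NumericDocValues; the consumer is expected to undo the bit-cast. A sketch under that assumption:
import java.io.IOException;
import org.apache.lucene.index.NumericDocValues;

class RawBitsRoundTrip {
    // Reads a double back out of the raw-bits view.
    static double readBack(NumericDoubleValues doubles, int doc) throws IOException {
        NumericDocValues raw = doubles.getRawDoubleValues();
        if (raw.advanceExact(doc) == false) {
            return 0d;
        }
        // longValue() holds Double.doubleToRawLongBits(...), so reverse it:
        return Double.longBitsToDouble(raw.longValue());
    }
}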

View File

@ -21,7 +21,9 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
@ -32,7 +34,9 @@ import org.joda.time.DateTimeZone;
import org.joda.time.MutableDateTime;
import org.joda.time.ReadableDateTime;
import java.io.IOException;
import java.util.AbstractList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.function.UnaryOperator;
@ -46,7 +50,7 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
/**
* Set the current doc ID.
*/
public abstract void setNextDocId(int docId);
public abstract void setNextDocId(int docId) throws IOException;
/**
* Return a copy of the list of the values for the current document.
@ -83,24 +87,48 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
public static final class Strings extends ScriptDocValues<String> {
private final SortedBinaryDocValues values;
private final SortedBinaryDocValues in;
private BytesRefBuilder[] values = new BytesRefBuilder[0];
private int count;
public Strings(SortedBinaryDocValues values) {
this.values = values;
public Strings(SortedBinaryDocValues in) {
this.in = in;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
public void setNextDocId(int docId) throws IOException {
if (in.advanceExact(docId)) {
resize(in.docValueCount());
for (int i = 0; i < count; i++) {
values[i].copyBytes(in.nextValue());
}
} else {
resize(0);
}
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected void resize(int newSize) {
count = newSize;
if (newSize > values.length) {
final int oldLength = values.length;
values = ArrayUtil.grow(values, count);
for (int i = oldLength; i < values.length; ++i) {
values[i] = new BytesRefBuilder();
}
}
}
public SortedBinaryDocValues getInternalValues() {
return this.values;
return this.in;
}
public BytesRef getBytesValue() {
if (values.count() > 0) {
return values.valueAt(0);
if (size() > 0) {
return values[0].get();
} else {
return null;
}
@ -117,12 +145,12 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
@Override
public String get(int index) {
return values.valueAt(index).utf8ToString();
return values[index].get().utf8ToString();
}
@Override
public int size() {
return values.count();
return count;
}
}
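The copyBytes(...) above is load-bearing: nextValue() may reuse a single BytesRef across calls, so keeping bare references would leave every slot aliasing the last value. For contrast, a deliberately broken sketch of what the copy avoids (hypothetical consumer):
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.BytesRef;

class AliasingPitfall {
    // BROKEN: stores aliases to a possibly-reused BytesRef instead of copies.
    static List<BytesRef> aliasingCollect(SortedBinaryDocValues in, int doc) throws IOException {
        List<BytesRef> out = new ArrayList<>();
        if (in.advanceExact(doc)) {
            for (int i = 0, count = in.docValueCount(); i < count; i++) {
                out.add(in.nextValue()); // every entry may end up equal to the last value
            }
        }
        return out;
    }
}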
@ -130,61 +158,81 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
public static final class Longs extends ScriptDocValues<Long> {
protected static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(Longs.class));
private final SortedNumericDocValues values;
private final SortedNumericDocValues in;
private long[] values = new long[0];
private int count;
private Dates dates;
private int docId = -1;
public Longs(SortedNumericDocValues in) {
this.in = in;
public Longs(SortedNumericDocValues values) {
this.values = values;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
if (dates != null) {
dates.refreshArray();
public void setNextDocId(int docId) throws IOException {
this.docId = docId;
if (in.advanceExact(docId)) {
resize(in.docValueCount());
for (int i = 0; i < count; i++) {
values[i] = in.nextValue();
}
} else {
resize(0);
}
if (dates != null) {
dates.setNextDocId(docId);
}
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected void resize(int newSize) {
count = newSize;
values = ArrayUtil.grow(values, count);
}
public SortedNumericDocValues getInternalValues() {
return this.values;
return this.in;
}
public long getValue() {
int numValues = values.count();
if (numValues == 0) {
if (count == 0) {
return 0L;
}
return values.valueAt(0);
return values[0];
}
@Deprecated
public ReadableDateTime getDate() {
public ReadableDateTime getDate() throws IOException {
deprecationLogger.deprecated("getDate on numeric fields is deprecated. Use a date field to get dates.");
if (dates == null) {
dates = new Dates(values);
dates.refreshArray();
dates = new Dates(in);
dates.setNextDocId(docId);
}
return dates.getValue();
}
@Deprecated
public List<ReadableDateTime> getDates() {
public List<ReadableDateTime> getDates() throws IOException {
deprecationLogger.deprecated("getDates on numeric fields is deprecated. Use a date field to get dates.");
if (dates == null) {
dates = new Dates(values);
dates.refreshArray();
dates = new Dates(in);
dates.setNextDocId(docId);
}
return dates;
}
@Override
public Long get(int index) {
return values.valueAt(index);
return values[index];
}
@Override
public int size() {
return values.count();
return count;
}
}
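Because setNextDocId(...) eagerly copies the document's values into a plain array, scripts keep random-access and repeated-read semantics on top of the forward-only iterator. A usage sketch (the wiring is hypothetical):
import java.io.IOException;
import org.apache.lucene.index.SortedNumericDocValues;

class ScriptLongsUsage {
    static long firstValue(SortedNumericDocValues dv, int doc) throws IOException {
        ScriptDocValues.Longs longs = new ScriptDocValues.Longs(dv);
        longs.setNextDocId(doc); // copies the doc's values up front
        return longs.getValue(); // 0L when the document has no values
    }
}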
@ -193,22 +241,24 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
private static final ReadableDateTime EPOCH = new DateTime(0, DateTimeZone.UTC);
private final SortedNumericDocValues values;
private final SortedNumericDocValues in;
/**
* Values wrapped in {@link MutableDateTime}. Null by default and allocated on first usage so we can allocate it with a reasonable size. We keep
* this array so we don't have to allocate new {@link MutableDateTime}s on every usage. Instead we reuse them for every document.
*/
private MutableDateTime[] dates;
private int count;
public Dates(SortedNumericDocValues values) {
this.values = values;
public Dates(SortedNumericDocValues in) {
this.in = in;
}
/**
* Fetch the first field value or 0 millis after epoch if there are no values.
* Fetch the first field value or 0 millis after epoch if there are no
* values.
*/
public ReadableDateTime getValue() {
if (values.count() == 0) {
if (count == 0) {
return EPOCH;
}
return get(0);
@ -234,113 +284,159 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
@Override
public ReadableDateTime get(int index) {
if (index >= values.count()) {
if (index >= count) {
throw new IndexOutOfBoundsException(
"attempted to fetch the [" + index + "] date when there are only [" + values.count() + "] dates.");
"attempted to fetch the [" + index + "] date when there are only ["
+ count + "] dates.");
}
return dates[index];
}
@Override
public int size() {
return values.count();
return count;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
public void setNextDocId(int docId) throws IOException {
if (in.advanceExact(docId)) {
count = in.docValueCount();
} else {
count = 0;
}
refreshArray();
}
/**
* Refresh the backing array. Package private so it can be called when {@link Longs} loads dates.
*/
void refreshArray() {
if (values.count() == 0) {
void refreshArray() throws IOException {
if (count == 0) {
return;
}
if (dates == null) {
// Happens for the first document. We delay allocating dates so we can allocate it with a reasonable size.
dates = new MutableDateTime[values.count()];
dates = new MutableDateTime[count];
for (int i = 0; i < dates.length; i++) {
dates[i] = new MutableDateTime(values.valueAt(i), DateTimeZone.UTC);
dates[i] = new MutableDateTime(in.nextValue(), DateTimeZone.UTC);
}
return;
}
if (values.count() > dates.length) {
if (count > dates.length) {
// Happens when we move to a new document and it has more dates than any documents before it.
MutableDateTime[] backup = dates;
dates = new MutableDateTime[values.count()];
dates = new MutableDateTime[count];
System.arraycopy(backup, 0, dates, 0, backup.length);
for (int i = 0; i < backup.length; i++) {
dates[i].setMillis(values.valueAt(i));
dates[i].setMillis(in.nextValue());
}
for (int i = backup.length; i < dates.length; i++) {
dates[i] = new MutableDateTime(values.valueAt(i), DateTimeZone.UTC);
dates[i] = new MutableDateTime(in.nextValue(), DateTimeZone.UTC);
}
return;
}
for (int i = 0; i < values.count(); i++) {
dates[i].setMillis(values.valueAt(i));
for (int i = 0; i < count; i++) {
dates[i].setMillis(in.nextValue());
}
}
}
public static final class Doubles extends ScriptDocValues<Double> {
private final SortedNumericDoubleValues values;
private final SortedNumericDoubleValues in;
private double[] values = new double[0];
private int count;
public Doubles(SortedNumericDoubleValues values) {
this.values = values;
public Doubles(SortedNumericDoubleValues in) {
this.in = in;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
public void setNextDocId(int docId) throws IOException {
if (in.advanceExact(docId)) {
resize(in.docValueCount());
for (int i = 0; i < count; i++) {
values[i] = in.nextValue();
}
} else {
resize(0);
}
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected void resize(int newSize) {
count = newSize;
values = ArrayUtil.grow(values, count);
}
public SortedNumericDoubleValues getInternalValues() {
return this.values;
return this.in;
}
public double getValue() {
int numValues = values.count();
if (numValues == 0) {
if (count == 0) {
return 0d;
}
return values.valueAt(0);
return values[0];
}
@Override
public Double get(int index) {
return values.valueAt(index);
return values[index];
}
@Override
public int size() {
return values.count();
return count;
}
}
public static final class GeoPoints extends ScriptDocValues<GeoPoint> {
private final MultiGeoPointValues values;
private final MultiGeoPointValues in;
private GeoPoint[] values = new GeoPoint[0];
private int count;
public GeoPoints(MultiGeoPointValues values) {
this.values = values;
public GeoPoints(MultiGeoPointValues in) {
this.in = in;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
public void setNextDocId(int docId) throws IOException {
if (in.advanceExact(docId)) {
resize(in.docValueCount());
for (int i = 0; i < count; i++) {
GeoPoint point = in.nextValue();
values[i].reset(point.lat(), point.lon());
}
} else {
resize(0);
}
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected void resize(int newSize) {
count = newSize;
if (newSize > values.length) {
int oldLength = values.length;
values = ArrayUtil.grow(values, count);
for (int i = oldLength; i < values.length; ++i) {
values[i] = new GeoPoint();
}
}
}
public GeoPoint getValue() {
int numValues = values.count();
if (numValues == 0) {
if (count == 0) {
return null;
}
return values.valueAt(0);
return values[0];
}
public double getLat() {
@ -371,13 +467,13 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
@Override
public GeoPoint get(int index) {
final GeoPoint point = values.valueAt(index);
final GeoPoint point = values[index];
return new GeoPoint(point.lat(), point.lon());
}
@Override
public int size() {
return values.count();
return count;
}
public double arcDistance(double lat, double lon) {
@ -420,66 +516,114 @@ public abstract class ScriptDocValues<T> extends AbstractList<T> {
public static final class Booleans extends ScriptDocValues<Boolean> {
private final SortedNumericDocValues values;
private final SortedNumericDocValues in;
private boolean[] values = new boolean[0];
private int count;
public Booleans(SortedNumericDocValues values) {
this.values = values;
public Booleans(SortedNumericDocValues in) {
this.in = in;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
public void setNextDocId(int docId) throws IOException {
if (in.advanceExact(docId)) {
resize(in.docValueCount());
for (int i = 0; i < count; i++) {
values[i] = in.nextValue() == 1;
}
} else {
resize(0);
}
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected void resize(int newSize) {
count = newSize;
values = grow(values, count);
}
public boolean getValue() {
return values.count() != 0 && values.valueAt(0) == 1;
return count != 0 && values[0];
}
@Override
public Boolean get(int index) {
return values.valueAt(index) == 1;
return values[index];
}
@Override
public int size() {
return values.count();
return count;
}
private static boolean[] grow(boolean[] array, int minSize) {
assert minSize >= 0 : "size must be positive (got " + minSize
+ "): likely integer overflow?";
if (array.length < minSize) {
return Arrays.copyOf(array, ArrayUtil.oversize(minSize, 1));
} else {
return array;
}
}
}
public static final class BytesRefs extends ScriptDocValues<BytesRef> {
private final SortedBinaryDocValues values;
private final SortedBinaryDocValues in;
private BytesRef[] values;
private int count;
public BytesRefs(SortedBinaryDocValues values) {
this.values = values;
public BytesRefs(SortedBinaryDocValues in) {
this.in = in;
}
@Override
public void setNextDocId(int docId) {
values.setDocument(docId);
public void setNextDocId(int docId) throws IOException {
if (in.advanceExact(docId)) {
resize(in.docValueCount());
for (int i = 0; i < count; i++) {
values[i] = in.nextValue();
}
} else {
resize(0);
}
}
/**
* Set the {@link #size()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected void resize(int newSize) {
count = newSize;
if (values == null) {
values = new BytesRef[newSize];
} else {
values = ArrayUtil.grow(values, count);
}
}
public SortedBinaryDocValues getInternalValues() {
return this.values;
return this.in;
}
public BytesRef getValue() {
int numValues = values.count();
if (numValues == 0) {
if (count == 0) {
return new BytesRef();
}
return values.valueAt(0);
return values[0];
}
@Override
public BytesRef get(int index) {
return values.valueAt(index);
return values[index];
}
@Override
public int size() {
return values.count();
return count;
}
}
}

View File

@ -19,48 +19,34 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.geo.GeoPoint;
import java.io.IOException;
final class SingletonMultiGeoPointValues extends MultiGeoPointValues {
private final GeoPointValues in;
private final Bits docsWithField;
private GeoPoint value;
private int count;
SingletonMultiGeoPointValues(GeoPointValues in, Bits docsWithField) {
SingletonMultiGeoPointValues(GeoPointValues in) {
this.in = in;
this.docsWithField = docsWithField;
}
@Override
public void setDocument(int docID) {
value = in.get(docID);
if (value.lat() == Double.NaN && value.lon() == Double.NaN || (docsWithField != null && !docsWithField.get(docID))) {
count = 0;
} else {
count = 1;
}
public boolean advanceExact(int doc) throws IOException {
return in.advanceExact(doc);
}
@Override
public int count() {
return count;
public int docValueCount() {
return 1;
}
@Override
public GeoPoint valueAt(int index) {
assert index == 0;
return value;
public GeoPoint nextValue() {
return in.geoPointValue();
}
public GeoPointValues getGeoPointValues() {
GeoPointValues getGeoPointValues() {
return in;
}
public Bits getDocsWithField() {
return docsWithField;
}
}
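Missing documents are now signalled by advanceExact(...) returning false, which is why the singleton wrappers can report a constant docValueCount() of 1 and drop their docsWithField bits. A consumer-side sketch of the resulting contract:
import java.io.IOException;
import org.elasticsearch.common.geo.GeoPoint;

class SingletonContract {
    // Returns the document's point, or null when the document has no value.
    static GeoPoint pointOrNull(MultiGeoPointValues geo, int doc) throws IOException {
        if (geo.advanceExact(doc) == false) {
            return null; // the only remaining "missing" signal
        }
        return geo.nextValue(); // a singleton view always holds exactly one value
    }
}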

View File

@ -20,49 +20,35 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.Bits.MatchAllBits;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
final class SingletonSortedBinaryDocValues extends SortedBinaryDocValues {
private final BinaryDocValues in;
private final Bits docsWithField;
private BytesRef value;
private int count;
SingletonSortedBinaryDocValues(BinaryDocValues in, Bits docsWithField) {
SingletonSortedBinaryDocValues(BinaryDocValues in) {
this.in = in;
this.docsWithField = docsWithField instanceof MatchAllBits ? null : docsWithField;
}
@Override
public void setDocument(int docID) {
value = in.get(docID);
if (value.length == 0 && docsWithField != null && !docsWithField.get(docID)) {
count = 0;
} else {
count = 1;
}
public boolean advanceExact(int doc) throws IOException {
return in.advanceExact(doc);
}
@Override
public int count() {
return count;
public int docValueCount() {
return 1;
}
@Override
public BytesRef valueAt(int index) {
assert index == 0;
return value;
public BytesRef nextValue() throws IOException {
return in.binaryValue();
}
public BinaryDocValues getBinaryDocValues() {
return in;
}
public Bits getDocsWithField() {
return docsWithField;
}
}

View File

@ -19,8 +19,7 @@
package org.elasticsearch.index.fielddata;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.Bits.MatchAllBits;
import java.io.IOException;
/**
* Exposes multi-valued view over a single-valued instance.
@ -29,43 +28,30 @@ import org.apache.lucene.util.Bits.MatchAllBits;
* that works for single or multi-valued types.
*/
final class SingletonSortedNumericDoubleValues extends SortedNumericDoubleValues {
private final NumericDoubleValues in;
private final Bits docsWithField;
private double value;
private int count;
private final NumericDoubleValues in;
SingletonSortedNumericDoubleValues(NumericDoubleValues in, Bits docsWithField) {
this.in = in;
this.docsWithField = docsWithField instanceof MatchAllBits ? null : docsWithField;
}
/** Return the wrapped {@link NumericDoubleValues} */
public NumericDoubleValues getNumericDoubleValues() {
return in;
}
/** Return the wrapped {@link Bits} */
public Bits getDocsWithField() {
return docsWithField;
}
@Override
public void setDocument(int doc) {
value = in.get(doc);
if (docsWithField != null && value == 0 && docsWithField.get(doc) == false) {
count = 0;
} else {
count = 1;
SingletonSortedNumericDoubleValues(NumericDoubleValues in) {
this.in = in;
}
}
@Override
public double valueAt(int index) {
return value;
}
/** Return the wrapped {@link NumericDoubleValues} */
public NumericDoubleValues getNumericDoubleValues() {
return in;
}
@Override
public boolean advanceExact(int target) throws IOException {
return in.advanceExact(target);
}
@Override
public int docValueCount() {
return 1;
}
@Override
public double nextValue() throws IOException {
return in.doubleValue();
}
@Override
public int count() {
return count;
}
}

View File

@ -22,13 +22,16 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
/**
* {@link NumericDocValues} instance that wraps a {@link NumericDoubleValues}
* and converts the doubles to sortable long bits using
* {@link NumericUtils#doubleToSortableLong(double)}.
*/
final class SortableLongBitsNumericDocValues extends NumericDocValues {
final class SortableLongBitsNumericDocValues extends AbstractNumericDocValues {
private int docID = -1;
private final NumericDoubleValues values;
SortableLongBitsNumericDocValues(NumericDoubleValues values) {
@ -36,8 +39,19 @@ final class SortableLongBitsNumericDocValues extends NumericDocValues {
}
@Override
public long get(int docID) {
return NumericUtils.doubleToSortableLong(values.get(docID));
public long longValue() throws IOException {
return NumericUtils.doubleToSortableLong(values.doubleValue());
}
@Override
public boolean advanceExact(int target) throws IOException {
docID = target;
return values.advanceExact(target);
}
@Override
public int docID() {
return docID;
}
/** Return the wrapped values. */

View File

@ -22,12 +22,14 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
/**
* {@link SortedNumericDocValues} instance that wraps a {@link SortedNumericDoubleValues}
* and converts the doubles to sortable long bits using
* {@link NumericUtils#doubleToSortableLong(double)}.
*/
final class SortableLongBitsSortedNumericDocValues extends SortedNumericDocValues {
final class SortableLongBitsSortedNumericDocValues extends AbstractSortedNumericDocValues {
private final SortedNumericDoubleValues values;
@ -36,18 +38,18 @@ final class SortableLongBitsSortedNumericDocValues extends SortedNumericDocValue
}
@Override
public void setDocument(int doc) {
values.setDocument(doc);
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public long valueAt(int index) {
return NumericUtils.doubleToSortableLong(values.valueAt(index));
public long nextValue() throws IOException {
return NumericUtils.doubleToSortableLong(values.nextValue());
}
@Override
public int count() {
return values.count();
public int docValueCount() {
return values.docValueCount();
}
/** Return the wrapped values. */

View File

@ -22,6 +22,8 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
/**
* {@link NumericDoubleValues} instance that wraps a {@link NumericDocValues}
* and converts the doubles to sortable long bits using
@ -36,8 +38,13 @@ final class SortableLongBitsToNumericDoubleValues extends NumericDoubleValues {
}
@Override
public double get(int docID) {
return NumericUtils.sortableLongToDouble(values.get(docID));
public double doubleValue() throws IOException {
return NumericUtils.sortableLongToDouble(values.longValue());
}
@Override
public boolean advanceExact(int doc) throws IOException {
return values.advanceExact(doc);
}
/** Return the wrapped values. */

View File

@ -22,6 +22,8 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.NumericUtils;
import java.io.IOException;
/**
* {@link SortedNumericDoubleValues} instance that wraps a {@link SortedNumericDocValues}
* and converts the doubles to sortable long bits using
@ -36,18 +38,18 @@ final class SortableLongBitsToSortedNumericDoubleValues extends SortedNumericDou
}
@Override
public void setDocument(int doc) {
values.setDocument(doc);
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public double valueAt(int index) {
return NumericUtils.sortableLongToDouble(values.valueAt(index));
public double nextValue() throws IOException {
return NumericUtils.sortableLongToDouble(values.nextValue());
}
@Override
public int count() {
return values.count();
public int docValueCount() {
return values.docValueCount();
}
/** Return the wrapped values. */

View File

@ -21,28 +21,35 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.util.BytesRef;
import java.io.IOException;
/**
* A list of per-document binary values, sorted
* according to {@link BytesRef#compareTo(BytesRef)}.
* There might be dups however.
*/
// TODO: Should it expose a count (current approach) or return null when there are no more values?
public abstract class SortedBinaryDocValues {
/**
* Positions to the specified document
* Advance this instance to the given document id
* @return true if there is a value for this document
*/
public abstract void setDocument(int docId);
public abstract boolean advanceExact(int doc) throws IOException;
/**
* Return the number of values of the current document.
/**
* Retrieves the number of values for the current document. This must always
* be greater than zero.
* It is illegal to call this method after {@link #advanceExact(int)}
* returned {@code false}.
*/
public abstract int count();
public abstract int docValueCount();
/**
* Retrieve the value for the current document at the specified index.
* An index ranges from {@code 0} to {@code count()-1}.
/**
* Iterates to the next value in the current document. Do not call this more than
* {@link #docValueCount} times for the document.
* Note that the returned {@link BytesRef} might be reused across invocations.
*/
public abstract BytesRef valueAt(int index);
public abstract BytesRef nextValue() throws IOException;
}

View File

@ -21,6 +21,8 @@ package org.elasticsearch.index.fielddata;
import org.apache.lucene.index.SortedNumericDocValues;
import java.io.IOException;
/**
* Clone of {@link SortedNumericDocValues} for double values.
*/
@ -30,21 +32,25 @@ public abstract class SortedNumericDoubleValues {
* constructors, typically implicit.) */
protected SortedNumericDoubleValues() {}
/**
* Positions to the specified document
*/
public abstract void setDocument(int doc);
/** Advance the iterator to exactly {@code target} and return whether
* {@code target} has a value.
* {@code target} must be greater than or equal to the current
* doc ID and must be a valid doc ID, ie. &ge; 0 and
* &lt; {@code maxDoc}.*/
public abstract boolean advanceExact(int target) throws IOException;
/**
* Retrieve the value for the current document at the specified index.
* An index ranges from {@code 0} to {@code count()-1}.
/**
* Iterates to the next value in the current document. Do not call this more than
* {@link #docValueCount} times for the document.
*/
public abstract double valueAt(int index);
/**
* Retrieves the count of values for the current document.
* This may be zero if a document has no values.
public abstract double nextValue() throws IOException;
/**
* Retrieves the number of values for the current document. This must always
* be greater than zero.
* It is illegal to call this method after {@link #advanceExact(int)}
* returned {@code false}.
*/
public abstract int count();
public abstract int docValueCount();
}
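A minimal implementation sketch of this contract, exposing one constant value per document (illustrative only, not part of the change):
import java.io.IOException;

final class ConstantDoubleValues extends SortedNumericDoubleValues {
    private final double value;

    ConstantDoubleValues(double value) {
        this.value = value;
    }

    @Override
    public boolean advanceExact(int target) throws IOException {
        return true; // every document has a value
    }

    @Override
    public int docValueCount() {
        return 1;
    }

    @Override
    public double nextValue() throws IOException {
        return value;
    }
}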

View File

@ -33,6 +33,7 @@ import java.util.Arrays;
*/
public abstract class SortingBinaryDocValues extends SortedBinaryDocValues {
private int index;
protected int count;
protected BytesRefBuilder[] values;
private final Sorter sorter;
@ -73,15 +74,17 @@ public abstract class SortingBinaryDocValues extends SortedBinaryDocValues {
*/
protected final void sort() {
sorter.sort(0, count);
index = 0;
}
@Override
public final int count() {
public int docValueCount() {
return count;
}
@Override
public final BytesRef valueAt(int index) {
return values[index].get();
public final BytesRef nextValue() {
assert index < count;
return values[index++].get();
}
}
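Subclasses now drive a read cursor instead of offering random access: fill values, set count, and call sort(), which also rewinds the cursor that nextValue() consumes. An illustrative subclass, not taken from this change:
import java.io.IOException;

// Emits two synthetic terms per document, handed out in sorted order.
final class TwoTermsDocValues extends SortingBinaryDocValues {
    @Override
    public boolean advanceExact(int doc) throws IOException {
        count = 2;
        grow(); // make sure values[] can hold `count` entries
        values[0].copyChars("term-b-" + doc);
        values[1].copyChars("term-a-" + doc);
        sort(); // sorts values[0..count) and resets the read cursor
        return true;
    }
}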

View File

@ -31,10 +31,12 @@ public abstract class SortingNumericDocValues extends SortedNumericDocValues {
private int count;
protected long[] values;
protected int valuesCursor;
private final Sorter sorter;
protected SortingNumericDocValues() {
values = new long[1];
valuesCursor = 0;
sorter = new InPlaceMergeSorter() {
@Override
@ -52,12 +54,13 @@ public abstract class SortingNumericDocValues extends SortedNumericDocValues {
}
/**
* Set the {@link #count()} and ensure that the {@link #values} array can
* Set the {@link #docValueCount()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected final void resize(int newSize) {
count = newSize;
values = ArrayUtil.grow(values, count);
valuesCursor = 0;
}
/**
@ -69,12 +72,12 @@ public abstract class SortingNumericDocValues extends SortedNumericDocValues {
}
@Override
public final int count() {
public final int docValueCount() {
return count;
}
@Override
public final long valueAt(int index) {
return values[index];
public final long nextValue() {
return values[valuesCursor++];
}
}

View File

@ -29,11 +29,13 @@ import org.apache.lucene.util.Sorter;
public abstract class SortingNumericDoubleValues extends SortedNumericDoubleValues {
private int count;
private int valuesCursor;
protected double[] values;
private final Sorter sorter;
protected SortingNumericDoubleValues() {
values = new double[1];
valuesCursor = 0;
sorter = new InPlaceMergeSorter() {
@Override
@ -51,29 +53,30 @@ public abstract class SortingNumericDoubleValues extends SortedNumericDoubleValu
}
/**
* Set the {@link #count()} and ensure that the {@link #values} array can
* Set the {@link #docValueCount()} and ensure that the {@link #values} array can
* store at least that many entries.
*/
protected final void resize(int newSize) {
count = newSize;
values = ArrayUtil.grow(values, count);
valuesCursor = 0;
}
/**
* Sort values that are stored between offsets <code>0</code> and
* {@link #count} of {@link #values}.
* {@link #docValueCount} of {@link #values}.
*/
protected final void sort() {
sorter.sort(0, count);
}
@Override
public final int count() {
public final int docValueCount() {
return count;
}
@Override
public final double valueAt(int index) {
return values[index];
public final double nextValue() {
return values[valuesCursor++];
}
}

View File

@ -21,15 +21,15 @@ package org.elasticsearch.index.fielddata.fieldcomparator;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.index.fielddata.AbstractSortedDocValues;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
@ -89,7 +89,7 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
@Override
protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException {
final RandomAccessOrds values = ((IndexOrdinalsFieldData) indexFieldData).load(context).getOrdinalsValues();
final SortedSetDocValues values = ((IndexOrdinalsFieldData) indexFieldData).load(context).getOrdinalsValues();
final SortedDocValues selectedValues;
if (nested == null) {
selectedValues = sortMode.select(values);
@ -113,8 +113,6 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
};
}
final BytesRef nullPlaceHolder = new BytesRef();
final BytesRef nonNullMissingBytes = missingBytes == null ? nullPlaceHolder : missingBytes;
return new FieldComparator.TermValComparator(numHits, null, sortMissingLast) {
@Override
@ -122,25 +120,15 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
final SortedBinaryDocValues values = getValues(context);
final BinaryDocValues selectedValues;
if (nested == null) {
selectedValues = sortMode.select(values, nonNullMissingBytes);
selectedValues = sortMode.select(values, missingBytes);
} else {
final BitSet rootDocs = nested.rootDocs(context);
final DocIdSetIterator innerDocs = nested.innerDocs(context);
selectedValues = sortMode.select(values, nonNullMissingBytes, rootDocs, innerDocs, context.reader().maxDoc());
selectedValues = sortMode.select(values, missingBytes, rootDocs, innerDocs, context.reader().maxDoc());
}
return selectedValues;
}
@Override
protected Bits getDocsWithField(LeafReaderContext context, String field) throws IOException {
return new Bits.MatchAllBits(context.reader().maxDoc());
}
@Override
protected boolean isNull(int doc, BytesRef term) {
return term == nullPlaceHolder;
}
@Override
public void setScorer(Scorer scorer) {
BytesRefFieldComparatorSource.this.setScorer(scorer);
@ -154,13 +142,14 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
* are replaced with the specified term
*/
// TODO: move this out if we need it for other reasons
static class ReplaceMissing extends SortedDocValues {
static class ReplaceMissing extends AbstractSortedDocValues {
final SortedDocValues in;
final int substituteOrd;
final BytesRef substituteTerm;
final boolean exists;
boolean hasValue;
ReplaceMissing(SortedDocValues in, BytesRef term) {
ReplaceMissing(SortedDocValues in, BytesRef term) throws IOException {
this.in = in;
this.substituteTerm = term;
int sub = in.lookupTerm(term);
@ -174,17 +163,29 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
}
@Override
public int getOrd(int docID) {
int ord = in.getOrd(docID);
if (ord < 0) {
public int ordValue() throws IOException {
if (hasValue == false) {
return substituteOrd;
} else if (exists == false && ord >= substituteOrd) {
}
int ord = in.ordValue();
if (exists == false && ord >= substituteOrd) {
return ord + 1;
} else {
return ord;
}
}
@Override
public boolean advanceExact(int target) throws IOException {
hasValue = in.advanceExact(target);
return true;
}
@Override
public int docID() {
return in.docID();
}
@Override
public int getValueCount() {
if (exists) {
@ -195,7 +196,7 @@ public class BytesRefFieldComparatorSource extends IndexFieldData.XFieldComparat
}
@Override
public BytesRef lookupOrd(int ord) {
public BytesRef lookupOrd(int ord) throws IOException {
if (ord == substituteOrd) {
return substituteTerm;
} else if (exists == false && ord > substituteOrd) {
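A worked example makes the ord shifting concrete. Assume a segment whose term dictionary is exactly {"a", "c"} and a missing value of "b" (the setup and class name are hypothetical; ReplaceMissing is the package-private nested class above):
import java.io.IOException;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.BytesRef;

class ReplaceMissingExample {
    // `in` is assumed to expose exactly the terms {"a", "c"}.
    static void demo(SortedDocValues in) throws IOException {
        // lookupTerm("b") is negative, so exists == false and substituteOrd == 1.
        BytesRefFieldComparatorSource.ReplaceMissing rm =
                new BytesRefFieldComparatorSource.ReplaceMissing(in, new BytesRef("b"));
        assert rm.getValueCount() == 3;                    // {"a", "b", "c"}
        assert rm.lookupOrd(0).utf8ToString().equals("a");
        assert rm.lookupOrd(1).utf8ToString().equals("b"); // the substituted term
        assert rm.lookupOrd(2).utf8ToString().equals("c"); // segment ord 1, shifted up by one
    }
}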

View File

@ -20,22 +20,23 @@
package org.elasticsearch.index.fielddata.ordinals;
import org.apache.lucene.index.MultiDocValues.OrdinalMap;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LongValues;
import org.elasticsearch.index.fielddata.AbstractRandomAccessOrds;
import java.io.IOException;
/**
* A {@link RandomAccessOrds} implementation that returns ordinals that are global.
* A {@link SortedSetDocValues} implementation that returns ordinals that are global.
*/
public class GlobalOrdinalMapping extends AbstractRandomAccessOrds {
public class GlobalOrdinalMapping extends SortedSetDocValues {
private final RandomAccessOrds values;
private final SortedSetDocValues values;
private final OrdinalMap ordinalMap;
private final LongValues mapping;
private final RandomAccessOrds[] bytesValues;
private final SortedSetDocValues[] bytesValues;
GlobalOrdinalMapping(OrdinalMap ordinalMap, RandomAccessOrds[] bytesValues, int segmentIndex) {
GlobalOrdinalMapping(OrdinalMap ordinalMap, SortedSetDocValues[] bytesValues, int segmentIndex) {
super();
this.values = bytesValues[segmentIndex];
this.bytesValues = bytesValues;
@ -53,25 +54,45 @@ public class GlobalOrdinalMapping extends AbstractRandomAccessOrds {
}
@Override
public long ordAt(int index) {
return getGlobalOrd(values.ordAt(index));
public boolean advanceExact(int target) throws IOException {
return values.advanceExact(target);
}
@Override
public void doSetDocument(int docId) {
values.setDocument(docId);
public long nextOrd() throws IOException {
long segmentOrd = values.nextOrd();
if (segmentOrd == SortedSetDocValues.NO_MORE_ORDS) {
return SortedSetDocValues.NO_MORE_ORDS;
} else {
return getGlobalOrd(segmentOrd);
}
}
@Override
public int cardinality() {
return values.cardinality();
}
@Override
public BytesRef lookupOrd(long globalOrd) {
public BytesRef lookupOrd(long globalOrd) throws IOException {
final long segmentOrd = ordinalMap.getFirstSegmentOrd(globalOrd);
int readerIndex = ordinalMap.getFirstSegmentNumber(globalOrd);
return bytesValues[readerIndex].lookupOrd(segmentOrd);
}
@Override
public int docID() {
return values.docID();
}
@Override
public int nextDoc() throws IOException {
return values.nextDoc();
}
@Override
public int advance(int target) throws IOException {
return values.advance(target);
}
@Override
public long cost() {
return values.cost();
}
}
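The payoff of the global view is that ords from different segments become directly comparable, so a collector can bucket on the ord and resolve the actual term only once per bucket. A consumer sketch (assumes counts.length >= getValueCount(); not part of this change):
import java.io.IOException;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;

class GlobalOrdCollector {
    // Buckets one document's terms by global ord; no term lookup on the hot path.
    static void collect(GlobalOrdinalMapping global, int doc, long[] counts) throws IOException {
        if (global.advanceExact(doc)) {
            for (long ord = global.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = global.nextOrd()) {
                counts[(int) ord]++;
            }
        }
    }

    // Resolved lazily, e.g. when building the final buckets.
    static BytesRef resolve(GlobalOrdinalMapping global, long globalOrd) throws IOException {
        return global.lookupOrd(globalOrd); // routed back to the segment that owns the term
    }
}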

View File

@ -23,7 +23,7 @@ import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiDocValues.OrdinalMap;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.packed.PackedInts;
import org.elasticsearch.common.breaker.CircuitBreaker;
@ -52,12 +52,12 @@ public enum GlobalOrdinalsBuilder {
*/
public static IndexOrdinalsFieldData build(final IndexReader indexReader, IndexOrdinalsFieldData indexFieldData,
IndexSettings indexSettings, CircuitBreakerService breakerService, Logger logger,
Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) throws IOException {
Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction) throws IOException {
assert indexReader.leaves().size() > 1;
long startTimeNS = System.nanoTime();
final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];
final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];
final SortedSetDocValues[] subs = new SortedSetDocValues[indexReader.leaves().size()];
for (int i = 0; i < indexReader.leaves().size(); ++i) {
atomicFD[i] = indexFieldData.load(indexReader.leaves().get(i));
subs[i] = atomicFD[i].getOrdinalsValues();
@ -83,11 +83,11 @@ public enum GlobalOrdinalsBuilder {
assert indexReader.leaves().size() > 1;
final AtomicOrdinalsFieldData[] atomicFD = new AtomicOrdinalsFieldData[indexReader.leaves().size()];
final RandomAccessOrds[] subs = new RandomAccessOrds[indexReader.leaves().size()];
final SortedSetDocValues[] subs = new SortedSetDocValues[indexReader.leaves().size()];
for (int i = 0; i < indexReader.leaves().size(); ++i) {
atomicFD[i] = new AbstractAtomicOrdinalsFieldData(AbstractAtomicOrdinalsFieldData.DEFAULT_SCRIPT_FUNCTION) {
@Override
public RandomAccessOrds getOrdinalsValues() {
public SortedSetDocValues getOrdinalsValues() {
return DocValues.emptySortedSet();
}

View File

@ -20,7 +20,7 @@ package org.elasticsearch.index.fielddata.ordinals;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues.OrdinalMap;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicOrdinalsFieldData;
@ -36,10 +36,10 @@ import java.util.function.Function;
final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFieldData {
private final Atomic[] atomicReaders;
private final Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction;
private final Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction;
InternalGlobalOrdinalsIndexFieldData(IndexSettings indexSettings, String fieldName, AtomicOrdinalsFieldData[] segmentAfd,
OrdinalMap ordinalMap, long memorySizeInBytes, Function<RandomAccessOrds, ScriptDocValues<?>> scriptFunction) {
OrdinalMap ordinalMap, long memorySizeInBytes, Function<SortedSetDocValues, ScriptDocValues<?>> scriptFunction) {
super(indexSettings, fieldName, memorySizeInBytes);
this.atomicReaders = new Atomic[segmentAfd.length];
for (int i = 0; i < segmentAfd.length; i++) {
@ -67,13 +67,13 @@ final class InternalGlobalOrdinalsIndexFieldData extends GlobalOrdinalsIndexFiel
}
@Override
public RandomAccessOrds getOrdinalsValues() {
final RandomAccessOrds values = afd.getOrdinalsValues();
public SortedSetDocValues getOrdinalsValues() {
final SortedSetDocValues values = afd.getOrdinalsValues();
if (values.getValueCount() == ordinalMap.getValueCount()) {
// segment ordinals match global ordinals
return values;
}
final RandomAccessOrds[] bytesValues = new RandomAccessOrds[atomicReaders.length];
final SortedSetDocValues[] bytesValues = new SortedSetDocValues[atomicReaders.length];
for (int i = 0; i < bytesValues.length; i++) {
bytesValues[i] = atomicReaders[i].afd.getOrdinalsValues();
}

Some files were not shown because too many files have changed in this diff.