lucene4: switched setNextReader from IndexReader to AtomicReaderContext

Authored by Igor Motov on 2012-10-30 22:37:43 -04:00; committed by Shay Banon
parent 25d03a6a7d
commit 93906903b6
7 changed files with 37 additions and 33 deletions
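
For orientation: Lucene 4 drives collection segment by segment, handing each leaf's AtomicReaderContext to setNextReader instead of a top-level IndexReader plus a docBase offset, which is what this commit migrates to. A rough, self-contained sketch of that per-leaf driving loop against the plain Lucene 4.0 API; the class name LeafDriver and the Directory-based setup are illustrative, not part of this commit:

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.Collector;
import org.apache.lucene.store.Directory;

class LeafDriver {
    // Feeds every segment of an index to a collector, the way IndexSearcher does internally.
    static void collectAllLeaves(Directory dir, Collector collector) throws IOException {
        DirectoryReader reader = DirectoryReader.open(dir);
        try {
            for (AtomicReaderContext leaf : reader.leaves()) {
                // one call per segment; leaf.reader() and leaf.docBase replace the old arguments
                collector.setNextReader(leaf);
                // a Scorer built for this leaf would then call collector.collect(segmentRelativeDocId)
            }
        } finally {
            reader.close();
        }
    }
}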

FilteredCollector.java

@@ -19,7 +19,7 @@
 package org.elasticsearch.common.lucene.search;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Scorer;
@@ -57,9 +57,9 @@ public class FilteredCollector extends Collector {
 }
 @Override
-public void setNextReader(IndexReader reader, int docBase) throws IOException {
-collector.setNextReader(reader, docBase);
-docSet = DocSets.convert(reader, filter.getDocIdSet(reader));
+public void setNextReader(AtomicReaderContext context) throws IOException {
+collector.setNextReader(context);
+docSet = DocSets.convert(context.reader(), filter.getDocIdSet(context));
 }
 @Override
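
The Lucene 4 Collector contract that FilteredCollector now implements looks roughly like the sketch below: the old (IndexReader reader, int docBase) pair collapses into one AtomicReaderContext, which exposes both the segment reader (context.reader()) and the document base (context.docBase). GlobalIdCollector is an illustrative name, not a class from this commit:

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Scorer;

// Collects index-wide doc ids; shows where the old reader/docBase arguments went.
class GlobalIdCollector extends Collector {

    private int docBase;

    @Override
    public void setScorer(Scorer scorer) throws IOException {
        // scores are not needed for this sketch
    }

    @Override
    public void setNextReader(AtomicReaderContext context) throws IOException {
        docBase = context.docBase;   // was the second argument in Lucene 3.x
        // context.reader() is the per-segment reader that used to be passed directly
    }

    @Override
    public void collect(int doc) throws IOException {
        int globalDoc = docBase + doc;   // doc is segment-relative in Lucene 4
        // ... record globalDoc somewhere
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
        return true;
    }
}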

BoostScoreFunction.java

@@ -19,7 +19,7 @@
 package org.elasticsearch.common.lucene.search.function;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Explanation;
 /**
@@ -39,7 +39,7 @@ public class BoostScoreFunction implements ScoreFunction {
 }
 @Override
-public void setNextReader(IndexReader reader) {
+public void setNextReader(AtomicReaderContext context) {
 // nothing to do here...
 }

FiltersFunctionScoreQuery.java

@@ -19,9 +19,11 @@
 package org.elasticsearch.common.lucene.search.function;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 import org.elasticsearch.common.lucene.docset.DocSet;
 import org.elasticsearch.common.lucene.docset.DocSets;
@@ -106,15 +108,15 @@ public class FiltersFunctionScoreQuery extends Query {
 }
 @Override
-public Weight createWeight(Searcher searcher) throws IOException {
+public Weight createWeight(IndexSearcher searcher) throws IOException {
 return new CustomBoostFactorWeight(searcher);
 }
 class CustomBoostFactorWeight extends Weight {
-Searcher searcher;
+IndexSearcher searcher;
 Weight subQueryWeight;
-public CustomBoostFactorWeight(Searcher searcher) throws IOException {
+public CustomBoostFactorWeight(IndexSearcher searcher) throws IOException {
 this.searcher = searcher;
 this.subQueryWeight = subQuery.weight(searcher);
 }
@@ -141,31 +143,31 @@ public class FiltersFunctionScoreQuery extends Query {
 }
 @Override
-public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
-Scorer subQueryScorer = subQueryWeight.scorer(reader, scoreDocsInOrder, false);
+public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+Scorer subQueryScorer = subQueryWeight.scorer(context, scoreDocsInOrder, false, acceptDocs);
 if (subQueryScorer == null) {
 return null;
 }
 for (int i = 0; i < filterFunctions.length; i++) {
 FilterFunction filterFunction = filterFunctions[i];
-filterFunction.function.setNextReader(reader);
-docSets[i] = DocSets.convert(reader, filterFunction.filter.getDocIdSet(reader));
+filterFunction.function.setNextReader(context);
+docSets[i] = DocSets.convert(context.reader(), filterFunction.filter.getDocIdSet(context, acceptDocs));
 }
 return new CustomBoostFactorScorer(getSimilarity(searcher), this, subQueryScorer, scoreMode, filterFunctions, maxBoost, docSets);
 }
 @Override
-public Explanation explain(IndexReader reader, int doc) throws IOException {
-Explanation subQueryExpl = subQueryWeight.explain(reader, doc);
+public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+Explanation subQueryExpl = subQueryWeight.explain(context, doc);
 if (!subQueryExpl.isMatch()) {
 return subQueryExpl;
 }
 if (scoreMode == ScoreMode.First) {
 for (FilterFunction filterFunction : filterFunctions) {
-DocSet docSet = DocSets.convert(reader, filterFunction.filter.getDocIdSet(reader));
+DocSet docSet = DocSets.convert(context.reader(), filterFunction.filter.getDocIdSet(context));
 if (docSet.get(doc)) {
-filterFunction.function.setNextReader(reader);
+filterFunction.function.setNextReader(context);
 Explanation functionExplanation = filterFunction.function.explainFactor(doc);
 float sc = getValue() * subQueryExpl.getValue() * functionExplanation.getValue();
 Explanation filterExplanation = new ComplexExplanation(true, sc, "custom score, product of:");
@@ -189,9 +191,9 @@ public class FiltersFunctionScoreQuery extends Query {
 float min = Float.POSITIVE_INFINITY;
 ArrayList<Explanation> filtersExplanations = new ArrayList<Explanation>();
 for (FilterFunction filterFunction : filterFunctions) {
-DocSet docSet = DocSets.convert(reader, filterFunction.filter.getDocIdSet(reader));
+DocSet docSet = DocSets.convert(context.reader(), filterFunction.filter.getDocIdSet(context));
 if (docSet.get(doc)) {
-filterFunction.function.setNextReader(reader);
+filterFunction.function.setNextReader(context);
 Explanation functionExplanation = filterFunction.function.explainFactor(doc);
 float factor = functionExplanation.getValue();
 count++;
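
The scorer above now evaluates each filter per segment via Filter.getDocIdSet(context, acceptDocs), which returns segment-relative doc ids. A hedged sketch of evaluating one filter against one segment in plain Lucene 4; the helper class SegmentFilterEval and its null handling are illustrative, not code from this commit:

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.Bits;

class SegmentFilterEval {
    // Counts how many documents of one segment match the filter, honouring live docs.
    static int countMatches(Filter filter, AtomicReaderContext context) throws IOException {
        Bits acceptDocs = context.reader().getLiveDocs();         // null means "no deletions"
        DocIdSet set = filter.getDocIdSet(context, acceptDocs);   // Lucene 4 signature
        if (set == null) {
            return 0;                                             // null is shorthand for "matches nothing"
        }
        DocIdSetIterator it = set.iterator();
        if (it == null) {
            return 0;
        }
        int count = 0;
        while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            count++;
        }
        return count;
    }
}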

FunctionScoreQuery.java

@@ -19,9 +19,11 @@
 package org.elasticsearch.common.lucene.search.function;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
+import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.ToStringUtils;
 import java.io.IOException;
@@ -63,17 +65,17 @@ public class FunctionScoreQuery extends Query {
 }
 @Override
-public Weight createWeight(Searcher searcher) throws IOException {
+public Weight createWeight(IndexSearcher searcher) throws IOException {
 return new CustomBoostFactorWeight(searcher);
 }
 class CustomBoostFactorWeight extends Weight {
-Searcher searcher;
+IndexSearcher searcher;
 Weight subQueryWeight;
-public CustomBoostFactorWeight(Searcher searcher) throws IOException {
+public CustomBoostFactorWeight(IndexSearcher searcher) throws IOException {
 this.searcher = searcher;
-this.subQueryWeight = subQuery.weight(searcher);
+this.subQueryWeight = subQuery.createWeight(searcher);
 }
public Query getQuery() {
@@ -98,23 +100,23 @@ public class FunctionScoreQuery extends Query {
 }
 @Override
-public Scorer scorer(IndexReader reader, boolean scoreDocsInOrder, boolean topScorer) throws IOException {
-Scorer subQueryScorer = subQueryWeight.scorer(reader, scoreDocsInOrder, false);
+public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder, boolean topScorer, Bits acceptDocs) throws IOException {
+Scorer subQueryScorer = subQueryWeight.scorer(context, scoreDocsInOrder, false, acceptDocs);
 if (subQueryScorer == null) {
 return null;
 }
-function.setNextReader(reader);
+function.setNextReader(context);
 return new CustomBoostFactorScorer(getSimilarity(searcher), this, subQueryScorer, function);
 }
 @Override
-public Explanation explain(IndexReader reader, int doc) throws IOException {
-Explanation subQueryExpl = subQueryWeight.explain(reader, doc);
+public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+Explanation subQueryExpl = subQueryWeight.explain(context, doc);
 if (!subQueryExpl.isMatch()) {
 return subQueryExpl;
 }
-function.setNextReader(reader);
+function.setNextReader(context);
 Explanation functionExplanation = function.explainScore(doc, subQueryExpl);
 float sc = getValue() * functionExplanation.getValue();
 Explanation res = new ComplexExplanation(true, sc, "custom score, product of:");
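
The Searcher-to-IndexSearcher switch above reflects Lucene 4 dropping the Searcher abstraction: a Weight now comes from Query.createWeight(IndexSearcher), or from IndexSearcher.createNormalizedWeight when query normalization should be applied. A rough usage sketch under those assumptions; WeightSketch, the field name, and the term value are illustrative, not part of this commit:

import java.io.IOException;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

class WeightSketch {
    // Builds a Weight the Lucene 4 way and asks it for one segment's scorer.
    static void scoreOneLeaf(IndexSearcher searcher, AtomicReaderContext leaf) throws IOException {
        Query query = new TermQuery(new Term("field", "value"));
        // one Lucene 4 replacement for the removed Query.weight(Searcher)
        Weight weight = searcher.createNormalizedWeight(query);
        Scorer scorer = weight.scorer(leaf, true, false, leaf.reader().getLiveDocs());
        if (scorer == null) {
            return;   // no matches in this segment
        }
        // scorer.nextDoc() / scorer.score() would walk the matches here
    }
}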

ScoreFunction.java

@@ -19,7 +19,7 @@
 package org.elasticsearch.common.lucene.search.function;
-import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.search.Explanation;
 /**
@@ -27,7 +27,7 @@ import org.apache.lucene.search.Explanation;
 */
 public interface ScoreFunction {
-void setNextReader(IndexReader reader);
+void setNextReader(AtomicReaderContext context);
 float score(int docId, float subQueryScore);

ScriptFilterParser.java

@@ -166,7 +166,7 @@ public class ScriptFilterParser implements FilterParser {
 @Override
 public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-searchScript.setNextReader(context.reader());
+searchScript.setNextReader(context);
 // LUCENE 4 UPGRADE: we can simply wrap this here since it is not cacheable and if we are not top level we will get a null passed anyway
 return BitsFilteredDocIdSet.wrap(new ScriptDocSet(context.reader(), searchScript), acceptDocs);
 }

ScriptFieldsFetchSubPhase.java

@@ -67,7 +67,7 @@ public class ScriptFieldsFetchSubPhase implements FetchSubPhase {
 @Override
 public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticSearchException {
 for (ScriptFieldsContext.ScriptField scriptField : context.scriptFields().fields()) {
-scriptField.script().setNextReader(hitContext.reader());
+scriptField.script().setNextReader(hitContext.readerContext());
 scriptField.script().setNextDocId(hitContext.docId());
 Object value;
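
The fetch-phase change above hands the script a per-hit AtomicReaderContext instead of a reader. Resolving which segment context a top-level hit id belongs to is the kind of lookup that sits behind a readerContext()-style accessor; below is a hedged sketch of that resolution in plain Lucene 4 terms. HitLeafLookup is an illustrative name, and this is not the Elasticsearch HitContext implementation:

import java.util.List;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.ReaderUtil;

class HitLeafLookup {
    // Maps a top-level (index-wide) doc id to the segment context that contains it.
    static AtomicReaderContext leafForHit(IndexReader topReader, int globalDocId) {
        List<AtomicReaderContext> leaves = topReader.leaves();
        int ord = ReaderUtil.subIndex(globalDocId, leaves);   // finds the leaf whose docBase range covers the id
        AtomicReaderContext leaf = leaves.get(ord);
        // the segment-relative id a script's setNextDocId(...) would then receive:
        int segmentDocId = globalDocId - leaf.docBase;
        assert segmentDocId >= 0;
        return leaf;
    }
}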