mirror of https://github.com/apache/lucene.git
Upgraded to Lucene 2.9-dev r801856
git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@801872 13f79535-47bb-0310-9956-ffa450edef68
parent 1b2549e92f
commit b51b515f6e
@@ -575,6 +575,8 @@ Other Changes
 
 43. SOLR-1261: Lucene trunk renamed RangeQuery & Co to TermRangeQuery (Uwe Schindler via shalin)
 
+44. Upgraded to Lucene 2.9-dev r801856 (Mark Miller)
+
 Build
 ----------------------
 1. SOLR-776: Added in ability to sign artifacts via Ant for releases (gsingers)
@@ -1,2 +1,2 @@
-AnyObjectId[3913f541b7e2915956524f4fc7ee4254dabc1449] was removed in git history.
+AnyObjectId[bed34d9cc9433b6a5153a308b4eb165fd4dfbec4] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[51b1184b0a653dbe09561e08cb7bb30936ccdd19] was removed in git history.
+AnyObjectId[2c09a22e4fd3c278ab827a66ba6c2783cd33b5aa] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[8732882f60d8c2c314257d02e1fb35e662313c14] was removed in git history.
+AnyObjectId[070f98d8b8f1734cea153a32d191fd0810a64165] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[4a6bad8fd3391c2dabdd8762d7fdff47511c8012] was removed in git history.
+AnyObjectId[ee9ef071b5dcaaec074131120f78c08865d638b6] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[89ffe35842473c57edcbecd24a116b6993826ae1] was removed in git history.
+AnyObjectId[b955549403c836db189f3a7af0322751d5fadda8] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[a2210e09cef58fe74c62b3cd67b995263477c999] was removed in git history.
+AnyObjectId[de31833e9d2bd31c40bc60fab1478699a9db771d] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[53b91de6e65f2610ba49f5870efb18df8b6f8398] was removed in git history.
+AnyObjectId[72f0d5c49c3bdb0371df2d34a558751b0ea564b4] was removed in git history.
 Apache SVN contains full history.
@@ -1,2 +1,2 @@
-AnyObjectId[668555685e6f196033d4aff7aaf22e1913205c23] was removed in git history.
+AnyObjectId[f23c609ef48c657d6c96490292fd7d6b5752af80] was removed in git history.
 Apache SVN contains full history.
@@ -225,8 +225,8 @@ public class QueryComponent extends SearchComponent
       }
 
       if (comparator == null) {
-        comparator = sortField.getComparator(1,0,sortField.getReverse());
-        comparator.setNextReader(subReader, offset, 0);
+        comparator = sortField.getComparator(1,0);
+        comparator.setNextReader(subReader, offset);
         if (comparators != null)
           comparators[idx] = comparator;
       }
@@ -477,7 +477,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
      values[slot] = docVal(doc);
    }
 
-   public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException {
+   public void setNextReader(IndexReader reader, int docBase) throws IOException {
      idIndex = FieldCache.DEFAULT.getStringIndex(reader, fieldname);
    }
 
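The two hunks above follow Lucene 2.9's slimmed-down comparator API: SortField.getComparator() drops the reverse flag and FieldComparator.setNextReader() drops the numSlotsFull argument. A minimal sketch of a comparator written against the new two-argument signature; the class name, field, and int-based ordering are illustrative, not part of this commit:

```java
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldComparator;

// Illustrative comparator: orders hits by a cached int field using the 2.9 API.
class SimpleIntComparator extends FieldComparator {
  private final String field;
  private final int[] values;        // one slot per hit in the priority queue
  private int[] currentReaderValues; // values for the segment currently being collected
  private int bottom;

  SimpleIntComparator(String field, int numHits) {
    this.field = field;
    this.values = new int[numHits];
  }

  public int compare(int slot1, int slot2) {
    int v1 = values[slot1], v2 = values[slot2];
    return v1 < v2 ? -1 : (v1 == v2 ? 0 : 1);
  }

  public int compareBottom(int doc) {
    int v = currentReaderValues[doc];
    return bottom < v ? -1 : (bottom == v ? 0 : 1);
  }

  public void copy(int slot, int doc) {
    values[slot] = currentReaderValues[doc];
  }

  public void setBottom(int slot) {
    bottom = values[slot];
  }

  // 2.9 signature: the old third numSlotsFull parameter is gone.
  public void setNextReader(IndexReader reader, int docBase) throws IOException {
    currentReaderValues = FieldCache.DEFAULT.getInts(reader, field);
  }

  public Comparable value(int slot) {
    return new Integer(values[slot]);
  }
}
```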
@@ -133,19 +133,21 @@ public class DefaultSolrHighlighter extends SolrHighlighter
    * @param request The SolrQueryRequest
    * @throws IOException
    */
-  private SpanScorer getSpanQueryScorer(Query query, String fieldName, CachingTokenFilter tokenStream, SolrQueryRequest request) throws IOException {
+  private QueryScorer getSpanQueryScorer(Query query, String fieldName, TokenStream tokenStream, SolrQueryRequest request) throws IOException {
     boolean reqFieldMatch = request.getParams().getFieldBool(fieldName, HighlightParams.FIELD_MATCH, false);
     Boolean highlightMultiTerm = request.getParams().getBool(HighlightParams.HIGHLIGHT_MULTI_TERM);
     if(highlightMultiTerm == null) {
       highlightMultiTerm = false;
     }
-
+    QueryScorer scorer;
     if (reqFieldMatch) {
-      return new SpanScorer(query, fieldName, tokenStream, highlightMultiTerm);
+      scorer = new QueryScorer(query, fieldName);
     }
     else {
-      return new SpanScorer(query, null, tokenStream, highlightMultiTerm);
+      scorer = new QueryScorer(query, null);
     }
+    scorer.setExpandMultiTermQuery(highlightMultiTerm);
+    return scorer;
   }
 
   /**
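The span-aware path now builds a Lucene 2.9 QueryScorer and toggles multi-term expansion on it, instead of constructing the old SpanScorer directly. A rough sketch of how such a scorer is typically wired into a Highlighter; the analyzer, formatter, and throws clause are illustrative assumptions, not code from this commit:

```java
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;

class QueryScorerSketch {
  // Returns the best-scoring fragment of fieldValue with query matches marked up.
  static String highlight(Query query, String fieldName, String fieldValue) throws Exception {
    QueryScorer scorer = new QueryScorer(query, fieldName);
    scorer.setExpandMultiTermQuery(true); // also highlight wildcard/prefix/range matches

    Highlighter highlighter = new Highlighter(new SimpleHTMLFormatter(), scorer);
    Analyzer analyzer = new StandardAnalyzer();
    TokenStream tokens = analyzer.tokenStream(fieldName, new StringReader(fieldValue));
    return highlighter.getBestFragment(tokens, fieldValue);
  }
}
```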
@@ -154,13 +156,13 @@ public class DefaultSolrHighlighter extends SolrHighlighter
    * @param fieldName The name of the field
    * @param request The SolrQueryRequest
    */
-  protected QueryScorer getQueryScorer(Query query, String fieldName, SolrQueryRequest request) {
+  protected QueryTermScorer getQueryScorer(Query query, String fieldName, SolrQueryRequest request) {
     boolean reqFieldMatch = request.getParams().getFieldBool(fieldName, HighlightParams.FIELD_MATCH, false);
     if (reqFieldMatch) {
-      return new QueryScorer(query, request.getSearcher().getReader(), fieldName);
+      return new QueryTermScorer(query, request.getSearcher().getReader(), fieldName);
     }
     else {
-      return new QueryScorer(query);
+      return new QueryTermScorer(query);
     }
   }
 
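On the term-based path, the class previously named QueryScorer is now QueryTermScorer; it highlights individual query terms and ignores position information. A brief sketch of the two constructor forms used above (the wrapper methods are illustrative):

```java
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryTermScorer;

class QueryTermScorerSketch {
  // Field-aware form: only terms targeting the given field are highlighted.
  static Highlighter forField(Query query, IndexReader reader, String fieldName) {
    return new Highlighter(new QueryTermScorer(query, reader, fieldName));
  }

  // Field-agnostic form: every query term is highlighted wherever it appears.
  static Highlighter forAnyField(Query query) {
    return new Highlighter(new QueryTermScorer(query));
  }
}
```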
@@ -17,6 +17,10 @@
 package org.apache.solr.highlight;
 
 import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermAttribute;
 import org.apache.lucene.search.highlight.Fragmenter;
 import org.apache.lucene.search.highlight.NullFragmenter;
 import org.apache.lucene.search.highlight.SimpleFragmenter;
@@ -77,6 +81,9 @@ class LuceneGapFragmenter extends SimpleFragmenter {
   public static final int INCREMENT_THRESHOLD = 50;
   protected int fragOffset = 0;
 
+  private OffsetAttribute offsetAtt;
+  private PositionIncrementAttribute posIncAtt;
+
   public LuceneGapFragmenter() {
   }
 
@@ -87,19 +94,22 @@ class LuceneGapFragmenter extends SimpleFragmenter {
   /* (non-Javadoc)
    * @see org.apache.lucene.search.highlight.TextFragmenter#start(java.lang.String)
    */
-  public void start(String originalText) {
+  public void start(String originalText, TokenStream tokenStream) {
+    offsetAtt = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class);
+    posIncAtt = (PositionIncrementAttribute) tokenStream.getAttribute(PositionIncrementAttribute.class);
     fragOffset = 0;
   }
 
   /* (non-Javadoc)
    * @see org.apache.lucene.search.highlight.TextFragmenter#isNewFragment(org.apache.lucene.analysis.Token)
    */
-  public boolean isNewFragment(Token token) {
+  public boolean isNewFragment() {
+    int endOffset = offsetAtt.endOffset();
     boolean isNewFrag =
-      token.endOffset() >= fragOffset + getFragmentSize() ||
-      token.getPositionIncrement() > INCREMENT_THRESHOLD;
+      endOffset >= fragOffset + getFragmentSize() ||
+      posIncAtt.getPositionIncrement() > INCREMENT_THRESHOLD;
     if(isNewFrag) {
-      fragOffset = token.endOffset();
+      fragOffset = endOffset;
     }
     return isNewFrag;
   }
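Lucene 2.9's Fragmenter no longer receives a Token per call: start() now takes the TokenStream, and isNewFragment() reads the current token's state from the stream's attributes, as the rewritten fragmenter above does. A stripped-down sketch of that pattern; the class and its fixed fragment size are illustrative:

```java
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.search.highlight.Fragmenter;

// Illustrative fragmenter: starts a new fragment roughly every `size` characters.
class FixedSizeFragmenter implements Fragmenter {
  private final int size;
  private OffsetAttribute offsetAtt;
  private int fragStart;

  FixedSizeFragmenter(int size) {
    this.size = size;
  }

  // 2.9 signature: grab the attributes once, before tokens start flowing.
  public void start(String originalText, TokenStream tokenStream) {
    offsetAtt = (OffsetAttribute) tokenStream.addAttribute(OffsetAttribute.class);
    fragStart = 0;
  }

  // 2.9 signature: no Token argument; read the current token's offsets from the attribute.
  public boolean isNewFragment() {
    int endOffset = offsetAtt.endOffset();
    boolean isNew = endOffset >= fragStart + size;
    if (isNew) {
      fragStart = endOffset;
    }
    return isNew;
  }
}
```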
@@ -21,7 +21,9 @@ import java.util.Arrays;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.lucene.analysis.Token;
+import org.apache.lucene.analysis.TokenStream;
+import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
+import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.search.highlight.Fragmenter;
 import org.apache.lucene.search.highlight.NullFragmenter;
 import org.apache.solr.common.params.DefaultSolrParams;
@@ -146,6 +148,9 @@ class LuceneRegexFragmenter implements Fragmenter
   protected int targetOffset;
   protected int[] hotspots;
 
+  private PositionIncrementAttribute posIncAtt;
+  private OffsetAttribute offsetAtt;
+
   // ** other
   // note: could dynamically change size of sentences extracted to match
   // target frag size
@@ -193,10 +198,12 @@ class LuceneRegexFragmenter implements Fragmenter
   /* (non-Javadoc)
    * @see org.apache.lucene.search.highlight.TextFragmenter#start(java.lang.String)
    */
-  public void start(String originalText) {
+  public void start(String originalText, TokenStream tokenStream) {
     currentNumFrags = 1;
     currentOffset = 0;
     addHotSpots(originalText);
+    posIncAtt = (PositionIncrementAttribute) tokenStream.getAttribute(PositionIncrementAttribute.class);
+    offsetAtt = (OffsetAttribute) tokenStream.getAttribute(OffsetAttribute.class);
   }
 
   ////////////////////////////////////
@@ -231,23 +238,24 @@ class LuceneRegexFragmenter implements Fragmenter
   /* (non-Javadoc)
    * @see org.apache.lucene.search.highlight.TextFragmenter#isNewFragment(org.apache.lucene.analysis.Token)
    */
-  public boolean isNewFragment(Token token)
+  public boolean isNewFragment()
   {
     boolean isNewFrag = false;
     int minFragLen = (int)((1.0f - slop)*targetFragChars);
 
+    int endOffset = offsetAtt.endOffset();
     // ** determin isNewFrag
-    if(token.getPositionIncrement() > incrementGapThreshold) {
+    if(posIncAtt.getPositionIncrement() > incrementGapThreshold) {
       // large position gaps always imply new fragments
       isNewFrag = true;
 
-    } else if(token.endOffset() - currentOffset < minFragLen) {
+    } else if(endOffset - currentOffset < minFragLen) {
       // we're not in our range of flexibility
       isNewFrag = false;
 
     } else if(targetOffset > 0) {
       // we've already decided on a target
-      isNewFrag = token.endOffset() > targetOffset;
+      isNewFrag = endOffset > targetOffset;
 
     } else {
       // we might be able to do something
@@ -256,7 +264,7 @@ class LuceneRegexFragmenter implements Fragmenter
       int hotIndex;
 
       // look for a close hotspot
-      hotIndex = Arrays.binarySearch(hotspots, token.endOffset());
+      hotIndex = Arrays.binarySearch(hotspots, endOffset);
       if(hotIndex < 0) hotIndex = -hotIndex;
       if(hotIndex >= hotspots.length) {
         // no more hotspots in this input stream
@@ -276,13 +284,13 @@ class LuceneRegexFragmenter implements Fragmenter
         targetOffset = goal <= maxOffset ? goal : currentOffset + targetFragChars;
       }
 
-      isNewFrag = token.endOffset() > targetOffset;
+      isNewFrag = endOffset > targetOffset;
     }
 
     // ** operate on isNewFrag
     if(isNewFrag) {
       currentNumFrags++;
-      currentOffset = token.endOffset();
+      currentOffset = endOffset;
       targetOffset = -1;
     }
     return isNewFrag;
@@ -130,7 +130,7 @@ public class RandomSortField extends FieldType {
         values[slot] = hash(doc+seed);
       }
 
-      public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException {
+      public void setNextReader(IndexReader reader, int docBase) throws IOException {
         seed = getSeed(fieldname, reader);
       }
 
@@ -177,7 +177,7 @@ class MissingLastOrdComparator extends FieldComparator {
       readerGen[slot] = currentReaderGen;
     }
 
-    public void setNextReader(IndexReader reader, int docBase, int numSlotsFull) throws IOException {
+    public void setNextReader(IndexReader reader, int docBase) throws IOException {
       FieldCache.StringIndex currentReaderValues = FieldCache.DEFAULT.getStringIndex(reader, field);
       currentReaderGen++;
       order = currentReaderValues.order;
@@ -144,7 +144,7 @@ public class SolrQueryParser extends QueryParser {
     // (sortable numeric types don't do prefixes, but can do range queries)
     Term t = new Term(field, termStr);
     PrefixQuery prefixQuery = new PrefixQuery(t);
-    prefixQuery.setConstantScoreRewrite(true);
+    prefixQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
     return prefixQuery;
   }
 
@@ -153,7 +153,7 @@ public class SolrQueryParser extends QueryParser {
     if (q instanceof WildcardQuery) {
       // use a constant score query to avoid overflowing clauses
       WildcardQuery wildcardQuery = new WildcardQuery(((WildcardQuery)q).getTerm());
-      wildcardQuery.setConstantScoreRewrite(true);
+      wildcardQuery.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
       return wildcardQuery;
     }
     return q;
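setConstantScoreRewrite(true) no longer exists in Lucene 2.9; multi-term queries take a rewrite-method constant instead, and both hunks above switch to the filter-backed variant. A small sketch of the same idiom on stand-alone queries; the field and term values are illustrative:

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.WildcardQuery;

class ConstantScoreRewriteSketch {
  static PrefixQuery prefix(String field, String prefix) {
    PrefixQuery q = new PrefixQuery(new Term(field, prefix));
    // Rewrites through a filter, avoiding BooleanQuery.TooManyClauses on broad prefixes.
    q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
    return q;
  }

  static WildcardQuery wildcard(String field, String pattern) {
    WildcardQuery q = new WildcardQuery(new Term(field, pattern));
    q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
    return q;
  }
}
```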
@@ -33,6 +33,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
+import java.lang.reflect.InvocationTargetException;
 import java.text.DateFormat;
 import java.util.Date;
 
@@ -71,7 +72,10 @@ public class SolrIndexWriter extends IndexWriter {
     if (config.maxMergeDocs != -1) setMaxMergeDocs(config.maxMergeDocs);
     if (config.maxFieldLength != -1) setMaxFieldLength(config.maxFieldLength);
     if (config.mergePolicyClassName != null && SolrIndexConfig.DEFAULT_MERGE_POLICY_CLASSNAME.equals(config.mergePolicyClassName) == false) {
-      MergePolicy policy = (MergePolicy) schema.getResourceLoader().newInstance(config.mergePolicyClassName);
+      MergePolicy policy = null;
+
+      policy = (MergePolicy) schema.getResourceLoader().newInstance(config.mergePolicyClassName, new String[]{}, new Class[]{IndexWriter.class}, new Object[] { this });
+
       setMergePolicy(policy);///hmm, is this really the best way to get a newInstance?
     }
     if (config.mergeFactor != -1 && getMergePolicy() instanceof LogMergePolicy) {
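The reflection call now hands the SolrIndexWriter itself to the merge policy, since Lucene 2.9 merge policies expect the owning IndexWriter in their constructor. A hedged sketch of a custom policy that would satisfy the new Class[]{IndexWriter.class} lookup; the class and its mergeFactor tweak are hypothetical, not part of this commit:

```java
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LogByteSizeMergePolicy;

// Hypothetical policy: the single IndexWriter constructor is what the
// Class[]{IndexWriter.class} lookup in SolrIndexWriter now requires.
public class CustomMergePolicy extends LogByteSizeMergePolicy {
  public CustomMergePolicy(IndexWriter writer) {
    super(writer);
    setMergeFactor(20); // illustrative tuning only
  }
}
```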
@@ -92,7 +92,7 @@ public class HighlightingUtils implements HighlightParams {
    * @deprecated use DefaultSolrHighlighter
    * @see DefaultSolrHighlighter
    */
-  public static QueryScorer getQueryScorer(Query query, String fieldName, SolrQueryRequest request) {
+  public static QueryTermScorer getQueryScorer(Query query, String fieldName, SolrQueryRequest request) {
     return HIGHLIGHTER.getQueryScorerX(query, fieldName, request);
   }
 
@@ -145,7 +145,7 @@ class SolrHighlighterX extends DefaultSolrHighlighter {
   Highlighter getHighlighterX(Query query, String fieldName, SolrQueryRequest request) {
     return getHighlighter(query, fieldName, request);
   }
-  QueryScorer getQueryScorerX(Query query, String fieldName, SolrQueryRequest request) {
+  QueryTermScorer getQueryScorerX(Query query, String fieldName, SolrQueryRequest request) {
     return getQueryScorer(query, fieldName, request);
   }
   int getMaxSnippetsX(String fieldName, SolrQueryRequest request) {