Upgrade to Lucene 4.6
This commit upgrades to Lucene 4.6 and contains the following improvements:

* Remove XIndexWriter in favor of the fixed IndexWriter
* Remove the patched XLuceneConstantScoreQuery
* Use the Lucene passage formatters, contributed from Elasticsearch, in PostingsHighlighter
* Upgrade to the Lucene46 codec from the Lucene45 codec
* Fix a problem in CommonTermsQueryParser where close was never called on the TokenStream

Closes #4241
parent 28adbd475d
commit 8e17d636ef

pom.xml
@@ -30,7 +30,7 @@
     </parent>
 
     <properties>
-        <lucene.version>4.5.1</lucene.version>
+        <lucene.version>4.6.0</lucene.version>
         <tests.jvms>1</tests.jvms>
         <tests.shuffle>true</tests.shuffle>
         <tests.output>onerror</tests.output>
@@ -1,75 +0,0 @@
-/*
- * Licensed to ElasticSearch and Shay Banon under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. ElasticSearch licenses this
- * file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.lucene.index;
-
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Version;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-
-public final class XIndexWriter extends IndexWriter {
-
-    private static final Method processEvents;
-
-    static {
-        // fix for https://issues.apache.org/jira/browse/LUCENE-5330
-        assert Version.LUCENE_45.onOrAfter(org.elasticsearch.Version.CURRENT.luceneVersion) : "This should be fixed in LUCENE-4.6";
-        try {
-            processEvents = IndexWriter.class.getDeclaredMethod("processEvents", boolean.class, boolean.class);
-            processEvents.setAccessible(true);
-        } catch (NoSuchMethodException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public XIndexWriter(Directory d, IndexWriterConfig conf) throws IOException {
-        super(d, conf);
-    }
-
-    private void processEvents() {
-        try {
-            processEvents.invoke(this, false, true);
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    @Override
-    public void rollback() throws IOException {
-        super.rollback();
-        processEvents();
-    }
-
-    @Override
-    public void close(boolean waitForMerges) throws IOException {
-        super.close(waitForMerges);
-        processEvents();
-    }
-
-    @Override
-    DirectoryReader getReader(boolean applyAllDeletes) throws IOException {
-        DirectoryReader reader = super.getReader(applyAllDeletes);
-        processEvents();
-        return reader;
-    }
-
-}
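Note: the deleted class above existed only to invoke IndexWriter#processEvents reflectively after rollback/close/getReader, working around the LUCENE-5330 leak. With events processed inside IndexWriter itself in 4.6, a plain writer suffices. A minimal sketch (the demo class and directory choice are illustrative, not part of the commit):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    // Hypothetical demo class, not part of the commit.
    public class PlainWriterDemo {
        public static void main(String[] args) throws Exception {
            Directory dir = new RAMDirectory();
            IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46,
                    new StandardAnalyzer(Version.LUCENE_46));
            // A plain IndexWriter suffices on 4.6; the reflective
            // processEvents() workaround above is no longer needed.
            IndexWriter writer = new IndexWriter(dir, config);
            writer.close(); // pending events are handled by IndexWriter itself (LUCENE-5330)
        }
    }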
@@ -102,7 +102,7 @@ public class MapperQueryParser extends QueryParser {
         }
 
         this.forcedAnalyzer = settings.forcedAnalyzer() != null;
-        this.analyzer = forcedAnalyzer ? settings.forcedAnalyzer() : settings.defaultAnalyzer();
+        this.setAnalyzer(forcedAnalyzer ? settings.forcedAnalyzer() : settings.defaultAnalyzer());
         if (settings.forcedQuoteAnalyzer() != null) {
             this.forcedQuoteAnalyzer = true;
             this.quoteAnalyzer = settings.forcedQuoteAnalyzer();

@@ -216,11 +216,11 @@ public class MapperQueryParser extends QueryParser {
             }
         }
         currentMapper = null;
-        Analyzer oldAnalyzer = analyzer;
+        Analyzer oldAnalyzer = getAnalyzer();
         try {
             MapperService.SmartNameFieldMappers fieldMappers = null;
             if (quoted) {
-                analyzer = quoteAnalyzer;
+                setAnalyzer(quoteAnalyzer);
                 if (quoteFieldSuffix != null) {
                     fieldMappers = parseContext.smartFieldMappers(field + quoteFieldSuffix);
                 }

@@ -231,11 +231,11 @@ public class MapperQueryParser extends QueryParser {
             if (fieldMappers != null) {
                 if (quoted) {
                     if (!forcedQuoteAnalyzer) {
-                        analyzer = fieldMappers.searchQuoteAnalyzer();
+                        setAnalyzer(fieldMappers.searchQuoteAnalyzer());
                     }
                 } else {
                     if (!forcedAnalyzer) {
-                        analyzer = fieldMappers.searchAnalyzer();
+                        setAnalyzer(fieldMappers.searchAnalyzer());
                     }
                 }
                 currentMapper = fieldMappers.fieldMappers().mapper();

@@ -269,7 +269,7 @@ public class MapperQueryParser extends QueryParser {
             }
             return super.getFieldQuery(field, queryText, quoted);
         } finally {
-            analyzer = oldAnalyzer;
+            setAnalyzer(oldAnalyzer);
         }
     }
 

@@ -502,12 +502,12 @@ public class MapperQueryParser extends QueryParser {
 
     private Query getPrefixQuerySingle(String field, String termStr) throws ParseException {
         currentMapper = null;
-        Analyzer oldAnalyzer = analyzer;
+        Analyzer oldAnalyzer = getAnalyzer();
         try {
             MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
             if (fieldMappers != null) {
                 if (!forcedAnalyzer) {
-                    analyzer = fieldMappers.searchAnalyzer();
+                    setAnalyzer(fieldMappers.searchAnalyzer());
                 }
                 currentMapper = fieldMappers.fieldMappers().mapper();
                 if (currentMapper != null) {

@@ -537,7 +537,7 @@ public class MapperQueryParser extends QueryParser {
             }
             throw e;
         } finally {
-            analyzer = oldAnalyzer;
+            setAnalyzer(oldAnalyzer);
         }
     }
 

@@ -655,12 +655,12 @@ public class MapperQueryParser extends QueryParser {
     private Query getWildcardQuerySingle(String field, String termStr) throws ParseException {
         String indexedNameField = field;
         currentMapper = null;
-        Analyzer oldAnalyzer = analyzer;
+        Analyzer oldAnalyzer = getAnalyzer();
         try {
             MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
             if (fieldMappers != null) {
                 if (!forcedAnalyzer) {
-                    analyzer = fieldMappers.searchAnalyzer();
+                    setAnalyzer(fieldMappers.searchAnalyzer());
                 }
                 currentMapper = fieldMappers.fieldMappers().mapper();
                 if (currentMapper != null) {

@@ -675,7 +675,7 @@ public class MapperQueryParser extends QueryParser {
             }
             throw e;
         } finally {
-            analyzer = oldAnalyzer;
+            setAnalyzer(oldAnalyzer);
         }
     }
 

@@ -790,12 +790,12 @@ public class MapperQueryParser extends QueryParser {
 
     private Query getRegexpQuerySingle(String field, String termStr) throws ParseException {
         currentMapper = null;
-        Analyzer oldAnalyzer = analyzer;
+        Analyzer oldAnalyzer = getAnalyzer();
        try {
             MapperService.SmartNameFieldMappers fieldMappers = parseContext.smartFieldMappers(field);
             if (fieldMappers != null) {
                 if (!forcedAnalyzer) {
-                    analyzer = fieldMappers.searchAnalyzer();
+                    setAnalyzer(fieldMappers.searchAnalyzer());
                 }
                 currentMapper = fieldMappers.fieldMappers().mapper();
                 if (currentMapper != null) {

@@ -825,7 +825,7 @@ public class MapperQueryParser extends QueryParser {
             }
             throw e;
         } finally {
-            analyzer = oldAnalyzer;
+            setAnalyzer(oldAnalyzer);
         }
     }
 
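Note: every MapperQueryParser hunk above follows one pattern — Lucene 4.6 put the parser's analyzer behind accessors, so direct reads and writes of the old protected field become getAnalyzer()/setAnalyzer() calls. A hedged sketch of the swap-and-restore idiom (the subclass and field name are placeholders, not the real MapperQueryParser):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.queryparser.classic.ParseException;
    import org.apache.lucene.queryparser.classic.QueryParser;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.util.Version;

    // Hypothetical subclass showing the idiom used throughout the diff above.
    class AnalyzerSwappingParser extends QueryParser {
        AnalyzerSwappingParser(Analyzer defaultAnalyzer) {
            super(Version.LUCENE_46, "field", defaultAnalyzer);
        }

        Query parseWith(Analyzer fieldAnalyzer, String text) throws ParseException {
            Analyzer oldAnalyzer = getAnalyzer();   // was: direct read of the analyzer field
            try {
                setAnalyzer(fieldAnalyzer);         // was: analyzer = fieldAnalyzer;
                return parse(text);
            } finally {
                setAnalyzer(oldAnalyzer);           // restore even on ParseException
            }
        }
    }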
@@ -1,250 +0,0 @@
-package org.apache.lucene.search;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.apache.lucene.index.AtomicReaderContext;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.Version;
-import org.elasticsearch.common.lucene.Lucene;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Collections;
-
-/**
- * Extension of {@link ConstantScoreQuery} that works around LUCENE-5307.
- */
-// we extend CSQ so that highlighters know how to deal with this query
-public class XLuceneConstantScoreQuery extends ConstantScoreQuery {
-
-  static {
-    assert Version.LUCENE_45.onOrAfter(Lucene.VERSION) : "Lucene 4.6 CSQ is fixed, remove this one!";
-  }
-
-  public XLuceneConstantScoreQuery(Query filter) {
-    super(filter);
-  }
-
-  public XLuceneConstantScoreQuery(Filter filter) {
-    super(filter);
-  }
-
-  @Override
-  public Query rewrite(IndexReader reader) throws IOException {
-    if (query != null) {
-      Query rewritten = query.rewrite(reader);
-      if (rewritten != query) {
-        rewritten = new XLuceneConstantScoreQuery(rewritten);
-        rewritten.setBoost(this.getBoost());
-        return rewritten;
-      }
-    } else {
-      assert filter != null;
-      // Fix outdated usage pattern from Lucene 2.x/early-3.x:
-      // because ConstantScoreQuery only accepted filters,
-      // QueryWrapperFilter was used to wrap queries.
-      if (filter instanceof QueryWrapperFilter) {
-        final QueryWrapperFilter qwf = (QueryWrapperFilter) filter;
-        final Query rewritten = new XLuceneConstantScoreQuery(qwf.getQuery().rewrite(reader));
-        rewritten.setBoost(this.getBoost());
-        return rewritten;
-      }
-    }
-    return this;
-  }
-
-  @Override
-  public Weight createWeight(IndexSearcher searcher) throws IOException {
-    return new XConstantWeight(searcher);
-  }
-
-  protected class XConstantWeight extends Weight {
-    private final Weight innerWeight;
-    private float queryNorm;
-    private float queryWeight;
-
-    public XConstantWeight(IndexSearcher searcher) throws IOException {
-      this.innerWeight = (query == null) ? null : query.createWeight(searcher);
-    }
-
-    @Override
-    public Query getQuery() {
-      return XLuceneConstantScoreQuery.this;
-    }
-
-    @Override
-    public float getValueForNormalization() throws IOException {
-      // we calculate sumOfSquaredWeights of the inner weight, but ignore it (just to initialize everything)
-      if (innerWeight != null) innerWeight.getValueForNormalization();
-      queryWeight = getBoost();
-      return queryWeight * queryWeight;
-    }
-
-    @Override
-    public void normalize(float norm, float topLevelBoost) {
-      this.queryNorm = norm * topLevelBoost;
-      queryWeight *= this.queryNorm;
-      // we normalize the inner weight, but ignore it (just to initialize everything)
-      if (innerWeight != null) innerWeight.normalize(norm, topLevelBoost);
-    }
-
-    @Override
-    public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
-        boolean topScorer, final Bits acceptDocs) throws IOException {
-      final DocIdSetIterator disi;
-      if (filter != null) {
-        assert query == null;
-        final DocIdSet dis = filter.getDocIdSet(context, acceptDocs);
-        if (dis == null) {
-          return null;
-        }
-        disi = dis.iterator();
-      } else {
-        assert query != null && innerWeight != null;
-        disi = innerWeight.scorer(context, scoreDocsInOrder, topScorer, acceptDocs);
-      }
-
-      if (disi == null) {
-        return null;
-      }
-      return new XConstantScorer(disi, this, queryWeight);
-    }
-
-    @Override
-    public boolean scoresDocsOutOfOrder() {
-      return (innerWeight != null) ? innerWeight.scoresDocsOutOfOrder() : false;
-    }
-
-    @Override
-    public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
-      final Scorer cs = scorer(context, true, false, context.reader().getLiveDocs());
-      final boolean exists = (cs != null && cs.advance(doc) == doc);
-
-      final ComplexExplanation result = new ComplexExplanation();
-      if (exists) {
-        result.setDescription(XLuceneConstantScoreQuery.this.toString() + ", product of:");
-        result.setValue(queryWeight);
-        result.setMatch(Boolean.TRUE);
-        result.addDetail(new Explanation(getBoost(), "boost"));
-        result.addDetail(new Explanation(queryNorm, "queryNorm"));
-      } else {
-        result.setDescription(XLuceneConstantScoreQuery.this.toString() + " doesn't match id " + doc);
-        result.setValue(0);
-        result.setMatch(Boolean.FALSE);
-      }
-      return result;
-    }
-  }
-
-  protected class XConstantScorer extends Scorer {
-    final DocIdSetIterator docIdSetIterator;
-    final float theScore;
-
-    public XConstantScorer(DocIdSetIterator docIdSetIterator, Weight w, float theScore) {
-      super(w);
-      this.theScore = theScore;
-      this.docIdSetIterator = docIdSetIterator;
-    }
-
-    @Override
-    public int nextDoc() throws IOException {
-      return docIdSetIterator.nextDoc();
-    }
-
-    @Override
-    public int docID() {
-      return docIdSetIterator.docID();
-    }
-
-    @Override
-    public float score() throws IOException {
-      assert docIdSetIterator.docID() != NO_MORE_DOCS;
-      return theScore;
-    }
-
-    @Override
-    public int freq() throws IOException {
-      return 1;
-    }
-
-    @Override
-    public int advance(int target) throws IOException {
-      return docIdSetIterator.advance(target);
-    }
-
-    @Override
-    public long cost() {
-      return docIdSetIterator.cost();
-    }
-
-    private Collector wrapCollector(final Collector collector) {
-      return new Collector() {
-        @Override
-        public void setScorer(Scorer scorer) throws IOException {
-          // we must wrap again here, but using the scorer passed in as parameter:
-          collector.setScorer(new ConstantScorer(scorer, XConstantScorer.this.weight, XConstantScorer.this.theScore));
-        }
-
-        @Override
-        public void collect(int doc) throws IOException {
-          collector.collect(doc);
-        }
-
-        @Override
-        public void setNextReader(AtomicReaderContext context) throws IOException {
-          collector.setNextReader(context);
-        }
-
-        @Override
-        public boolean acceptsDocsOutOfOrder() {
-          return collector.acceptsDocsOutOfOrder();
-        }
-      };
-    }
-
-    // this optimization allows out of order scoring as top scorer!
-    @Override
-    public void score(Collector collector) throws IOException {
-      if (query != null) {
-        ((Scorer) docIdSetIterator).score(wrapCollector(collector));
-      } else {
-        super.score(collector);
-      }
-    }
-
-    // this optimization allows out of order scoring as top scorer,
-    @Override
-    public boolean score(Collector collector, int max, int firstDocID) throws IOException {
-      if (query != null) {
-        return ((Scorer) docIdSetIterator).score(wrapCollector(collector), max, firstDocID);
-      } else {
-        return super.score(collector, max, firstDocID);
-      }
-    }
-
-    @Override
-    public Collection<ChildScorer> getChildren() {
-      if (query != null)
-        return Collections.singletonList(new ChildScorer((Scorer) docIdSetIterator, "constant"));
-      else
-        return Collections.emptyList();
-    }
-  }
-
-}
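Note: this class existed only because ConstantScoreQuery did not rewrite wrapped queries correctly before 4.6 (LUCENE-5307). With the fix upstream, callers revert to the stock class. A small hedged sketch of the replacement usage (helper class and field name are illustrative):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.queries.TermFilter;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.Query;

    // Hypothetical helper; mirrors the IdsQueryParser change later in this diff.
    class ConstantScoreExample {
        static Query idQuery(String uid, float boost) {
            ConstantScoreQuery query = new ConstantScoreQuery(new TermFilter(new Term("_uid", uid)));
            query.setBoost(boost); // boost is preserved by the fixed rewrite() (LUCENE-5307)
            return query;
        }
    }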
@@ -26,7 +26,7 @@ Custom passage formatter that allows us to:
 1) extract different snippets (instead of a single big string) together with their scores ({@link Snippet})
 2) use the {@link Encoder} implementations that are already used with the other highlighters
 */
-public class CustomPassageFormatter extends XPassageFormatter {
+public class CustomPassageFormatter extends PassageFormatter {
 
     private final String preTag;
     private final String postTag;
 
@@ -135,7 +135,7 @@ public final class CustomPostingsHighlighter extends XPostingsHighlighter {
     }
 
     @Override
-    protected XPassageFormatter getFormatter(String field) {
+    protected PassageFormatter getFormatter(String field) {
         return passageFormatter;
     }
 
@@ -1,147 +0,0 @@
-package org.apache.lucene.search.postingshighlight;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.elasticsearch.Version;
-
-/**
- * Creates a formatted snippet from the top passages.
- * <p>
- * The default implementation marks the query terms as bold, and places
- * ellipses between unconnected passages.
- */
-//LUCENE MONITOR - REMOVE ME WHEN LUCENE 4.6 IS OUT
-//Applied LUCENE-4906 to be able to return arbitrary objects
-public class XDefaultPassageFormatter extends XPassageFormatter {
-
-    static {
-        assert Version.CURRENT.luceneVersion.compareTo(org.apache.lucene.util.Version.LUCENE_45) == 0 : "Remove XDefaultPassageFormatter once 4.6 is out";
-    }
-
-    /** text that will appear before highlighted terms */
-    protected final String preTag;
-    /** text that will appear after highlighted terms */
-    protected final String postTag;
-    /** text that will appear between two unconnected passages */
-    protected final String ellipsis;
-    /** true if we should escape for html */
-    protected final boolean escape;
-
-    /**
-     * Creates a new DefaultPassageFormatter with the default tags.
-     */
-    public XDefaultPassageFormatter() {
-        this("<b>", "</b>", "... ", false);
-    }
-
-    /**
-     * Creates a new DefaultPassageFormatter with custom tags.
-     * @param preTag text which should appear before a highlighted term.
-     * @param postTag text which should appear after a highlighted term.
-     * @param ellipsis text which should be used to connect two unconnected passages.
-     * @param escape true if text should be html-escaped
-     */
-    public XDefaultPassageFormatter(String preTag, String postTag, String ellipsis, boolean escape) {
-        if (preTag == null || postTag == null || ellipsis == null) {
-            throw new NullPointerException();
-        }
-        this.preTag = preTag;
-        this.postTag = postTag;
-        this.ellipsis = ellipsis;
-        this.escape = escape;
-    }
-
-    @Override
-    public String format(Passage passages[], String content) {
-        StringBuilder sb = new StringBuilder();
-        int pos = 0;
-        for (Passage passage : passages) {
-            // don't add ellipsis if its the first one, or if its connected.
-            if (passage.startOffset > pos && pos > 0) {
-                sb.append(ellipsis);
-            }
-            pos = passage.startOffset;
-            for (int i = 0; i < passage.numMatches; i++) {
-                int start = passage.matchStarts[i];
-                int end = passage.matchEnds[i];
-                // its possible to have overlapping terms
-                if (start > pos) {
-                    append(sb, content, pos, start);
-                }
-                if (end > pos) {
-                    sb.append(preTag);
-                    append(sb, content, Math.max(pos, start), end);
-                    sb.append(postTag);
-                    pos = end;
-                }
-            }
-            // its possible a "term" from the analyzer could span a sentence boundary.
-            append(sb, content, pos, Math.max(pos, passage.endOffset));
-            pos = passage.endOffset;
-        }
-        return sb.toString();
-    }
-
-    /**
-     * Appends original text to the response.
-     * @param dest resulting text, possibly transformed or encoded
-     * @param content original text content
-     * @param start index of the first character in content
-     * @param end index of the character following the last character in content
-     */
-    protected void append(StringBuilder dest, String content, int start, int end) {
-        if (escape) {
-            // note: these are the rules from owasp.org
-            for (int i = start; i < end; i++) {
-                char ch = content.charAt(i);
-                switch(ch) {
-                    case '&':
-                        dest.append("&amp;");
-                        break;
-                    case '<':
-                        dest.append("&lt;");
-                        break;
-                    case '>':
-                        dest.append("&gt;");
-                        break;
-                    case '"':
-                        dest.append("&quot;");
-                        break;
-                    case '\'':
-                        dest.append("&#x27;");
-                        break;
-                    case '/':
-                        dest.append("&#x2F;");
-                        break;
-                    default:
-                        if (ch >= 0x30 && ch <= 0x39 || ch >= 0x41 && ch <= 0x5A || ch >= 0x61 && ch <= 0x7A) {
-                            dest.append(ch);
-                        } else if (ch < 0xff) {
-                            dest.append("&#");
-                            dest.append((int)ch);
-                            dest.append(";");
-                        } else {
-                            dest.append(ch);
-                        }
-                }
-            }
-        } else {
-            dest.append(content, start, end);
-        }
-    }
-}
@@ -1,46 +0,0 @@
-package org.apache.lucene.search.postingshighlight;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import org.elasticsearch.Version;
-
-/**
- * Creates a formatted snippet from the top passages.
- *
- * @lucene.experimental
- */
-//LUCENE MONITOR - REMOVE ME WHEN LUCENE 4.6 IS OUT
-//Applied LUCENE-4906 to be able to return arbitrary objects
-public abstract class XPassageFormatter {
-
-    static {
-        assert Version.CURRENT.luceneVersion.compareTo(org.apache.lucene.util.Version.LUCENE_45) == 0 : "Remove XPassageFormatter once 4.6 is out";
-    }
-
-    /**
-     * Formats the top <code>passages</code> from <code>content</code>
-     * into a human-readable text snippet.
-     *
-     * @param passages top-N passages for the field. Note these are sorted in
-     *        the order that they appear in the document for convenience.
-     * @param content content for the field.
-     * @return formatted highlight
-     */
-    public abstract Object format(Passage passages[], String content);
-
-}
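Note: with both X formatter classes gone, custom highlighting goes through Lucene 4.6's own PassageFormatter, which adopted the Object return type from LUCENE-4906. A minimal sketch of overriding the formatter on a stock PostingsHighlighter, assuming the stock DefaultPassageFormatter is an acceptable delegate (tags are illustrative):

    import org.apache.lucene.search.postingshighlight.DefaultPassageFormatter;
    import org.apache.lucene.search.postingshighlight.PassageFormatter;
    import org.apache.lucene.search.postingshighlight.PostingsHighlighter;

    // Hypothetical usage sketch, not code from this commit.
    class HighlighterFactory {
        static PostingsHighlighter emphasisHighlighter() {
            return new PostingsHighlighter() {
                @Override
                protected PassageFormatter getFormatter(String field) {
                    // Since LUCENE-4906, format(...) may return arbitrary
                    // objects, not just String, so custom formatters plug in here.
                    return new DefaultPassageFormatter("<em>", "</em>", "... ", false);
                }
            };
        }
    }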
@@ -71,7 +71,7 @@ public class XPostingsHighlighter {
 
   /** Set the first time {@link #getFormatter} is called,
    *  and then reused. */
-  private XPassageFormatter defaultFormatter;
+  private PassageFormatter defaultFormatter;
 
   /** Set the first time {@link #getScorer} is called,
    *  and then reused. */

@@ -110,9 +110,9 @@ public class XPostingsHighlighter {
    * formatting passages into highlighted snippets. This
    * returns a new {@code PassageFormatter} by default;
    * subclasses can override to customize. */
-  protected XPassageFormatter getFormatter(String field) {
+  protected PassageFormatter getFormatter(String field) {
     if (defaultFormatter == null) {
-      defaultFormatter = new XDefaultPassageFormatter();
+      defaultFormatter = new DefaultPassageFormatter();
     }
     return defaultFormatter;
   }

@@ -395,7 +395,7 @@ public class XPostingsHighlighter {
     TermsEnum termsEnum = null;
     int lastLeaf = -1;
 
-    XPassageFormatter fieldFormatter = getFormatter(field);
+    PassageFormatter fieldFormatter = getFormatter(field);
     if (fieldFormatter == null) {
       throw new NullPointerException("PassageFormatter cannot be null");
     }
 
@@ -22,8 +22,7 @@ import com.carrotsearch.hppc.ObjectIntOpenHashMap;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.TokenStreamToAutomaton;
-import org.apache.lucene.search.spell.TermFreqIterator;
-import org.apache.lucene.search.spell.TermFreqPayloadIterator;
+import org.apache.lucene.search.suggest.InputIterator;
 import org.apache.lucene.search.suggest.Lookup;
 import org.apache.lucene.search.suggest.Sort;
 import org.apache.lucene.store.*;

@@ -391,19 +390,13 @@ public class XAnalyzingSuggester extends Lookup {
   };
 
   @Override
-  public void build(TermFreqIterator iterator) throws IOException {
+  public void build(InputIterator iterator) throws IOException {
     String prefix = getClass().getSimpleName();
     File directory = Sort.defaultTempDir();
     File tempInput = File.createTempFile(prefix, ".input", directory);
     File tempSorted = File.createTempFile(prefix, ".sorted", directory);
-
-    TermFreqPayloadIterator payloads;
-    if (iterator instanceof TermFreqPayloadIterator) {
-      payloads = (TermFreqPayloadIterator) iterator;
-    } else {
-      payloads = null;
-    }
-    hasPayloads = payloads != null;
+    hasPayloads = iterator.hasPayloads();
 
     Sort.ByteSequencesWriter writer = new Sort.ByteSequencesWriter(tempInput);
     Sort.ByteSequencesReader reader = null;

@@ -442,7 +435,7 @@ public class XAnalyzingSuggester extends Lookup {
         if (surfaceForm.length > (Short.MAX_VALUE-2)) {
           throw new IllegalArgumentException("cannot handle surface form > " + (Short.MAX_VALUE-2) + " in length (got " + surfaceForm.length + ")");
         }
-        payload = payloads.payload();
+        payload = iterator.payload();
         // payload + surfaceLength (short)
         requiredLength += payload.length + 2;
       } else {

@@ -480,7 +473,7 @@ public class XAnalyzingSuggester extends Lookup {
       writer.close();
 
       // Sort all input/output pairs (required by FST.Builder):
-      new Sort(new AnalyzingComparator(payloads != null)).sort(tempInput, tempSorted);
+      new Sort(new AnalyzingComparator(hasPayloads)).sort(tempInput, tempSorted);
 
       // Free disk space:
       tempInput.delete();
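Note: these suggester changes track Lucene 4.6's merge of TermFreqIterator and TermFreqPayloadIterator into a single InputIterator, which exposes payloads behind hasPayloads() instead of an instanceof check. A hedged sketch of the minimal surface an input source now implements (the class is a hypothetical payload-less source, similar in spirit to what the tests later in this diff build by hand):

    import java.io.IOException;
    import java.util.Comparator;
    import org.apache.lucene.search.suggest.InputIterator;
    import org.apache.lucene.util.BytesRef;

    // Hypothetical payload-less input source for a suggester build().
    class SimpleInput implements InputIterator {
        private final BytesRef[] terms;
        private int index = 0;

        SimpleInput(BytesRef[] terms) { this.terms = terms; }

        @Override public BytesRef next() throws IOException {
            return index < terms.length ? terms[index++] : null;
        }
        @Override public long weight() { return 1; }
        @Override public BytesRef payload() { return null; }     // ignored when...
        @Override public boolean hasPayloads() { return false; } // ...this is false
        @Override public Comparator<BytesRef> getComparator() { return null; } // unsorted
    }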
@@ -37,7 +37,6 @@ public final class RateLimitedFSDirectory extends FilterDirectory {
 
     @Override
     public IndexOutput createOutput(String name, IOContext context) throws IOException {
-        ensureOpen();
         final IndexOutput output = in.createOutput(name, context);
 
         StoreRateLimiting rateLimiting = rateLimitingProvider.rateLimiting();

@@ -60,7 +59,6 @@ public final class RateLimitedFSDirectory extends FilterDirectory {
 
     @Override
     public void close() throws IOException {
-        isOpen = false;
         in.close();
     }
 
@@ -39,7 +39,7 @@ import java.util.concurrent.atomic.AtomicLong;
 * <p>If constructed with {@link ByteBufferAllocator}, it allows to control the allocation and release of
 * byte buffer. For example, custom implementations can include caching of byte buffers.
 */
-public class ByteBufferDirectory extends Directory {
+public class ByteBufferDirectory extends BaseDirectory {
 
     protected final Map<String, ByteBufferFile> files = new ConcurrentHashMap<String, ByteBufferFile>();
 
@@ -140,7 +140,7 @@ public class Version implements Serializable {
     public static final int V_1_0_0_Beta1_ID = /*00*/1000001;
     public static final Version V_1_0_0_Beta1 = new Version(V_1_0_0_Beta1_ID, false, org.apache.lucene.util.Version.LUCENE_45);
     public static final int V_1_0_0_Beta2_ID = /*00*/1000002;
-    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, true, org.apache.lucene.util.Version.LUCENE_45);
+    public static final Version V_1_0_0_Beta2 = new Version(V_1_0_0_Beta2_ID, true, org.apache.lucene.util.Version.LUCENE_46);
 
     public static final Version CURRENT = V_1_0_0_Beta2;
 
@@ -351,6 +351,11 @@ public final class TermVectorFields extends Fields {
             return docCount;
         }
 
+        @Override
+        public boolean hasFreqs() {
+            return true;
+        }
+
         @Override
         public boolean hasOffsets() {
             return hasOffsets;
@@ -44,7 +44,7 @@ import java.io.IOException;
 */
 public class Lucene {
 
-    public static final Version VERSION = Version.LUCENE_45;
+    public static final Version VERSION = Version.LUCENE_46;
     public static final Version ANALYZER_VERSION = VERSION;
     public static final Version QUERYPARSER_VERSION = VERSION;
 

@@ -60,6 +60,9 @@ public class Lucene {
         if (version == null) {
             return defaultVersion;
         }
+        if ("4.6".equals(version)) {
+            return VERSION.LUCENE_46;
+        }
         if ("4.5".equals(version)) {
             return VERSION.LUCENE_45;
         }

@@ -116,7 +119,7 @@ public class Lucene {
         TotalHitCountCollector countCollector = new TotalHitCountCollector();
         // we don't need scores, so wrap it in a constant score query
         if (!(query instanceof ConstantScoreQuery)) {
-            query = new XLuceneConstantScoreQuery(query);
+            query = new ConstantScoreQuery(query);
         }
         searcher.search(query, countCollector);
         return countCollector.getTotalHits();
@@ -19,13 +19,13 @@
 
 package org.elasticsearch.common.lucene.search;
 
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.XLuceneConstantScoreQuery;
 
 /**
 * We still need sometimes to exclude deletes, because we don't remove them always with acceptDocs on filters
 */
-public class XConstantScoreQuery extends XLuceneConstantScoreQuery {
+public class XConstantScoreQuery extends ConstantScoreQuery {
 
     private final Filter actualFilter;
 
@@ -100,7 +100,7 @@ public final class XFilteredQuery extends Query {
         if (queryRewritten instanceof MatchAllDocsQuery || Queries.isConstantMatchAllQuery(queryRewritten)) {
             // Special case: If the query is a MatchAllDocsQuery, we only
             // return a CSQ(filter).
-            final Query rewritten = new XLuceneConstantScoreQuery(delegate.getFilter());
+            final Query rewritten = new ConstantScoreQuery(delegate.getFilter());
             // Combine boost of MatchAllDocsQuery and the wrapped rewritten query:
             rewritten.setBoost(delegate.getBoost() * queryRewritten.getBoost());
             return rewritten;
@@ -96,10 +96,10 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory {
 
         if ("wordnet".equalsIgnoreCase(settings.get("format"))) {
             parser = new WordnetSynonymParser(true, expand, analyzer);
-            ((WordnetSynonymParser) parser).add(rulesReader);
+            ((WordnetSynonymParser) parser).parse(rulesReader);
         } else {
             parser = new SolrSynonymParser(true, expand, analyzer);
-            ((SolrSynonymParser) parser).add(rulesReader);
+            ((SolrSynonymParser) parser).parse(rulesReader);
         }
 
         synonymMap = parser.build();
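Note: the add-to-parse rename follows Lucene 4.6's SynonymMap.Parser API, where a parser consumes a Reader via parse() and then builds the map. A small sketch, assuming a whitespace analyzer is adequate for rule tokenization (the rules string is illustrative):

    import java.io.StringReader;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.synonym.SolrSynonymParser;
    import org.apache.lucene.analysis.synonym.SynonymMap;
    import org.apache.lucene.util.Version;

    // Hypothetical sketch of the 4.6 parsing flow.
    class SynonymMapExample {
        static SynonymMap build() throws Exception {
            SolrSynonymParser parser = new SolrSynonymParser(true, true,
                    new WhitespaceAnalyzer(Version.LUCENE_46));
            parser.parse(new StringReader("usa => usa, america, american")); // was: add(...)
            return parser.build();
        }
    }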
@@ -21,8 +21,7 @@ package org.elasticsearch.index.codec;
 
 import org.apache.lucene.codecs.DocValuesFormat;
 import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.diskdv.DiskDocValuesFormat;
-import org.apache.lucene.codecs.lucene45.Lucene45Codec;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.index.codec.docvaluesformat.DocValuesFormatProvider;
 import org.elasticsearch.index.codec.postingsformat.PostingsFormatProvider;

@@ -38,7 +37,7 @@ import org.elasticsearch.index.mapper.MapperService;
 * configured for a specific field the default postings format is used.
 */
 // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version
-public class PerFieldMappingPostingFormatCodec extends Lucene45Codec {
+public class PerFieldMappingPostingFormatCodec extends Lucene46Codec {
     private final ESLogger logger;
     private final MapperService mapperService;
     private final PostingsFormat defaultPostingFormat;
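Note: moving from Lucene45Codec to Lucene46Codec keeps the same extension point — the codec asks getPostingsFormatForField for each field. The shape, sketched (the format choice is illustrative; the real codec consults the MapperService):

    import org.apache.lucene.codecs.PostingsFormat;
    import org.apache.lucene.codecs.lucene46.Lucene46Codec;

    // Hypothetical per-field codec mirroring PerFieldMappingPostingFormatCodec.
    class PerFieldDemoCodec extends Lucene46Codec {
        @Override
        public PostingsFormat getPostingsFormatForField(String field) {
            // A real implementation would route special fields (e.g. completion
            // fields) to dedicated formats and fall back to the default otherwise.
            return PostingsFormat.forName("Lucene41");
        }
    }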
@@ -189,13 +189,12 @@ public final class BloomFilterPostingsFormat extends PostingsFormat {
 
 
     }
 
-    public static final class BloomFilteredTerms extends Terms {
-        private Terms delegateTerms;
-
+    public static final class BloomFilteredTerms extends FilterAtomicReader.FilterTerms {
         private BloomFilter filter;
 
         public BloomFilteredTerms(Terms terms, BloomFilter filter) {
-            this.delegateTerms = terms;
+            super(terms);
             this.filter = filter;
         }
 

@@ -203,12 +202,6 @@ public final class BloomFilterPostingsFormat extends PostingsFormat {
             return filter;
         }
 
-        @Override
-        public TermsEnum intersect(CompiledAutomaton compiled,
-            final BytesRef startTerm) throws IOException {
-            return delegateTerms.intersect(compiled, startTerm);
-        }
-
         @Override
         public TermsEnum iterator(TermsEnum reuse) throws IOException {
             TermsEnum result;

@@ -217,56 +210,16 @@ public final class BloomFilterPostingsFormat extends PostingsFormat {
                 // to recycle its contained TermsEnum
                 BloomFilteredTermsEnum bfte = (BloomFilteredTermsEnum) reuse;
                 if (bfte.filter == filter) {
-                    bfte.reset(delegateTerms);
+                    bfte.reset(this.in);
                     return bfte;
                 }
                 reuse = bfte.reuse;
             }
             // We have been handed something we cannot reuse (either null, wrong
             // class or wrong filter) so allocate a new object
-            result = new BloomFilteredTermsEnum(delegateTerms, reuse, filter);
+            result = new BloomFilteredTermsEnum(this.in, reuse, filter);
             return result;
         }
 
-        @Override
-        public Comparator<BytesRef> getComparator() {
-            return delegateTerms.getComparator();
-        }
-
-        @Override
-        public long size() throws IOException {
-            return delegateTerms.size();
-        }
-
-        @Override
-        public long getSumTotalTermFreq() throws IOException {
-            return delegateTerms.getSumTotalTermFreq();
-        }
-
-        @Override
-        public long getSumDocFreq() throws IOException {
-            return delegateTerms.getSumDocFreq();
-        }
-
-        @Override
-        public int getDocCount() throws IOException {
-            return delegateTerms.getDocCount();
-        }
-
-        @Override
-        public boolean hasOffsets() {
-            return delegateTerms.hasOffsets();
-        }
-
-        @Override
-        public boolean hasPositions() {
-            return delegateTerms.hasPositions();
-        }
-
-        @Override
-        public boolean hasPayloads() {
-            return delegateTerms.hasPayloads();
-        }
     }
 
     static final class BloomFilteredTermsEnum extends TermsEnum {
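Note: extending FilterAtomicReader.FilterTerms removes the hand-written delegating methods — the base class forwards everything to its protected in field, so only the bloom-filter-aware iterator needs code. A hedged sketch of that shape (the class is hypothetical, not the real BloomFilteredTerms):

    import java.io.IOException;
    import org.apache.lucene.index.FilterAtomicReader;
    import org.apache.lucene.index.Terms;
    import org.apache.lucene.index.TermsEnum;

    // Sketch: FilterTerms supplies size(), getSumDocFreq(), hasPayloads(), etc.
    // by delegating to "in"; only the interesting override remains.
    class PassThroughTerms extends FilterAtomicReader.FilterTerms {
        PassThroughTerms(Terms in) {
            super(in);
        }

        @Override
        public TermsEnum iterator(TermsEnum reuse) throws IOException {
            return in.iterator(reuse); // a real implementation would wrap this enum
        }
    }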
@@ -1148,7 +1148,7 @@ public class RobinEngine extends AbstractIndexShardComponent implements Engine {
         try {
             for (AtomicReaderContext reader : searcher.reader().leaves()) {
                 assert reader.reader() instanceof SegmentReader;
-                SegmentInfoPerCommit info = ((SegmentReader) reader.reader()).getSegmentInfo();
+                SegmentCommitInfo info = ((SegmentReader) reader.reader()).getSegmentInfo();
                 assert !segments.containsKey(info.info.name);
                 Segment segment = new Segment(info.info.name);
                 segment.search = true;

@@ -1170,7 +1170,7 @@ public class RobinEngine extends AbstractIndexShardComponent implements Engine {
         // now, correlate or add the committed ones...
         if (lastCommittedSegmentInfos != null) {
             SegmentInfos infos = lastCommittedSegmentInfos;
-            for (SegmentInfoPerCommit info : infos) {
+            for (SegmentCommitInfo info : infos) {
                 Segment segment = segments.get(info.info.name);
                 if (segment == null) {
                     segment = new Segment(info.info.name);

@@ -1203,7 +1203,7 @@ public class RobinEngine extends AbstractIndexShardComponent implements Engine {
         // fill in the merges flag
         Set<OnGoingMerge> onGoingMerges = mergeScheduler.onGoingMerges();
         for (OnGoingMerge onGoingMerge : onGoingMerges) {
-            for (SegmentInfoPerCommit segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
+            for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) {
                 for (Segment segment : segmentsArr) {
                     if (segment.getName().equals(segmentInfoPerCommit.info.name)) {
                         segment.mergeId = onGoingMerge.getId();

@@ -1379,7 +1379,7 @@ public class RobinEngine extends AbstractIndexShardComponent implements Engine {
                 }
             }
         });
-        return new XIndexWriter(store.directory(), config);
+        return new IndexWriter(store.directory(), config);
     } catch (LockObtainFailedException ex) {
         boolean isLocked = IndexWriter.isLocked(store.directory());
         logger.warn("Could not lock IndexWriter isLocked [{}]", ex, isLocked);
@@ -172,7 +172,7 @@ public class IdFieldMapper extends AbstractFieldMapper<String> implements Intern
             return super.termQuery(value, context);
         }
         // no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
-        return new XLuceneConstantScoreQuery(termFilter(value, context));
+        return new ConstantScoreQuery(termFilter(value, context));
     }
 
     @Override
@@ -25,9 +25,9 @@ import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.queries.TermFilter;
 import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.XLuceneConstantScoreQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;

@@ -249,7 +249,7 @@ public class ParentFieldMapper extends AbstractFieldMapper<Uid> implements Inter
         if (context == null) {
             return super.termQuery(value, context);
         }
-        return new XLuceneConstantScoreQuery(termFilter(value, context));
+        return new ConstantScoreQuery(termFilter(value, context));
     }
 
     @Override
@@ -20,7 +20,7 @@
 package org.elasticsearch.index.merge;
 
 import org.apache.lucene.index.MergePolicy;
-import org.apache.lucene.index.SegmentInfoPerCommit;
+import org.apache.lucene.index.SegmentCommitInfo;
 
 import java.util.List;
 

@@ -30,7 +30,7 @@ import java.util.List;
 public class OnGoingMerge {
 
     private final String id;
-    private final List<SegmentInfoPerCommit> mergedSegments;
+    private final List<SegmentCommitInfo> mergedSegments;
 
     public OnGoingMerge(MergePolicy.OneMerge merge) {
         this.id = Integer.toString(System.identityHashCode(merge));

@@ -47,7 +47,7 @@ public class OnGoingMerge {
     /**
     * The list of segments that are being merged.
     */
-    public List<SegmentInfoPerCommit> getMergedSegments() {
+    public List<SegmentCommitInfo> getMergedSegments() {
         return mergedSegments;
     }
 }
@@ -143,7 +143,7 @@ public final class IndexUpgraderMergePolicy extends MergePolicy {
 
     static class IndexUpgraderOneMerge extends OneMerge {
 
-        public IndexUpgraderOneMerge(List<SegmentInfoPerCommit> segments) {
+        public IndexUpgraderOneMerge(List<SegmentCommitInfo> segments) {
             super(segments);
         }
 

@@ -192,7 +192,7 @@ public final class IndexUpgraderMergePolicy extends MergePolicy {
 
     @Override
     public MergeSpecification findForcedMerges(SegmentInfos segmentInfos,
-            int maxSegmentCount, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge)
+            int maxSegmentCount, Map<SegmentCommitInfo,Boolean> segmentsToMerge)
             throws IOException {
         return upgradedMergeSpecification(delegate.findForcedMerges(segmentInfos, maxSegmentCount, segmentsToMerge));
     }

@@ -215,7 +215,7 @@ public final class IndexUpgraderMergePolicy extends MergePolicy {
 
     @Override
     public boolean useCompoundFile(SegmentInfos segments,
-            SegmentInfoPerCommit newSegment) throws IOException {
+            SegmentCommitInfo newSegment) throws IOException {
         return delegate.useCompoundFile(segments, newSegment);
     }
 
@@ -207,15 +207,19 @@ public class CommonTermsQueryParser implements QueryParser {
 
         // Logic similar to QueryParser#getFieldQuery
         TokenStream source = analyzer.tokenStream(field, queryString.toString());
-        source.reset();
-        CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
         int count = 0;
-        while (source.incrementToken()) {
-            BytesRef ref = new BytesRef(termAtt.length() * 4); // oversize for
-                                                               // UTF-8
-            UnicodeUtil.UTF16toUTF8(termAtt.buffer(), 0, termAtt.length(), ref);
-            query.add(new Term(field, ref));
-            count++;
+        try {
+            source.reset();
+            CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+            while (source.incrementToken()) {
+                BytesRef ref = new BytesRef(termAtt.length() * 4); // oversize for
+                                                                   // UTF-8
+                UnicodeUtil.UTF16toUTF8(termAtt.buffer(), 0, termAtt.length(), ref);
+                query.add(new Term(field, ref));
+                count++;
+            }
+        } finally {
+            source.close();
         }
 
         if (count == 0) {
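Note: this hunk is the #4241 fix called out in the commit message — the TokenStream was consumed but never closed, leaking analyzer resources. The general consumption contract the fix follows, sketched with an assumed analyzer and field name (end() is part of the full contract, though the fix above only adds close()):

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;

    // Hypothetical helper showing the reset/increment/close contract.
    class TokenStreamContract {
        static int countTokens(Analyzer analyzer, String field, String text) throws IOException {
            TokenStream source = analyzer.tokenStream(field, text);
            int count = 0;
            try {
                source.reset();                 // required before incrementToken()
                while (source.incrementToken()) {
                    count++;
                }
                source.end();                   // finish attribute state
            } finally {
                source.close();                 // releases resources; the missing call fixed above
            }
            return count;
        }
    }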
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.index.query;
 
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.XLuceneConstantScoreQuery;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.search.XConstantScoreQuery;

@@ -104,7 +104,7 @@ public class ConstantScoreQueryParser implements QueryParser {
             return query1;
         }
         // Query
-        query = new XLuceneConstantScoreQuery(query);
+        query = new ConstantScoreQuery(query);
         query.setBoost(boost);
         return query;
     }
@@ -22,8 +22,8 @@ package org.elasticsearch.index.query;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
 import org.apache.lucene.queries.TermsFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Query;
-import org.apache.lucene.search.XLuceneConstantScoreQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.search.Queries;

@@ -117,7 +117,7 @@ public class IdsQueryParser implements QueryParser {
 
         TermsFilter filter = new TermsFilter(UidFieldMapper.NAME, Uid.createTypeUids(types, ids));
         // no need for constant score filter, since we don't cache the filter, and it always takes deletes into account
-        XLuceneConstantScoreQuery query = new XLuceneConstantScoreQuery(filter);
+        ConstantScoreQuery query = new ConstantScoreQuery(filter);
         query.setBoost(boost);
         if (queryName != null) {
             parseContext.addNamedQuery(queryName, query);
@@ -300,7 +300,7 @@ public class Store extends AbstractIndexShardComponent implements CloseableIndex
     /**
     * The idea of the store directory is to cache file level meta data, as well as md5 of it
     */
-    public class StoreDirectory extends Directory implements ForceSyncDirectory {
+    public class StoreDirectory extends BaseDirectory implements ForceSyncDirectory {
 
         private final Distributor distributor;
 
@@ -129,8 +129,6 @@ public class QueryFacetExecutor extends FacetExecutor {
             if (constantScoreQuery.getFilter() != null) {
                 return constantScoreQuery.getFilter();
             }
-        } else if (query instanceof XLuceneConstantScoreQuery) {
-            return ((XLuceneConstantScoreQuery) query).getFilter();
         }
         return null;
     }
@@ -324,7 +324,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchLuceneTestCase
             }
 
             @Override
-            protected XPassageFormatter getFormatter(String field) {
+            protected PassageFormatter getFormatter(String field) {
                 return new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
             }
         };
@@ -453,7 +453,7 @@ public class XPostingsHighlighterTests extends ElasticsearchLuceneTestCase {
         final CustomPassageFormatter passageFormatter = new CustomPassageFormatter("<b>", "</b>", new DefaultEncoder());
         highlighter = new XPostingsHighlighter() {
             @Override
-            protected XPassageFormatter getFormatter(String field) {
+            protected PassageFormatter getFormatter(String field) {
                 return passageFormatter;
             }
         };

@@ -1663,9 +1663,9 @@ public class XPostingsHighlighterTests extends ElasticsearchLuceneTestCase {
         IndexSearcher searcher = newSearcher(ir);
         XPostingsHighlighter highlighter = new XPostingsHighlighter() {
             @Override
-            protected XPassageFormatter getFormatter(String field) {
-                return new XPassageFormatter() {
-                    XPassageFormatter defaultFormatter = new XDefaultPassageFormatter();
+            protected PassageFormatter getFormatter(String field) {
+                return new PassageFormatter() {
+                    PassageFormatter defaultFormatter = new DefaultPassageFormatter();
 
                     @Override
                     public String[] format(Passage passages[], String content) {
@@ -24,10 +24,10 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;
 import org.apache.lucene.queries.TermFilter;
+import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
-import org.apache.lucene.search.XLuceneConstantScoreQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.RAMDirectory;
 import org.elasticsearch.common.lucene.Lucene;

@@ -67,7 +67,7 @@ public class FilterCacheTests extends ElasticsearchTestCase {
 
         reader = refreshReader(reader);
         IndexSearcher searcher = new IndexSearcher(reader);
-        assertThat(Lucene.count(searcher, new XLuceneConstantScoreQuery(filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1l));
+        assertThat(Lucene.count(searcher, new ConstantScoreQuery(filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1l));
         assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1l));
 
         indexWriter.deleteDocuments(new Term("id", "1"));

@@ -77,7 +77,7 @@ public class FilterCacheTests extends ElasticsearchTestCase {
         Filter cachedFilter = filterCache.cache(filter);
         long constantScoreCount = filter == cachedFilter ? 0 : 1;
         // sadly, when caching based on cacheKey with NRT, this fails, that's why we have DeletionAware one
-        assertThat(Lucene.count(searcher, new XLuceneConstantScoreQuery(cachedFilter)), equalTo(constantScoreCount));
+        assertThat(Lucene.count(searcher, new ConstantScoreQuery(cachedFilter)), equalTo(constantScoreCount));
         assertThat(Lucene.count(searcher, new XConstantScoreQuery(cachedFilter)), equalTo(0l));
         assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), cachedFilter)), equalTo(0l));
 
@@ -27,6 +27,7 @@ import org.apache.lucene.codecs.lucene41.Lucene41Codec;
 import org.apache.lucene.codecs.lucene42.Lucene42Codec;
 import org.apache.lucene.codecs.lucene45.Lucene45Codec;
 import org.apache.lucene.codecs.lucene45.Lucene45DocValuesFormat;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
 import org.apache.lucene.codecs.memory.DirectPostingsFormat;
 import org.apache.lucene.codecs.memory.MemoryDocValuesFormat;
 import org.apache.lucene.codecs.memory.MemoryPostingsFormat;

@@ -74,7 +75,9 @@ public class CodecTests extends ElasticsearchLuceneTestCase {
     public void testResolveDefaultCodecs() throws Exception {
         CodecService codecService = createCodecService();
         assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class));
-        assertThat(codecService.codec("default"), instanceOf(Lucene45Codec.class));
+        assertThat(codecService.codec("default"), instanceOf(Lucene46Codec.class));
+        assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class));
+        assertThat(codecService.codec("Lucene45"), instanceOf(Lucene45Codec.class));
         assertThat(codecService.codec("Lucene40"), instanceOf(Lucene40Codec.class));
         assertThat(codecService.codec("Lucene41"), instanceOf(Lucene41Codec.class));
         assertThat(codecService.codec("Lucene42"), instanceOf(Lucene42Codec.class));
@@ -22,7 +22,7 @@ package org.elasticsearch.index.codec.postingformat;
 import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene45.Lucene45Codec;
+import org.apache.lucene.codecs.lucene46.Lucene46Codec;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.*;

@@ -41,7 +41,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.*;
 
 /**

@@ -49,7 +48,7 @@ import static org.hamcrest.Matchers.*;
 */
 public class DefaultPostingsFormatTests extends ElasticsearchTestCase {
 
-    private final class TestCodec extends Lucene45Codec {
+    private final class TestCodec extends Lucene46Codec {
 
         @Override
         public PostingsFormat getPostingsFormatForField(String field) {
@@ -317,8 +317,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchTestCase {
         IndexQueryParserService queryParser = queryParser();
         String query = copyToStringFromClasspath("/org/elasticsearch/index/query/starColonStar.json");
         Query parsedQuery = queryParser.parse(query).query();
-        assertThat(parsedQuery, instanceOf(XLuceneConstantScoreQuery.class));
-        XLuceneConstantScoreQuery constantScoreQuery = (XLuceneConstantScoreQuery) parsedQuery;
+        assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class));
+        ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) parsedQuery;
         Filter internalFilter = constantScoreQuery.getFilter();
         assertThat(internalFilter, instanceOf(MatchAllDocsFilter.class));
     }
@@ -25,8 +25,7 @@ import org.apache.lucene.document.Document;
 import org.apache.lucene.index.*;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.spell.TermFreqIterator;
-import org.apache.lucene.search.spell.TermFreqPayloadIterator;
+import org.apache.lucene.search.suggest.InputIterator;
 import org.apache.lucene.search.suggest.Lookup;
 import org.apache.lucene.search.suggest.Lookup.LookupResult;
 import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;

@@ -120,7 +119,7 @@ public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
 
         }
         docs.close();
-        final TermFreqIterator primaryIter = new TermFreqIterator() {
+        final InputIterator primaryIter = new InputIterator() {
             int index = 0;
             long currentWeight = -1;
 

@@ -143,10 +142,20 @@ public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
                 return currentWeight;
             }
 
+            @Override
+            public BytesRef payload() {
+                return null;
+            }
+
+            @Override
+            public boolean hasPayloads() {
+                return false;
+            }
+
         };
-        TermFreqIterator iter;
+        InputIterator iter;
         if (usePayloads) {
-            iter = new TermFreqPayloadIterator() {
+            iter = new InputIterator() {
                 @Override
                 public long weight() {
                     return primaryIter.weight();

@@ -166,6 +175,11 @@ public class CompletionPostingsFormatTest extends ElasticsearchTestCase {
                 public BytesRef payload() {
                     return new BytesRef(Long.toString(weight()));
                 }
+
+                @Override
+                public boolean hasPayloads() {
+                    return true;
+                }
             };
         } else {
             iter = primaryIter;
@@ -159,7 +159,7 @@ public class NoisyChannelSpellCheckerTests extends ElasticsearchTestCase{
                 TokenFilter filter = new LowerCaseFilter(Version.LUCENE_41, t);
                 try {
                     SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer(Version.LUCENE_41));
-                    ((SolrSynonymParser) parser).add(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+                    ((SolrSynonymParser) parser).parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
                     filter = new SynonymFilter(filter, parser.build(), true);
                 } catch (Exception e) {
                     throw new RuntimeException(e);

@@ -370,7 +370,7 @@ public class NoisyChannelSpellCheckerTests extends ElasticsearchTestCase{
                 TokenFilter filter = new LowerCaseFilter(Version.LUCENE_41, t);
                 try {
                     SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer(Version.LUCENE_41));
-                    ((SolrSynonymParser) parser).add(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+                    ((SolrSynonymParser) parser).parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
                     filter = new SynonymFilter(filter, parser.build(), true);
                 } catch (Exception e) {
                     throw new RuntimeException(e);