Upgrade to lucene-6.5.0-snapshot-d00c5ca (#23385)

Lucene upgrade
This commit is contained in:
Jim Ferenczi 2017-02-27 18:39:04 +01:00 committed by GitHub
parent 48280a9403
commit 5c84640126
58 changed files with 184 additions and 155 deletions

View File

@ -1,6 +1,6 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha1
lucene = 6.5.0-snapshot-f919485
lucene = 6.5.0-snapshot-d00c5ca
# optional dependencies
spatial4j = 0.6

View File

@ -0,0 +1 @@
9ad2a7bd252cbdb76ac121287e670d75f4db2cd3

View File

@ -1 +0,0 @@
886c1da9adc3347f61ab95ecbf4dbeeaa0e7acb2

View File

@ -0,0 +1 @@
c6a940eff8a87df40262b752ed7b135e448b7873

View File

@ -1 +0,0 @@
df9e94f63ad7d9188f14820c435ea1dc3c28d87a

View File

@ -0,0 +1 @@
6ef5ad88141760c00ea041da1535f3ffc364d67d

View File

@ -1 +0,0 @@
3539f8dc9c3ed8ebe90afcb3daa2e9afcf5108d1

View File

@ -0,0 +1 @@
f15775571fb5762dfc92e00c3909cb8db8ff1d53

View File

@ -1 +0,0 @@
da76338e4f299963da9d7ab33dae7586dfc902c2

View File

@ -0,0 +1 @@
051d793aa64257beead4ccc7432eb5df81d17f23

View File

@ -1 +0,0 @@
f6318d120236c7ac03fca6bf98825b4cb4347fc8

View File

@ -0,0 +1 @@
5bc4cba55670c14ea812ff5de65edad4c312fdf6

View File

@ -1 +0,0 @@
68f045ff272e10c307fe25a1867c2948b614b57c

View File

@ -0,0 +1 @@
68cf08bcd8414a57493debf3a6a509d78a9abb56

View File

@ -1 +0,0 @@
b58a7a15267614a9a14f7cf6257454e0c24b146d

View File

@ -0,0 +1 @@
f5d90756dbeda1218d723b7bea0799c88d621adb

View File

@ -1 +0,0 @@
d5f00fcd00fee6906b563d201bc00bdea7a92baa

View File

@ -0,0 +1 @@
9298e7d1ed96e7beb63d7ccdce1a4502eb0fe484

View File

@ -1 +0,0 @@
2664901a494d87e9f4cef65be14cca918da7c4f5

View File

@ -0,0 +1 @@
918de18963607af69dff38e4773c0bde89c73ae3

View File

@ -1 +0,0 @@
476a79293f9a15ea1ee5f93684587205d03480d1

View File

@ -0,0 +1 @@
a311a7d9f3e9a8fbf3a367a4e2731f9d4579732b

View File

@ -1 +0,0 @@
f4dd70223178cca067b0cade4e58c4d82bec87d6

View File

@ -0,0 +1 @@
693bc4cb0e2e4465e0173c67ed0818071c4b460b

View File

@ -1 +0,0 @@
72c4ec5d811480164db556b54c7a76bd3ea16bd6

View File

@ -0,0 +1 @@
0326f31e63c76d476c23488c7354265cf915350f

View File

@ -1 +0,0 @@
f7af3755fdd09df7c258c655aff03ddef9536a04

View File

@ -0,0 +1 @@
69a3a86e9d045f872408793ea411d49e0c577268

View File

@ -1 +0,0 @@
2bf820109203b990e93a05dade8dcebec6aeb71a

View File

@ -0,0 +1 @@
fabc05ca175150171cf60370877276b933716bcd

View File

@ -1 +0,0 @@
fc1f32923ee68761ee05051f4ef6f4a4ab3acdec

View File

@ -19,9 +19,6 @@
package org.apache.lucene.queryparser.classic;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@ -33,12 +30,14 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.automaton.RegExp;
@ -59,6 +58,9 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.lucene.search.Queries.fixNegativeQueryIfNeeded;
/**
* A query parser that uses the {@link MapperService} in order to build smarter
* queries based on the mapping information.
@ -747,26 +749,29 @@ public class MapperQueryParser extends AnalyzingQueryParser {
MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
builder.setSlop(slop);
return builder.build();
} else if (q instanceof GraphQuery && ((GraphQuery) q).hasPhrase()) {
// we have a graph query that has at least one phrase sub-query
// re-build and set slop on all phrase queries
List<Query> oldQueries = ((GraphQuery) q).getQueries();
Query[] queries = new Query[oldQueries.size()];
for (int i = 0; i < queries.length; i++) {
Query oldQuery = oldQueries.get(i);
if (oldQuery instanceof PhraseQuery) {
queries[i] = addSlopToPhrase((PhraseQuery) oldQuery, slop);
} else {
queries[i] = oldQuery;
}
}
return new GraphQuery(queries);
} else if (q instanceof SpanQuery) {
return addSlopToSpan((SpanQuery) q, slop);
} else {
return q;
}
}
// Applies a slop value to a span query produced by the analyzer graph.
// For a SpanNearQuery the slop replaces the existing one; for a SpanOrQuery
// the slop is pushed down recursively into each clause; any other span query
// is returned unchanged (slop has no meaning for it).
private Query addSlopToSpan(SpanQuery query, int slop) {
if (query instanceof SpanNearQuery) {
// Rebuild with the same clauses and in-order flag, but the new slop.
return new SpanNearQuery(((SpanNearQuery) query).getClauses(), slop,
((SpanNearQuery) query).isInOrder());
} else if (query instanceof SpanOrQuery) {
// Recurse into every OR branch so nested near-queries pick up the slop too.
SpanQuery[] clauses = new SpanQuery[((SpanOrQuery) query).getClauses().length];
int pos = 0;
for (SpanQuery clause : ((SpanOrQuery) query).getClauses()) {
clauses[pos++] = (SpanQuery) addSlopToSpan(clause, slop);
}
return new SpanOrQuery(clauses);
} else {
// e.g. a plain SpanTermQuery: nothing to do.
return query;
}
}
/**
* Rebuild a phrase query with a slop value
*/

View File

@ -62,11 +62,9 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
@ -243,11 +241,11 @@ public class SearchPhaseController extends AbstractComponent {
final Sort sort = new Sort(firstTopDocs.fields);
final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, true);
} else {
final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, true);
}
ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;

View File

@ -18,6 +18,9 @@
*/
package org.elasticsearch.index.analysis;
import java.io.Reader;
import java.util.regex.Pattern;
import org.apache.lucene.analysis.pattern.PatternReplaceCharFilter;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex;
@ -25,10 +28,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import java.io.Reader;
import java.util.regex.Pattern;
public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory implements MultiTermAwareComponent {
private final Pattern pattern;
private final String replacement;
@ -56,4 +56,9 @@ public class PatternReplaceCharFilterFactory extends AbstractCharFilterFactory {
public Reader create(Reader tokenStream) {
return new PatternReplaceCharFilter(pattern, replacement, tokenStream);
}
@Override
public Object getMultiTermComponent() {
return this;
}
}

View File

@ -27,13 +27,18 @@ import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.QueryBuilder;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
@ -49,7 +54,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.QueryParsers;
import java.io.IOException;
import java.util.List;
public class MatchQuery {
@ -318,17 +322,6 @@ public class MatchQuery {
public Query createPhrasePrefixQuery(String field, String queryText, int phraseSlop, int maxExpansions) {
final Query query = createFieldQuery(getAnalyzer(), Occur.MUST, field, queryText, true, phraseSlop);
if (query instanceof GraphQuery) {
// we have a graph query, convert inner queries to multi phrase prefix queries
List<Query> oldQueries = ((GraphQuery) query).getQueries();
Query[] queries = new Query[oldQueries.size()];
for (int i = 0; i < queries.length; i++) {
queries[i] = toMultiPhrasePrefix(oldQueries.get(i), phraseSlop, maxExpansions);
}
return new GraphQuery(queries);
}
return toMultiPhrasePrefix(query, phraseSlop, maxExpansions);
}
@ -340,6 +333,9 @@ public class MatchQuery {
boost *= bq.getBoost();
innerQuery = bq.getQuery();
}
if (query instanceof SpanQuery) {
return toSpanQueryPrefix((SpanQuery) query, boost);
}
final MultiPhrasePrefixQuery prefixQuery = new MultiPhrasePrefixQuery();
prefixQuery.setMaxExpansions(maxExpansions);
prefixQuery.setSlop(phraseSlop);
@ -369,6 +365,33 @@ public class MatchQuery {
return query;
}
// Converts a span query (produced for a multi-term synonym graph) into its
// "phrase prefix" form: the trailing term of each path is turned into a
// PrefixQuery wrapped in a SpanMultiTermQueryWrapper, mirroring what
// MultiPhrasePrefixQuery does for flat phrases. Boost is re-applied via
// BoostQuery only when it differs from 1.
private Query toSpanQueryPrefix(SpanQuery query, float boost) {
if (query instanceof SpanTermQuery) {
// Single-term path: the whole term becomes a prefix match.
SpanMultiTermQueryWrapper<PrefixQuery> ret =
new SpanMultiTermQueryWrapper<>(new PrefixQuery(((SpanTermQuery) query).getTerm()));
return boost == 1 ? ret : new BoostQuery(ret, boost);
} else if (query instanceof SpanNearQuery) {
SpanNearQuery spanNearQuery = (SpanNearQuery) query;
SpanQuery[] clauses = spanNearQuery.getClauses();
// Only the last clause is prefixed; earlier clauses must match exactly.
// NOTE(review): this writes into the array returned by getClauses() —
// if that is the query's internal array this mutates the input query;
// confirm getClauses() returns a copy, or clone before assigning.
if (clauses[clauses.length-1] instanceof SpanTermQuery) {
clauses[clauses.length-1] = new SpanMultiTermQueryWrapper<>(
new PrefixQuery(((SpanTermQuery) clauses[clauses.length-1]).getTerm())
);
}
SpanNearQuery newQuery = new SpanNearQuery(clauses, spanNearQuery.getSlop(), spanNearQuery.isInOrder());
return boost == 1 ? newQuery : new BoostQuery(newQuery, boost);
} else if (query instanceof SpanOrQuery) {
// Recurse into each OR branch; boost is applied once at the top,
// so children are converted with a neutral boost of 1.
SpanOrQuery orQuery = (SpanOrQuery) query;
SpanQuery[] clauses = new SpanQuery[orQuery.getClauses().length];
for (int i = 0; i < clauses.length; i++) {
clauses[i] = (SpanQuery) toSpanQueryPrefix(orQuery.getClauses()[i], 1);
}
return boost == 1 ? new SpanOrQuery(clauses) : new BoostQuery(new SpanOrQuery(clauses), boost);
} else {
// Unknown span type: leave untouched rather than guess a prefix form.
return query;
}
}
public Query createCommonTermsQuery(String field, String queryText, Occur highFreqOccur, Occur lowFreqOccur, float
maxTermFrequency, MappedFieldType fieldType) {
Query booleanQuery = createBooleanQuery(field, queryText, lowFreqOccur);

View File

@ -119,7 +119,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits {
shardDocs[i] = topHitsAgg.topDocs;
shardHits[i] = topHitsAgg.searchHits;
}
reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs);
reducedTopDocs = TopDocs.merge(sort, from, size, (TopFieldDocs[]) shardDocs, true);
} else {
shardDocs = new TopDocs[aggregations.size()];
for (int i = 0; i < shardDocs.length; i++) {
@ -127,7 +127,7 @@ public class InternalTopHits extends InternalAggregation implements TopHits {
shardDocs[i] = topHitsAgg.topDocs;
shardHits[i] = topHitsAgg.searchHits;
}
reducedTopDocs = TopDocs.merge(from, size, shardDocs);
reducedTopDocs = TopDocs.merge(from, size, shardDocs, true);
}
final int[] tracker = new int[shardHits.length];

View File

@ -70,7 +70,7 @@ final class ProfileScorer extends Scorer {
}
@Override
public Collection<ChildScorer> getChildren() {
public Collection<ChildScorer> getChildren() throws IOException {
return scorer.getChildren();
}

View File

@ -184,8 +184,10 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
private final SuggestDocPriorityQueue pq;
private final Map<Integer, SuggestDoc> scoreDocMap;
// TODO: expose dup removal
TopDocumentsCollector(int num) {
super(1); // TODO hack, we don't use the underlying pq, so we allocate a size of 1
super(1, false); // TODO hack, we don't use the underlying pq, so we allocate a size of 1
this.num = num;
this.scoreDocMap = new LinkedHashMap<>(num);
this.pq = new SuggestDocPriorityQueue(num);

View File

@ -31,7 +31,7 @@ grant codeBase "${codebase.securesm-1.1.jar}" {
//// Very special jar permissions:
//// These are dangerous permissions that we don't want to grant to everything.
grant codeBase "${codebase.lucene-core-6.5.0-snapshot-f919485.jar}" {
grant codeBase "${codebase.lucene-core-6.5.0-snapshot-d00c5ca.jar}" {
// needed to allow MMapDirectory's "unmap hack" (die unmap hack, die)
// java 8 package
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
@ -42,7 +42,7 @@ grant codeBase "${codebase.lucene-core-6.5.0-snapshot-f919485.jar}" {
permission java.lang.RuntimePermission "accessDeclaredMembers";
};
grant codeBase "${codebase.lucene-misc-6.5.0-snapshot-f919485.jar}" {
grant codeBase "${codebase.lucene-misc-6.5.0-snapshot-d00c5ca.jar}" {
// needed to allow shard shrinking to use hard-links if possible via lucenes HardlinkCopyDirectoryWrapper
permission java.nio.file.LinkPermission "hard";
};

View File

@ -33,7 +33,7 @@ grant codeBase "${codebase.securemock-1.2.jar}" {
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
};
grant codeBase "${codebase.lucene-test-framework-6.5.0-snapshot-f919485.jar}" {
grant codeBase "${codebase.lucene-test-framework-6.5.0-snapshot-d00c5ca.jar}" {
// needed by RamUsageTester
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
// needed for testing hardlinks in StoreRecoveryTests since we install MockFS

View File

@ -24,11 +24,11 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MapperQueryParser;
import org.apache.lucene.queryparser.classic.QueryParserSettings;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiTermQuery;
@ -40,7 +40,9 @@ import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.TooComplexToDeterminizeException;
import org.elasticsearch.common.ParsingException;
@ -396,26 +398,24 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
// simple multi-term
Query query = queryParser.parse("guinea pig");
Query expectedQuery = new GraphQuery(
new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST))
.build(),
new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
);
Query expectedQuery = new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST).build(), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), defaultOp)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// simple with additional tokens
query = queryParser.parse("that guinea pig smells");
expectedQuery = new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), defaultOp)
.add(new GraphQuery(
new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST))
.build(),
new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
), defaultOp)
.build(), Occur.SHOULD)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), Occur.SHOULD).build(), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), defaultOp)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
@ -423,70 +423,62 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStr
// complex
query = queryParser.parse("+that -(guinea pig) +smells");
expectedQuery = new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST))
.add(new BooleanClause(new GraphQuery(
new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST)
.build(),
new TermQuery(new Term(STRING_FIELD_NAME, "cavy"))
), BooleanClause.Occur.MUST_NOT))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), BooleanClause.Occur.MUST))
.build();
.add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), Occur.MUST)
.add(new BooleanQuery.Builder()
.add(new BooleanQuery.Builder()
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), Occur.MUST)
.build(), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), defaultOp)
.build(), Occur.MUST_NOT)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), Occur.MUST)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// no paren should cause guinea and pig to be treated as separate tokens
// no paren should cause guinea and pig to be treated as separate tokens
query = queryParser.parse("+that -guinea pig +smells");
expectedQuery = new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST_NOT))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp))
.add(new BooleanClause(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), BooleanClause.Occur.MUST))
.add(new TermQuery(new Term(STRING_FIELD_NAME, "that")), BooleanClause.Occur.MUST)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST_NOT)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), defaultOp)
.add(new TermQuery(new Term(STRING_FIELD_NAME, "smells")), BooleanClause.Occur.MUST)
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// phrase
// span query
query = queryParser.parse("\"that guinea pig smells\"");
expectedQuery = new BooleanQuery.Builder()
.add(new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that")))
.addClause(new SpanOrQuery(
new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "guinea")))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "pig"))).build(),
new SpanTermQuery(new Term(STRING_FIELD_NAME, "cavy"))))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells")))
.build(), Occur.SHOULD)
.setDisableCoord(true)
.add(new BooleanClause(new GraphQuery(
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "guinea"))
.add(new Term(STRING_FIELD_NAME, "pig"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.build(),
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "cavy"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.build()
), BooleanClause.Occur.SHOULD)).build();
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
// phrase with slop
// span query with slop
query = queryParser.parse("\"that guinea pig smells\"~2");
expectedQuery = new BooleanQuery.Builder()
.add(new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "that")))
.addClause(new SpanOrQuery(
new SpanNearQuery.Builder(STRING_FIELD_NAME, true)
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "guinea")))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "pig"))).build(),
new SpanTermQuery(new Term(STRING_FIELD_NAME, "cavy"))))
.addClause(new SpanTermQuery(new Term(STRING_FIELD_NAME, "smells")))
.setSlop(2)
.build(),
Occur.SHOULD)
.setDisableCoord(true)
.add(new BooleanClause(new GraphQuery(
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "guinea"))
.add(new Term(STRING_FIELD_NAME, "pig"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.setSlop(2)
.build(),
new PhraseQuery.Builder()
.add(new Term(STRING_FIELD_NAME, "that"))
.add(new Term(STRING_FIELD_NAME, "cavy"))
.add(new Term(STRING_FIELD_NAME, "smells"))
.setSlop(2)
.build()
), BooleanClause.Occur.SHOULD)).build();
.build();
assertThat(query, Matchers.equalTo(expectedQuery));
}
}

View File

@ -25,12 +25,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.GraphQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
@ -138,27 +140,27 @@ public class SimpleQueryParserTests extends ESTestCase {
// phrase will pick it up
query = parser.parse("\"guinea pig\"");
expectedQuery = new GraphQuery(
new PhraseQuery("field1", "guinea", "pig"),
new TermQuery(new Term("field1", "cavy")));
SpanTermQuery span1 = new SpanTermQuery(new Term("field1", "guinea"));
SpanTermQuery span2 = new SpanTermQuery(new Term("field1", "pig"));
expectedQuery = new SpanOrQuery(
new SpanNearQuery(new SpanQuery[] { span1, span2 }, 0, true),
new SpanTermQuery(new Term("field1", "cavy")));
assertThat(query, equalTo(expectedQuery));
// phrase with slop
query = parser.parse("big \"guinea pig\"~2");
query = parser.parse("big \"tiny guinea pig\"~2");
expectedQuery = new BooleanQuery.Builder()
.add(new BooleanClause(new TermQuery(new Term("field1", "big")), defaultOp))
.add(new BooleanClause(new GraphQuery(
new PhraseQuery.Builder()
.add(new Term("field1", "guinea"))
.add(new Term("field1", "pig"))
.setSlop(2)
.build(),
new TermQuery(new Term("field1", "cavy"))), defaultOp))
.add(new TermQuery(new Term("field1", "big")), defaultOp)
.add(new SpanNearQuery(new SpanQuery[] {
new SpanTermQuery(new Term("field1", "tiny")),
new SpanOrQuery(
new SpanNearQuery(new SpanQuery[] { span1, span2 }, 0, true),
new SpanTermQuery(new Term("field1", "cavy"))
)
}, 2, true), defaultOp)
.build();
assertThat(query, equalTo(expectedQuery));
}
}

View File

@ -79,13 +79,11 @@ public class ESToParentBlockJoinQueryTests extends ESTestCase {
new PhraseQuery("body", "term"), // rewrites to a TermQuery
new QueryBitSetProducer(new TermQuery(new Term("is", "parent"))),
ScoreMode.Avg, "nested");
assertEquals(q, q.rewrite(new MultiReader()));
// do this once LUCENE-7685 is addressed
// Query expected = new ESToParentBlockJoinQuery(
// new TermQuery(new Term("body", "term")),
// new QueryBitSetProducer(new TermQuery(new Term("is", "parent"))),
// ScoreMode.Avg, "nested");
// Query rewritten = q.rewrite(new MultiReader());
// assertEquals(expected, rewritten);
Query expected = new ESToParentBlockJoinQuery(
new TermQuery(new Term("body", "term")),
new QueryBitSetProducer(new TermQuery(new Term("is", "parent"))),
ScoreMode.Avg, "nested");
Query rewritten = q.rewrite(new MultiReader());
assertEquals(expected, rewritten);
}
}

View File

@ -0,0 +1 @@
eb201cc666e834f5f128cea00acdf2c046fcbb87

View File

@ -1 +0,0 @@
89f1c501f5f6504c53527da76aa18796c2b56492

View File

@ -0,0 +1 @@
165f826617aa6cb7af67b2c3f87df3b46216a155

View File

@ -1 +0,0 @@
e430aa3efe4883c74edc01711871870c907f37ca

View File

@ -0,0 +1 @@
50ed8c505a120bfcd1d5a7d3fae837027153f0dd

View File

@ -1 +0,0 @@
f4340c16ce417a688b5b20f6b6624f51683247bd

View File

@ -0,0 +1 @@
f4c04ecad541aa9526c4e2bd4e98aa08898ffa1c

View File

@ -1 +0,0 @@
ccf0f76f1249bc0027e9ebe01953e3663e52a5dc

View File

@ -0,0 +1 @@
bc5ca65f0db1ec9f71481c6ad4e146bbf56df32e

View File

@ -1 +0,0 @@
8cfa9d27ea3d36524d69b78601fe9b9c6d4b9628

View File

@ -0,0 +1 @@
dae2a3e6b79197d4e48ee1ae8d0ef31b8b20069e

View File

@ -1 +0,0 @@
eb7e1cdab96c107ca256cef75e149139f9b62044

View File

@ -0,0 +1 @@
318fcd0d1d33d45088ac3f4ab8291a4a22060078

View File

@ -1 +0,0 @@
d0a56789aa72751547c0001a0e950c387b245181

View File

@ -147,6 +147,10 @@ public class AnalysisFactoryTestCase extends ESTestCase {
// this one "seems to mess up offsets". probably shouldn't be a tokenizer...
.put("wikipedia", Void.class)
// TODO: expose these
.put("simplepattern", Void.class)
.put("simplepatternsplit", Void.class)
.immutableMap();
static final Map<PreBuiltTokenizers, Class<?>> PREBUILT_TOKENIZERS;