Fixed an msm accounting error that can occur while analyzing a percolator query.
In the case of a disjunction query with both range and term based clauses and an msm (minimum_should_match) specified, the query analyzer also needs to reduce the msm when a range based clause for the same field is encountered. This did not happen.

Instead of fixing this bug, the logic has been simplified to just set a percolator query's msm to 1 if a disjunction contains range clauses and an msm has been specified on the disjunction. The logic would otherwise get too complex, and the performance gain isn't that large for this kind of percolator query.

In case a percolator query has clauses that contain duplicate terms or ranges, then for disjunction clauses with a minimum_should_match the query extraction of the clause with the lowest msm should be used, and for conjunction query extractions with duplicate terms/ranges the msm should be ignored. If this is not done, then percolator queries that should match never do.

Example percolator query: value1 OR value2 OR value2 OR value3 OR value3 OR value3 OR value4 OR value5 (msm set to 3)
In the above example query the extracted msm would be 3.

Example document1: value1 value2 value3
With the msm and extracted terms this would match, which is the expected behaviour.

Example document2: value3
This document should match too (value3 appears in 3 clauses), but with msm set to 3 and the fact that only distinct values are indexed in the extracted terms field, this document would never match.

Also added another random duel test.

Closes #29393
parent ec65710926
commit f4395c0c94
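To make the new disjunction-side rules concrete, here is a minimal sketch of the msm bookkeeping described above. It is not the actual QueryAnalyzer code; the class and method names and the parameters (clauseMsm, requiredShouldClauses, hasDuplicateTerms, hasRangeExtractions) are assumptions made for illustration:

import java.util.List;
import java.util.stream.Collectors;

final class DisjunctionMsmSketch {

    // Combined msm for a disjunction, following the rules this commit introduces:
    // - any range clause present: fall back to msm = 1
    // - duplicate extracted terms: only the lowest clause msm can be trusted
    // - otherwise: sum the lowest clause msm values, up to the number of
    //   required should clauses
    static int disjunctionMsm(List<Integer> clauseMsm, int requiredShouldClauses,
                              boolean hasDuplicateTerms, boolean hasRangeExtractions) {
        if (hasRangeExtractions) {
            // Juggling the msm together with ranges is what got too complex; use 1.
            return 1;
        }
        List<Integer> sorted = clauseMsm.stream()
            .filter(m -> m > 0)
            .sorted()
            .collect(Collectors.toList());
        if (sorted.isEmpty()) {
            return 0;
        }
        if (hasDuplicateTerms) {
            // Only distinct terms are indexed, so one document value may satisfy
            // several duplicated clauses; only the lowest clause msm is safe.
            return sorted.get(0);
        }
        int limit = Math.min(sorted.size(), Math.max(1, requiredShouldClauses));
        int msm = 0;
        for (int i = 0; i < limit; i++) {
            msm += sorted.get(i);
        }
        return msm;
    }
}

For the example above (eight single-term clauses with msm 3, value2 and value3 duplicated), each clause msm is 1 and duplicates are present, so the sketch returns 1 instead of the old sum of 3, and a document containing only value3 can again become a candidate match.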
@@ -375,7 +375,11 @@ final class QueryAnalyzer {
    private static BiFunction<Query, Version, Result> disjunctionMaxQuery() {
        return (query, version) -> {
            List<Query> disjuncts = ((DisjunctionMaxQuery) query).getDisjuncts();
            return handleDisjunctionQuery(disjuncts, 1, version);
            if (disjuncts.isEmpty()) {
                return new Result(false, Collections.emptySet(), 0);
            } else {
                return handleDisjunctionQuery(disjuncts, 1, version);
            }
        };
    }

@@ -459,17 +463,17 @@ final class QueryAnalyzer {
            }
        }

        if (success == false) {
        if (success == false) {
            // No clauses could be extracted
            if (uqe != null) {
                throw uqe;
            } else {
                // Empty conjunction
                return new Result(true, Collections.emptySet(), 0);
            }
        }
        if (uqe != null) {

        Result result = handleConjunction(results, version);
            throw uqe;
        } else {
            // Empty conjunction
            return new Result(true, Collections.emptySet(), 0);
        }
        }
        Result result = handleConjunction(results, version);
        if (uqe != null) {
            result = result.unverify();
        }
@@ -486,44 +490,43 @@ final class QueryAnalyzer {
                    return subResult;
                }
            }
            int msm = 0;
            boolean verified = true;
            boolean matchAllDocs = true;
            boolean hasDuplicateTerms = false;
            Set<QueryExtraction> extractions = new HashSet<>();
            Set<String> seenRangeFields = new HashSet<>();
            for (Result result : conjunctions) {
                // In case that there are duplicate query extractions we need to be careful with incrementing msm,
                // because that could lead to valid matches not becoming candidate matches:
                // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3)
                // doc: field: val1 val2 val3
                // So let's be protective and decrease the msm:
                int resultMsm = result.minimumShouldMatch;
                for (QueryExtraction queryExtraction : result.extractions) {
                    if (queryExtraction.range != null) {
                        // In case of range queries each extraction does not simply increment the minimum_should_match
                        // for that percolator query like for a term based extraction, so that can lead to more false
                        // positives for percolator queries with range queries than term based queries.
                        // This is because of the way number fields are extracted from the document to be percolated.
                        // Per field a single range is extracted and if a percolator query has two or more range queries
                        // on the same field, then the minimum should match can be higher than clauses in the CoveringQuery.
                        // Therefore right now the minimum should match is incremented once per number field when processing
                        // the percolator query at index time.
                        if (seenRangeFields.add(queryExtraction.range.fieldName)) {
                            resultMsm = 1;
                        } else {
                            resultMsm = 0;
                        }
                    }

            int msm = 0;
            boolean verified = true;
            boolean matchAllDocs = true;
            Set<QueryExtraction> extractions = new HashSet<>();
            Set<String> seenRangeFields = new HashSet<>();
            for (Result result : conjunctions) {
                // In case that there are duplicate query extractions we need to be careful with incrementing msm,
                // because that could lead to valid matches not becoming candidate matches:
                // query: (field:val1 AND field:val2) AND (field:val2 AND field:val3)
                // doc: field: val1 val2 val3
                // So let's be protective and decrease the msm:
                int resultMsm = result.minimumShouldMatch;
                for (QueryExtraction queryExtraction : result.extractions) {
                    if (queryExtraction.range != null) {
                        // In case of range queries each extraction does not simply increment the minimum_should_match
                        // for that percolator query like for a term based extraction, so that can lead to more false
                        // positives for percolator queries with range queries than term based queries.
                        // This is because of the way number fields are extracted from the document to be percolated.
                        // Per field a single range is extracted and if a percolator query has two or more range queries
                        // on the same field, then the minimum should match can be higher than clauses in the CoveringQuery.
                        // Therefore right now the minimum should match is incremented once per number field when processing
                        // the percolator query at index time.
                        if (seenRangeFields.add(queryExtraction.range.fieldName)) {
                            resultMsm = 1;
                        } else {
                            resultMsm = 0;
                        }
                    }
                    if (extractions.contains(queryExtraction)) {

                    if (extractions.contains(queryExtraction)) {
                        // To protect against negative msm:
                        // (sub results could consist out of disjunction and conjunction and
                        // then we do not know which extraction contributed to msm)
                        resultMsm = Math.max(0, resultMsm - 1);
                    }
                }
                msm += resultMsm;
                        resultMsm = 0;
                        verified = false;
                        break;
                    }
                }
                msm += resultMsm;

                if (result.verified == false
                    // If some inner extractions are optional, the result can't be verified
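The range handling in the conjunction hunk above caps what each field can contribute to the msm: the first range extraction seen for a field sets the clause contribution to 1, and later ranges on the same field contribute 0, because only a single range per number field is extracted from the document being percolated. A minimal, illustrative sketch of just that rule (names assumed, not the actual code):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class RangeMsmSketch {

    // msm contribution of range extractions: at most 1 per distinct field.
    static int rangeMsm(List<String> rangeFieldNames) {
        Set<String> seen = new HashSet<>();
        int msm = 0;
        for (String field : rangeFieldNames) {
            if (seen.add(field)) {
                msm += 1;
            }
        }
        return msm;
    }
}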
@@ -536,7 +539,7 @@ final class QueryAnalyzer {
            if (matchAllDocs) {
                return new Result(matchAllDocs, verified);
            } else {
                return new Result(verified, extractions, msm);
                return new Result(verified, extractions, hasDuplicateTerms ? 1 : msm);
            }
        } else {
            Result bestClause = null;
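The conjunction result above returns hasDuplicateTerms ? 1 : msm. As a hypothetical illustration of why the summed msm is unsafe once extractions repeat, consider building the query from the hunk's comment, (field:val1 AND field:val2) AND (field:val2 AND field:val3); this snippet is not part of the commit:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

final class DuplicateConjunctionExample {

    // (field:val1 AND field:val2) AND (field:val2 AND field:val3)
    static BooleanQuery duplicatedConjunction() {
        BooleanQuery.Builder inner1 = new BooleanQuery.Builder();
        inner1.add(new TermQuery(new Term("field", "val1")), Occur.MUST);
        inner1.add(new TermQuery(new Term("field", "val2")), Occur.MUST);
        BooleanQuery.Builder inner2 = new BooleanQuery.Builder();
        inner2.add(new TermQuery(new Term("field", "val2")), Occur.MUST);
        inner2.add(new TermQuery(new Term("field", "val3")), Occur.MUST);
        BooleanQuery.Builder query = new BooleanQuery.Builder();
        query.add(inner1.build(), Occur.MUST);
        query.add(inner2.build(), Occur.MUST);
        return query.build();
    }
}

Summing the per-clause msm would demand 2 + 2 = 4 matching terms, but only the three distinct extractions val1, val2 and val3 are indexed, so a document containing exactly "val1 val2 val3" could never reach the required count; relaxing the msm when a duplicate is seen keeps such queries matchable.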
@@ -559,7 +562,7 @@ final class QueryAnalyzer {
    private static Result handleDisjunction(List<Result> disjunctions, int requiredShouldClauses, Version version) {
        // Keep track of the msm for each clause:
        List<DisjunctionClause> clauses = new ArrayList<>(disjunctions.size());
        List<Integer> clauses = new ArrayList<>(disjunctions.size());
        boolean verified;
        if (version.before(Version.V_6_1_0)) {
            verified = requiredShouldClauses <= 1;
@@ -567,6 +570,21 @@ final class QueryAnalyzer {
            verified = true;
        }
        int numMatchAllClauses = 0;
        boolean hasRangeExtractions = false;

        // In case that there are duplicate extracted terms / ranges then the msm should always be equal to the clause
        // with the lowest msm, because at percolate time there is no way to know the number of repetitions per
        // extracted term, and a field value from a percolator document may have more 'weight' than others.
        // Example percolator query: value1 OR value2 OR value2 OR value3 OR value3 OR value3 OR value4 OR value5 (msm set to 3)
        // In the above example query the extracted msm would be 3
        // Example document1: value1 value2 value3
        // With the msm and extracted terms this would match and is expected behaviour
        // Example document2: value3
        // This document should match too (value3 appears in 3 clauses), but with msm set to 3 and the fact
        // that only distinct values are indexed in the extracted terms field this document would
        // never match.
        boolean hasDuplicateTerms = false;

        Set<QueryExtraction> terms = new HashSet<>();
        for (int i = 0; i < disjunctions.size(); i++) {
            Result subResult = disjunctions.get(i);
@@ -585,35 +603,37 @@ final class QueryAnalyzer {
            int resultMsm = subResult.minimumShouldMatch;
            for (QueryExtraction extraction : subResult.extractions) {
                if (terms.add(extraction) == false) {
                    resultMsm = Math.max(0, resultMsm - 1);
                    verified = false;
                    hasDuplicateTerms = true;
                }
            }
            clauses.add(new DisjunctionClause(resultMsm, subResult.extractions.stream()
                .filter(extraction -> extraction.range != null)
                .map(extraction -> extraction.range.fieldName)
                .collect(toSet())));
            if (hasRangeExtractions == false) {
                hasRangeExtractions = subResult.extractions.stream().anyMatch(qe -> qe.range != null);
            }
            clauses.add(resultMsm);
        }
        boolean matchAllDocs = numMatchAllClauses > 0 && numMatchAllClauses >= requiredShouldClauses;

        int msm = 0;
        if (version.onOrAfter(Version.V_6_1_0)) {
            Set<String> seenRangeFields = new HashSet<>();
        if (version.onOrAfter(Version.V_6_1_0) &&
            // Having ranges would mean we need to juggle with the msm and that complicates this logic a lot,
            // so for now let's not do it.
            hasRangeExtractions == false) {
            // Figure out what the combined msm is for this disjunction:
            // (sum the lowest required clauses, otherwise we're too strict and queries may not match)
            clauses = clauses.stream()
                .filter(o -> o.msm > 0)
                .sorted(Comparator.comparingInt(o -> o.msm))
                .filter(val -> val > 0)
                .sorted()
                .collect(Collectors.toList());
            int limit = Math.min(clauses.size(), Math.max(1, requiredShouldClauses));
            for (int i = 0; i < limit; i++) {
                if (clauses.get(i).rangeFieldNames.isEmpty() == false) {
                    for (String rangeField : clauses.get(i).rangeFieldNames) {
                        if (seenRangeFields.add(rangeField)) {
                            msm += 1;
                        }
                    }
                } else {
                    msm += clauses.get(i).msm;

            // When there are duplicated query extractions, percolator can no longer reliably determine msm across this disjunction
            if (hasDuplicateTerms) {
                // pick lowest msm:
                msm = clauses.get(0);
            } else {
                int limit = Math.min(clauses.size(), Math.max(1, requiredShouldClauses));
                for (int i = 0; i < limit; i++) {
                    msm += clauses.get(i);
                }
            }
        } else {
@@ -626,17 +646,6 @@ final class QueryAnalyzer {
        }
    }

    static class DisjunctionClause {

        final int msm;
        final Set<String> rangeFieldNames;

        DisjunctionClause(int msm, Set<String> rangeFieldNames) {
            this.msm = msm;
            this.rangeFieldNames = rangeFieldNames;
        }
    }

    /**
     * Return an extraction for the conjunction of {@code result1} and {@code result2}
     * by picking up clauses that look most restrictive and making it unverified if

@@ -38,9 +38,12 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queries.BlendedTermQuery;
import org.apache.lucene.queries.CommonTermsQuery;
@@ -318,6 +321,103 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
        return builder.build();
    }

    public void testDuel2() throws Exception {
        List<String> stringValues = new ArrayList<>();
        stringValues.add("value1");
        stringValues.add("value2");
        stringValues.add("value3");

        MappedFieldType intFieldType = mapperService.documentMapper("type").mappers()
            .getMapper("int_field").fieldType();
        List<int[]> ranges = new ArrayList<>();
        ranges.add(new int[]{-5, 5});
        ranges.add(new int[]{0, 10});
        ranges.add(new int[]{15, 50});

        List<ParseContext.Document> documents = new ArrayList<>();
        {
            addQuery(new TermQuery(new Term("string_field", randomFrom(stringValues))), documents);
        }
        {
            addQuery(new PhraseQuery(0, "string_field", stringValues.toArray(new String[0])), documents);
        }
        {
            int[] range = randomFrom(ranges);
            Query rangeQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, null);
            addQuery(rangeQuery, documents);
        }
        {
            int numBooleanQueries = randomIntBetween(1, 5);
            for (int i = 0; i < numBooleanQueries; i++) {
                Query randomBQ = randomBQ(1, stringValues, ranges, intFieldType);
                addQuery(randomBQ, documents);
            }
        }
        {
            addQuery(new MatchNoDocsQuery(), documents);
        }
        {
            addQuery(new MatchAllDocsQuery(), documents);
        }

        indexWriter.addDocuments(documents);
        indexWriter.close();
        directoryReader = DirectoryReader.open(directory);
        IndexSearcher shardSearcher = newSearcher(directoryReader);
        // Disable query cache, because ControlQuery cannot be cached...
        shardSearcher.setQueryCache(null);

        Document document = new Document();
        for (String value : stringValues) {
            document.add(new TextField("string_field", value, Field.Store.NO));
            logger.info("Test with document: {}", document);
            MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
            duelRun(queryStore, memoryIndex, shardSearcher);
        }
        for (int[] range : ranges) {
            List<Field> numberFields =
                NumberFieldMapper.NumberType.INTEGER.createFields("int_field", between(range[0], range[1]), true, true, false);
            for (Field numberField : numberFields) {
                document.add(numberField);
            }
            logger.info("Test with document: {}", document);
            MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
            duelRun(queryStore, memoryIndex, shardSearcher);
        }
    }

    private BooleanQuery randomBQ(int depth, List<String> stringValues, List<int[]> ranges, MappedFieldType intFieldType) {
        final int numClauses = randomIntBetween(1, 4);
        final boolean onlyShouldClauses = randomBoolean();
        final BooleanQuery.Builder builder = new BooleanQuery.Builder();

        int numShouldClauses = 0;
        for (int i = 0; i < numClauses; i++) {
            Query subQuery;
            if (randomBoolean() && depth <= 3) {
                subQuery = randomBQ(depth + 1, stringValues, ranges, intFieldType);
            } else if (randomBoolean()) {
                int[] range = randomFrom(ranges);
                subQuery = intFieldType.rangeQuery(range[0], range[1], true, true, null, null, null, null);
            } else {
                subQuery = new TermQuery(new Term("string_field", randomFrom(stringValues)));
            }

            Occur occur;
            if (onlyShouldClauses) {
                occur = Occur.SHOULD;
            } else {
                occur = randomFrom(Arrays.asList(Occur.FILTER, Occur.MUST, Occur.SHOULD));
            }
            if (occur == Occur.SHOULD) {
                numShouldClauses++;
            }
            builder.add(subQuery, occur);
        }
        builder.setMinimumNumberShouldMatch(randomIntBetween(0, numShouldClauses));
        return builder.build();
    }

    public void testDuelIdBased() throws Exception {
        List<Function<String, Query>> queryFunctions = new ArrayList<>();
        queryFunctions.add((id) -> new PrefixQuery(new Term("field", id)));
@@ -858,6 +958,90 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
        assertEquals(1, topDocs.scoreDocs[1].doc);
    }

    public void testDuplicatedClauses2() throws Exception {
        List<ParseContext.Document> docs = new ArrayList<>();

        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setMinimumNumberShouldMatch(3);
        builder.add(new TermQuery(new Term("field", "value1")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value2")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value2")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value3")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value3")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value3")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value4")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value5")), Occur.SHOULD);
        addQuery(builder.build(), docs);

        indexWriter.addDocuments(docs);
        indexWriter.close();
        directoryReader = DirectoryReader.open(directory);
        IndexSearcher shardSearcher = newSearcher(directoryReader);
        shardSearcher.setQueryCache(null);

        Version v = Version.CURRENT;
        List<BytesReference> sources = Collections.singletonList(new BytesArray("{}"));

        MemoryIndex memoryIndex = new MemoryIndex();
        memoryIndex.addField("field", "value1 value4 value5", new WhitespaceAnalyzer());
        IndexSearcher percolateSearcher = memoryIndex.createSearcher();
        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
        assertEquals(1L, topDocs.totalHits);
        assertEquals(0, topDocs.scoreDocs[0].doc);

        memoryIndex = new MemoryIndex();
        memoryIndex.addField("field", "value1 value2", new WhitespaceAnalyzer());
        percolateSearcher = memoryIndex.createSearcher();
        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
        assertEquals(1L, topDocs.totalHits);
        assertEquals(0, topDocs.scoreDocs[0].doc);

        memoryIndex = new MemoryIndex();
        memoryIndex.addField("field", "value3", new WhitespaceAnalyzer());
        percolateSearcher = memoryIndex.createSearcher();
        query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
        topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
        assertEquals(1L, topDocs.totalHits);
        assertEquals(0, topDocs.scoreDocs[0].doc);
    }

    public void testMsmAndRanges_disjunction() throws Exception {
        // Recreates a similar scenario that made testDuel() fail randomly:
        // https://github.com/elastic/elasticsearch/issues/29393
        List<ParseContext.Document> docs = new ArrayList<>();
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.setMinimumNumberShouldMatch(2);

        BooleanQuery.Builder builder1 = new BooleanQuery.Builder();
        builder1.add(new TermQuery(new Term("field", "value1")), Occur.FILTER);
        builder.add(builder1.build(), Occur.SHOULD);
        builder.add(new TermQuery(new Term("field", "value2")), Occur.MUST_NOT);
        builder.add(IntPoint.newRangeQuery("int_field", 0, 5), Occur.SHOULD);
        builder.add(IntPoint.newRangeQuery("int_field", 6, 10), Occur.SHOULD);
        addQuery(builder.build(), docs);

        indexWriter.addDocuments(docs);
        indexWriter.close();
        directoryReader = DirectoryReader.open(directory);
        IndexSearcher shardSearcher = newSearcher(directoryReader);
        shardSearcher.setQueryCache(null);

        Version v = Version.CURRENT;
        List<BytesReference> sources = Collections.singletonList(new BytesArray("{}"));

        Document document = new Document();
        document.add(new IntPoint("int_field", 4));
        document.add(new IntPoint("int_field", 7));
        MemoryIndex memoryIndex = MemoryIndex.fromDocument(document, new WhitespaceAnalyzer());
        IndexSearcher percolateSearcher = memoryIndex.createSearcher();
        PercolateQuery query = (PercolateQuery) fieldType.percolateQuery("_name", queryStore, sources, percolateSearcher, v);
        TopDocs topDocs = shardSearcher.search(query, 10, new Sort(SortField.FIELD_DOC), true, true);
        assertEquals(1L, topDocs.totalHits);
        assertEquals(0, topDocs.scoreDocs[0].doc);
    }

    private void duelRun(PercolateQuery.QueryStore queryStore, MemoryIndex memoryIndex, IndexSearcher shardSearcher) throws IOException {
        boolean requireScore = randomBoolean();
        IndexSearcher percolateSearcher = memoryIndex.createSearcher();
@@ -900,7 +1084,17 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {

                // Additional stored information that is useful when debugging:
                String queryToString = shardSearcher.doc(controlTopDocs.scoreDocs[i].doc).get("query_to_string");
                logger.error("topDocs.scoreDocs[{}].query_to_string={}", i, queryToString);
                logger.error("controlTopDocs.scoreDocs[{}].query_to_string={}", i, queryToString);

                TermsEnum tenum = MultiFields.getFields(shardSearcher.getIndexReader()).terms(fieldType.queryTermsField.name()).iterator();
                StringBuilder builder = new StringBuilder();
                for (BytesRef term = tenum.next(); term != null; term = tenum.next()) {
                    PostingsEnum penum = tenum.postings(null);
                    if (penum.advance(controlTopDocs.scoreDocs[i].doc) == controlTopDocs.scoreDocs[i].doc) {
                        builder.append(term.utf8ToString()).append(',');
                    }
                }
                logger.error("controlTopDocs.scoreDocs[{}].query_terms_field={}", i, builder.toString());

                NumericDocValues numericValues =
                    MultiDocValues.getNumericValues(shardSearcher.getIndexReader(), fieldType.minimumShouldMatchField.name());
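The PercolatorFieldMapperTests assertions below read back two indexed artifacts of query analysis: the extracted terms, stored as the field name and value joined by a NUL byte, and the extracted msm. A tiny, assumed sketch of the term encoding the assertions rely on (illustrative only, not the mapper's actual helper):

final class ExtractionEncodingSketch {

    // Each extracted term is indexed as "<field>\0<value>", which is the
    // shape the assertions below compare against (e.g. "field\0value2").
    static String encodeExtraction(String fieldName, String value) {
        return fieldName + '\0' + value;
    }
}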
@@ -877,7 +877,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
        assertThat(values.get(1), equalTo("field\0value2"));
        assertThat(values.get(2), equalTo("field\0value3"));
        int msm = doc.rootDoc().getFields(fieldType.minimumShouldMatchField.name())[0].numericValue().intValue();
        assertThat(msm, equalTo(3));
        assertThat(msm, equalTo(2));

        qb = boolQuery()
            .must(boolQuery().must(termQuery("field", "value1")).must(termQuery("field", "value2")))

@@ -901,7 +901,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
        assertThat(values.get(3), equalTo("field\0value4"));
        assertThat(values.get(4), equalTo("field\0value5"));
        msm = doc.rootDoc().getFields(fieldType.minimumShouldMatchField.name())[0].numericValue().intValue();
        assertThat(msm, equalTo(4));
        assertThat(msm, equalTo(2));

        qb = boolQuery()
            .minimumShouldMatch(3)
@@ -65,6 +65,7 @@ import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;

@@ -175,7 +176,7 @@ public class QueryAnalyzerTests extends ESTestCase {
        assertTermsEqual(result.extractions, new Term("_field", "_term1"), new Term("_field", "_term2"));
        assertEquals(1, result.minimumShouldMatch); // because of the dup term
    }

    public void testExtractQueryMetadata_booleanQuery() {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();

@@ -1295,7 +1296,7 @@ public class QueryAnalyzerTests extends ESTestCase {
        boolQuery.add(LongPoint.newRangeQuery("_field2", 10, 15), BooleanClause.Occur.SHOULD);
        result = analyze(boolQuery.build(), Version.CURRENT);
        assertFalse(result.verified);
        assertThat(result.minimumShouldMatch, equalTo(2));
        assertThat(result.minimumShouldMatch, equalTo(1));
        assertEquals(2, result.extractions.size());
        assertEquals("_field2", new ArrayList<>(result.extractions).get(0).range.fieldName);
        assertEquals("_field1", new ArrayList<>(result.extractions).get(1).range.fieldName);

@@ -1335,9 +1336,9 @@ public class QueryAnalyzerTests extends ESTestCase {
            BooleanClause.Occur.MUST
        );
        Result result = analyze(builder.build(), Version.CURRENT);
        assertThat(result.verified, is(true));
        assertThat(result.verified, is(false));
        assertThat(result.matchAllDocs, is(false));
        assertThat(result.minimumShouldMatch, equalTo(4));
        assertThat(result.minimumShouldMatch, equalTo(2));
        assertTermsEqual(result.extractions, new Term("field", "value1"), new Term("field", "value2"),
            new Term("field", "value3"), new Term("field", "value4"));

@@ -1371,6 +1372,21 @@ public class QueryAnalyzerTests extends ESTestCase {
            new Term("field", "value3"), new Term("field", "value4"));
    }

    public void testEmptyQueries() {
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        Result result = analyze(builder.build(), Version.CURRENT);
        assertThat(result.verified, is(false));
        assertThat(result.matchAllDocs, is(false));
        assertThat(result.minimumShouldMatch, equalTo(0));
        assertThat(result.extractions.size(), equalTo(0));

        result = analyze(new DisjunctionMaxQuery(Collections.emptyList(), 0f), Version.CURRENT);
        assertThat(result.verified, is(false));
        assertThat(result.matchAllDocs, is(false));
        assertThat(result.minimumShouldMatch, equalTo(0));
        assertThat(result.extractions.size(), equalTo(0));
    }

    private static void assertDimension(byte[] expected, Consumer<byte[]> consumer) {
        byte[] dest = new byte[expected.length];
        consumer.accept(dest);