Factor out filling of TopDocs in SearchPhaseController (#23380)
Previously this code was duplicated across the three different `TopDocs` variants we have. It also had no real unit test (one that exercises holes in the results), which allowed a sneaky bug to slip in: the comparison used `result.size()` instead of `results.size()`, causing several NPEs downstream. This change adds a static method that fills the top docs and is shared across all variants, and adds a unit test that would have caught the issue very quickly.

Closes #19356
Closes #23357
parent 3215ac11ec
commit b8e2d12b23
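Before the diff itself, here is a minimal, self-contained sketch of the pattern the new `fillTopDocs` helper implements: pre-fill the per-shard array with an empty placeholder whenever some shards are missing, then write each present result into the slot given by its index. The names below (`FillTopDocsSketch`, `ShardResult`, the `String` placeholders) are illustrative stand-ins, not the Elasticsearch/Lucene types (`AtomicArray.Entry`, `QuerySearchResultProvider`, `TopDocs`) used in the actual change.

```java
import java.util.Arrays;
import java.util.List;

// Minimal sketch only; ShardResult and the String "docs" stand in for the real
// AtomicArray.Entry / QuerySearchResultProvider / TopDocs types from the diff below.
public class FillTopDocsSketch {

    // One shard's result: its slot index in the result array plus its (placeholder) top docs.
    record ShardResult(int index, String topDocs) {}

    // Shared fill logic: pad every slot with an empty placeholder when some shards are
    // missing, then copy each present result into its own slot, keyed by its index.
    static void fillTopDocs(String[] shardTopDocs, List<ShardResult> results, String emptyTopDocs) {
        if (results.size() != shardTopDocs.length) {
            // Some shards returned nothing; pre-fill so a later merge never sees null.
            Arrays.fill(shardTopDocs, emptyTopDocs);
        }
        for (ShardResult result : results) {
            shardTopDocs[result.index()] = result.topDocs();
        }
    }

    public static void main(String[] args) {
        String[] slots = new String[3];
        // Shard 1 produced no result -- a "hole" like the ones the new unit test injects.
        fillTopDocs(slots,
            List.of(new ShardResult(0, "topDocsShard0"), new ShardResult(2, "topDocsShard2")),
            "EMPTY_TOP_DOCS");
        System.out.println(Arrays.toString(slots)); // [topDocsShard0, EMPTY_TOP_DOCS, topDocsShard2]
    }
}
```

The key point, and what the new test exercises, is that slots belonging to shards that returned no result end up holding the shared empty placeholder instead of `null`, so the downstream `TopDocs#merge` never trips over a hole.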
SearchPhaseController.java

@@ -66,6 +66,7 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

@@ -233,47 +234,19 @@ public class SearchPhaseController extends AbstractComponent {
        if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
            CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
            final Sort sort = new Sort(firstTopDocs.fields);

            final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
            if (result.size() != shardTopDocs.length) {
                // TopDocs#merge can't deal with null shard TopDocs
                final CollapseTopFieldDocs empty = new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
                    sort.getSort(), new Object[0], Float.NaN);
                Arrays.fill(shardTopDocs, empty);
            }
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
                TopDocs topDocs = sortedResult.value.queryResult().topDocs();
                // the 'index' field is the position in the resultsArr atomic array
                shardTopDocs[sortedResult.index] = (CollapseTopFieldDocs) topDocs;
            }
            fillTopDocs(shardTopDocs, results, new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0],
                sort.getSort(), new Object[0], Float.NaN));
            mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
        } else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
            TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
            final Sort sort = new Sort(firstTopDocs.fields);

            final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
            if (result.size() != shardTopDocs.length) {
                // TopDocs#merge can't deal with null shard TopDocs
                final TopFieldDocs empty = new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN);
                Arrays.fill(shardTopDocs, empty);
            }
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
                TopDocs topDocs = sortedResult.value.queryResult().topDocs();
                // the 'index' field is the position in the resultsArr atomic array
                shardTopDocs[sortedResult.index] = (TopFieldDocs) topDocs;
            }
            fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
            mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs);
        } else {
            final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
            if (result.size() != shardTopDocs.length) {
                // TopDocs#merge can't deal with null shard TopDocs
                Arrays.fill(shardTopDocs, Lucene.EMPTY_TOP_DOCS);
            }
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
                TopDocs topDocs = sortedResult.value.queryResult().topDocs();
                // the 'index' field is the position in the resultsArr atomic array
                shardTopDocs[sortedResult.index] = topDocs;
            }
            fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
            mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs);
        }

@@ -314,6 +287,20 @@ public class SearchPhaseController extends AbstractComponent {
        return scoreDocs;
    }

    static <T extends TopDocs> void fillTopDocs(T[] shardTopDocs,
                                                List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results,
                                                T empytTopDocs) {
        if (results.size() != shardTopDocs.length) {
            // TopDocs#merge can't deal with null shard TopDocs
            Arrays.fill(shardTopDocs, empytTopDocs);
        }
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> resultProvider : results) {
            final T topDocs = (T) resultProvider.value.queryResult().topDocs();
            assert topDocs != null : "top docs must not be null in a valid result";
            // the 'index' field is the position in the resultsArr atomic array
            shardTopDocs[resultProvider.index] = topDocs;
        }
    }

    public ScoreDoc[] getLastEmittedDocPerShard(ReducedQueryPhase reducedQueryPhase,
                                                ScoreDoc[] sortedScoreDocs, int numShards) {
        ScoreDoc[] lastEmittedDocPerShard = new ScoreDoc[numShards];
SearchPhaseControllerTests.java

@@ -21,6 +21,7 @@ package org.elasticsearch.action.search;

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.BigArrays;

@@ -347,4 +348,31 @@ public class SearchPhaseControllerTests extends ESTestCase {
            }
        }
    }

    public void testFillTopDocs() {
        final int maxIters = randomIntBetween(5, 15);
        for (int iters = 0; iters < maxIters; iters++) {
            TopDocs[] topDocs = new TopDocs[randomIntBetween(2, 100)];
            int numShards = topDocs.length;
            AtomicArray<QuerySearchResultProvider> resultProviderAtomicArray = generateQueryResults(numShards, Collections.emptyList(),
                2, randomBoolean());
            if (randomBoolean()) {
                int maxNull = randomIntBetween(1, topDocs.length - 1);
                for (int i = 0; i < maxNull; i++) {
                    resultProviderAtomicArray.set(randomIntBetween(0, resultProviderAtomicArray.length() - 1), null);
                }
            }
            SearchPhaseController.fillTopDocs(topDocs, resultProviderAtomicArray.asList(), Lucene.EMPTY_TOP_DOCS);
            for (int i = 0; i < topDocs.length; i++) {
                assertNotNull(topDocs[i]);
                if (topDocs[i] == Lucene.EMPTY_TOP_DOCS) {
                    assertNull(resultProviderAtomicArray.get(i));
                } else {
                    assertNotNull(resultProviderAtomicArray.get(i));
                    assertNotNull(resultProviderAtomicArray.get(i).queryResult());
                    assertSame(resultProviderAtomicArray.get(i).queryResult().topDocs(), topDocs[i]);
                }
            }
        }
    }
}