Parallelize knn query rewrite across slices rather than segments (#12325)

The concurrent query rewrite for knn vector query introduced with #12160
requests one thread per segment from the executor. To align this with the
IndexSearcher parallel behaviour, we should rather parallelize across
slices. Also, we can reuse the same slice executor instance that the
index searcher already holds; that way, a QueueSizeBasedExecutor is used
when a thread pool executor is provided.
Luca Cavanna 2023-05-26 09:17:25 +02:00 committed by GitHub
parent c188d47a8b
commit 10bebde269
2 changed files with 40 additions and 22 deletions
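For context, the parallel path changed below is only taken when the IndexSearcher is built with an executor, which is also when it computes leaf slices. A minimal usage sketch, assuming Lucene 9.x; the index path, field name, and query vector are illustrative:

import java.nio.file.Paths;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.KnnFloatVectorQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class KnnRewriteDemo {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    try (FSDirectory dir = FSDirectory.open(Paths.get("/path/to/index"));
        DirectoryReader reader = DirectoryReader.open(dir)) {
      // An IndexSearcher built with an executor computes leaf slices; with this
      // change the knn query rewrite submits one task per slice instead of one
      // task per segment.
      IndexSearcher searcher = new IndexSearcher(reader, executor);
      float[] queryVector = {0.1f, 0.2f, 0.3f}; // dimension must match the indexed field
      TopDocs topDocs = searcher.search(new KnnFloatVectorQuery("vector", queryVector, 10), 10);
      System.out.println("total hits: " + topDocs.totalHits);
    } finally {
      executor.shutdown();
    }
  }
}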

lucene/core/src/java/org/apache/lucene/search/AbstractKnnVectorQuery.java

@@ -19,12 +19,12 @@ package org.apache.lucene.search;
 import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executor;
 import java.util.concurrent.FutureTask;
 import org.apache.lucene.codecs.KnnVectorsReader;
 import org.apache.lucene.index.FieldInfo;
@@ -81,11 +81,12 @@ abstract class AbstractKnnVectorQuery extends Query {
       filterWeight = null;
     }
 
-    Executor executor = indexSearcher.getExecutor();
+    SliceExecutor sliceExecutor = indexSearcher.getSliceExecutor();
+    // in case of parallel execution, the leaf results are not ordered by leaf context's ordinal
     TopDocs[] perLeafResults =
-        (executor == null)
+        (sliceExecutor == null)
             ? sequentialSearch(reader.leaves(), filterWeight)
-            : parallelSearch(reader.leaves(), filterWeight, executor);
+            : parallelSearch(indexSearcher.getSlices(), filterWeight, sliceExecutor);
 
     // Merge sort the results
     TopDocs topK = TopDocs.merge(k, perLeafResults);
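The comment added above is the key caveat: with slice-level tasks, the perLeafResults array is no longer ordered by leaf ordinal. That is safe because TopDocs.merge sorts hits by score and uses array position only to break ties. A standalone sketch with made-up global doc ids and scores:

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

public class MergeDemo {
  public static void main(String[] args) {
    // Two per-leaf results, passed in arbitrary leaf order.
    TopDocs leafA = new TopDocs(new TotalHits(2, TotalHits.Relation.EQUAL_TO),
        new ScoreDoc[] {new ScoreDoc(7, 0.9f), new ScoreDoc(3, 0.4f)});
    TopDocs leafB = new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO),
        new ScoreDoc[] {new ScoreDoc(12, 0.7f)});
    // Array order does not affect which hits win: merge sorts by score.
    TopDocs topK = TopDocs.merge(2, new TopDocs[] {leafB, leafA});
    for (ScoreDoc sd : topK.scoreDocs) {
      System.out.println(sd.doc + " " + sd.score); // prints doc 7 (0.9), then doc 12 (0.7)
    }
  }
}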
@@ -109,27 +110,40 @@ abstract class AbstractKnnVectorQuery extends Query {
   }
 
   private TopDocs[] parallelSearch(
-      List<LeafReaderContext> leafReaderContexts, Weight filterWeight, Executor executor) {
-    List<FutureTask<TopDocs>> tasks =
-        leafReaderContexts.stream()
-            .map(ctx -> new FutureTask<>(() -> searchLeaf(ctx, filterWeight)))
-            .toList();
+      IndexSearcher.LeafSlice[] slices, Weight filterWeight, SliceExecutor sliceExecutor) {
+    List<FutureTask<TopDocs[]>> tasks = new ArrayList<>(slices.length);
+    int segmentsCount = 0;
+    for (IndexSearcher.LeafSlice slice : slices) {
+      segmentsCount += slice.leaves.length;
+      tasks.add(
+          new FutureTask<>(
+              () -> {
+                TopDocs[] results = new TopDocs[slice.leaves.length];
+                int i = 0;
+                for (LeafReaderContext context : slice.leaves) {
+                  results[i++] = searchLeaf(context, filterWeight);
+                }
+                return results;
+              }));
+    }
 
-    SliceExecutor sliceExecutor = new SliceExecutor(executor);
     sliceExecutor.invokeAll(tasks);
-    return tasks.stream()
-        .map(
-            task -> {
-              try {
-                return task.get();
-              } catch (ExecutionException e) {
-                throw new RuntimeException(e.getCause());
-              } catch (InterruptedException e) {
-                throw new ThreadInterruptedException(e);
-              }
-            })
-        .toArray(TopDocs[]::new);
+
+    TopDocs[] topDocs = new TopDocs[segmentsCount];
+    int i = 0;
+    for (FutureTask<TopDocs[]> task : tasks) {
+      try {
+        for (TopDocs docs : task.get()) {
+          topDocs[i++] = docs;
+        }
+      } catch (ExecutionException e) {
+        throw new RuntimeException(e.getCause());
+      } catch (InterruptedException e) {
+        throw new ThreadInterruptedException(e);
+      }
+    }
+    return topDocs;
   }
 
   private TopDocs searchLeaf(LeafReaderContext ctx, Weight filterWeight) throws IOException {
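Stripped of Lucene types, the new fan-out shape is: one task per slice, sequential per-segment work inside each task, then flattening the per-slice results back into a single per-segment array. A self-contained sketch, where searchSegment stands in for the real per-segment search:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SliceFanOut {
  // Stand-in for the real per-segment search.
  static int searchSegment(int segment) {
    return segment * segment;
  }

  static int[] searchAllSlices(int[][] slices, ExecutorService executor)
      throws InterruptedException, ExecutionException {
    List<Future<int[]>> tasks = new ArrayList<>(slices.length);
    int segmentCount = 0;
    for (int[] slice : slices) {
      segmentCount += slice.length;
      // One task per slice; the segments within a slice run sequentially on one thread.
      tasks.add(executor.submit(() -> {
        int[] results = new int[slice.length];
        for (int i = 0; i < slice.length; i++) {
          results[i] = searchSegment(slice[i]);
        }
        return results;
      }));
    }
    // Flatten the per-slice results back into a single per-segment array.
    int[] all = new int[segmentCount];
    int i = 0;
    for (Future<int[]> task : tasks) {
      for (int r : task.get()) {
        all[i++] = r;
      }
    }
    return all;
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    try {
      int[][] slices = {{1, 2}, {3}, {4, 5, 6}};
      System.out.println(Arrays.toString(searchAllSlices(slices, pool)));
    } finally {
      pool.shutdown();
    }
  }
}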

lucene/core/src/java/org/apache/lucene/search/IndexSearcher.java

@@ -962,6 +962,10 @@ public class IndexSearcher {
     return executor;
   }
 
+  SliceExecutor getSliceExecutor() {
+    return sliceExecutor;
+  }
+
   /**
    * Thrown when an attempt is made to add more than {@link #getMaxClauseCount()} clauses. This
    * typically happens if a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery is expanded to
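Since the rewrite now fans out one task per slice, the searcher's slice computation bounds its parallelism. One way to tune that is to override the protected IndexSearcher#slices hook; a sketch using the public static slices(leaves, maxDocsPerSlice, maxSegmentsPerSlice) helper, with illustrative thresholds:

import java.util.List;
import java.util.concurrent.Executor;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.IndexSearcher;

class TunedSliceSearcher extends IndexSearcher {
  TunedSliceSearcher(DirectoryReader reader, Executor executor) {
    super(reader, executor);
  }

  @Override
  protected LeafSlice[] slices(List<LeafReaderContext> leaves) {
    // Cap each slice at roughly 250k docs or 5 segments (illustrative numbers):
    // fewer, larger slices mean fewer rewrite tasks; more slices mean more parallelism.
    return slices(leaves, 250_000, 5);
  }
}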