Remove unnecessary IndexSearcher field on HitContext (#62378)

FastVectorHighlighter rewrites queries against the top-level reader, which it
currently obtains via an IndexSearcher field on HitContext. However, that
top-level reader is already reachable from HitContext's existing
LeafReaderContext field.

This commit removes the unnecessary field and constructor parameter, and
changes the implementation of topLevelReader() to go via ReaderUtil and
the leaf reader context.
Alan Woodward authored on 2020-09-15 15:03:38 +01:00, committed by Alan Woodward
commit f89fa421e2 (parent 7dd073c243)
5 changed files with 11 additions and 14 deletions
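
For background on the new topLevelReader() implementation: Lucene's ReaderUtil can walk from any LeafReaderContext back up to the top-level IndexReaderContext, so HitContext does not need an IndexSearcher just to reach the top-level reader. The standalone sketch below is not part of this commit; the class name TopLevelReaderExample and the toy in-memory index are purely illustrative, and it only demonstrates the same ReaderUtil.getTopLevelContext(...).reader() call used in the diff.

// Standalone sketch (assumption: plain Lucene on the classpath; names here are illustrative).
// Shows that ReaderUtil.getTopLevelContext(leafContext).reader() yields the same top-level
// IndexReader that an IndexSearcher would have exposed via getIndexReader().
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class TopLevelReaderExample {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new ByteBuffersDirectory()) {
            // Index a single toy document so the reader has one leaf.
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Document doc = new Document();
                doc.add(new TextField("field", "some value", Field.Store.YES));
                writer.addDocument(doc);
            }
            try (IndexReader topLevel = DirectoryReader.open(dir)) {
                LeafReaderContext leaf = topLevel.leaves().get(0);
                // Same approach as the new HitContext#topLevelReader(): walk up from the leaf.
                IndexReader viaReaderUtil = ReaderUtil.getTopLevelContext(leaf).reader();
                System.out.println("same top-level reader: " + (viaReaderUtil == topLevel));
            }
        }
    }
}

With that, FastVectorHighlighter can still rewrite its queries against the top-level reader as before, just without HitContext carrying an IndexSearcher around for the purpose.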


@@ -97,7 +97,7 @@ final class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
HitContext subContext = new HitContext(
new SearchHit(slot, "unknown", new Text(hit.hit().getType()),
Collections.emptyMap(), Collections.emptyMap()),
- percolatorLeafReaderContext, slot, percolatorIndexSearcher, new HashMap<>()
+ percolatorLeafReaderContext, slot, new HashMap<>()
);
subContext.sourceLookup().setSource(document);
// force source because MemoryIndex does not store fields


@@ -66,7 +66,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
LeafReaderContext context = reader.leaves().get(0);
// A match:
{
- HitContext hit = new HitContext(new SearchHit(0), context, 0, null, new HashMap<>());
+ HitContext hit = new HitContext(new SearchHit(0), context, 0, new HashMap<>());
PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value"));
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());
@@ -87,7 +87,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
// No match:
{
- HitContext hit = new HitContext(new SearchHit(0), context, 0, null, new HashMap<>());
+ HitContext hit = new HitContext(new SearchHit(0), context, 0, new HashMap<>());
PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value"));
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer());
@@ -107,7 +107,7 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
// No query:
{
- HitContext hit = new HitContext(new SearchHit(0), context, 0, null, new HashMap<>());
+ HitContext hit = new HitContext(new SearchHit(0), context, 0, new HashMap<>());
PercolateQuery.QueryStore queryStore = ctx -> docId -> null;
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());


@@ -265,7 +265,7 @@ public class FetchPhase {
if (fieldsVisitor == null) {
SearchHit hit = new SearchHit(docId, null, typeText, null, null);
- return new HitContext(hit, subReaderContext, subDocId, context.searcher(), sharedCache);
+ return new HitContext(hit, subReaderContext, subDocId, sharedCache);
} else {
SearchHit hit;
loadStoredFields(context.mapperService(), subReaderContext, fieldsVisitor, subDocId);
@@ -279,7 +279,7 @@ public class FetchPhase {
hit = new SearchHit(docId, uid.id(), typeText, emptyMap(), emptyMap());
}
- HitContext hitContext = new HitContext(hit, subReaderContext, subDocId, context.searcher(), sharedCache);
+ HitContext hitContext = new HitContext(hit, subReaderContext, subDocId, sharedCache);
if (fieldsVisitor.source() != null) {
hitContext.sourceLookup().setSource(fieldsVisitor.source());
}
@@ -359,7 +359,7 @@ public class FetchPhase {
getInternalNestedIdentity(context, nestedDocId, subReaderContext, context.mapperService(), nestedObjectMapper);
SearchHit hit = new SearchHit(nestedTopDocId, rootId.id(), typeText, nestedIdentity, docFields, metaFields);
- HitContext hitContext = new HitContext(hit, subReaderContext, nestedDocId, context.searcher(), sharedCache);
+ HitContext hitContext = new HitContext(hit, subReaderContext, nestedDocId, sharedCache);
if (rootSourceAsMap != null) {
// Isolate the nested json array object that matches with nested hit and wrap it back into the same json


@@ -21,7 +21,7 @@ package org.elasticsearch.search.fetch;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
- import org.apache.lucene.search.IndexSearcher;
+ import org.apache.lucene.index.ReaderUtil;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;
@@ -36,18 +36,15 @@ public interface FetchSubPhase {
class HitContext {
private final SearchHit hit;
- private final IndexSearcher searcher;
private final LeafReaderContext readerContext;
private final int docId;
private final SourceLookup sourceLookup = new SourceLookup();
private final Map<String, Object> cache;
- public HitContext(SearchHit hit, LeafReaderContext context, int docId, IndexSearcher searcher,
-                   Map<String, Object> cache) {
+ public HitContext(SearchHit hit, LeafReaderContext context, int docId, Map<String, Object> cache) {
this.hit = hit;
this.readerContext = context;
this.docId = docId;
- this.searcher = searcher;
this.sourceLookup.setSegmentAndDocument(context, docId);
this.cache = cache;
}
@@ -83,7 +80,7 @@ public interface FetchSubPhase {
}
public IndexReader topLevelReader() {
- return searcher.getIndexReader();
+ return ReaderUtil.getTopLevelContext(readerContext).reader();
}
// TODO move this into Highlighter


@@ -159,7 +159,7 @@ public class FetchSourcePhaseTests extends ESTestCase {
// We don't need a real index, just a LeafReaderContext which cannot be mocked.
MemoryIndex index = new MemoryIndex();
LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0);
- HitContext hitContext = new HitContext(searchHit, leafReaderContext, 1, null, new HashMap<>());
+ HitContext hitContext = new HitContext(searchHit, leafReaderContext, 1, new HashMap<>());
hitContext.sourceLookup().setSource(source == null ? null : BytesReference.bytes(source));
FetchSourcePhase phase = new FetchSourcePhase();