FetchSubPhase has two 'execute' methods: one that takes all hits to be examined, and one that takes a single HitContext. It is not obvious which one a given sub-phase should implement, or whether implementing both is allowed; nor is it obvious that the hitExecute methods of all sub-phases run first, followed by all of the hitsExecute methods.

This commit reworks FetchSubPhase to replace these two variants with a processor class, `FetchSubPhaseProcessor`, returned from a single `getProcessor` method. The processor exposes two methods, `setNextReader(LeafReaderContext)` and `process(HitContext)`. FetchPhase collects processors from all of its sub-phases (a sub-phase that has nothing to do for the current search context returns `null` from `getProcessor`), sorts its hits by docid, and groups them by Lucene leaf reader. For each reader group it calls `setNextReader()` on every non-null processor, and then passes each hit to `process()` in turn.

Fetch sub-phase implementations can therefore divide their concerns into per-request, per-reader and per-document sections, and no longer need to sort docs or keep track of reader slices: global structures are built in `getProcessor(SearchContext)`, per-reader structures in `setNextReader()`, and per-document work happens in `process()`.
Parent: af01ccee93
Commit: e2f006eeb4
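For illustration, here is a minimal sketch of what a sub-phase looks like under the new API. The class name, the per-request condition and the comments are hypothetical; the `FetchSubPhase`, `FetchSubPhaseProcessor`, `HitContext` and `SearchContext` types and method signatures are the ones introduced or used by this commit.

    import java.io.IOException;
    import org.apache.lucene.index.LeafReaderContext;
    import org.elasticsearch.search.fetch.FetchSubPhase;
    import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
    import org.elasticsearch.search.internal.SearchContext;

    // Hypothetical sub-phase showing the per-request / per-reader / per-document split.
    public class ExampleFetchSubPhase implements FetchSubPhase {

        @Override
        public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) throws IOException {
            // Per-request gate: return null if this sub-phase has nothing to do for this request.
            // (Here the highlight() check is just an example of such a gate.)
            if (searchContext.highlight() == null) {
                return null;
            }
            return new FetchSubPhaseProcessor() {
                LeafReaderContext currentReader; // per-reader state

                @Override
                public void setNextReader(LeafReaderContext readerContext) throws IOException {
                    currentReader = readerContext; // build per-segment structures here
                }

                @Override
                public void process(HitContext hitContext) throws IOException {
                    // Per-document work; hitContext.docId() is relative to currentReader,
                    // and hits arrive in doc id order within each reader.
                }
            };
        }
    }

FetchPhase drives the processor by calling setNextReader() once per leaf reader and process() once per hit, so the sub-phase itself never sorts hits or resolves reader slices.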
@@ -20,7 +20,6 @@
package org.elasticsearch.percolator;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryVisitor;

@@ -30,6 +29,7 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase;
import org.elasticsearch.search.fetch.subphase.highlight.Highlighter;

@@ -39,6 +39,7 @@ import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -53,83 +54,86 @@ final class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
this.highlightPhase = new HighlightPhase(highlighters);
}

boolean hitsExecutionNeeded(SearchContext context) { // for testing
return context.highlight() != null && locatePercolatorQuery(context.query()).isEmpty() == false;
}

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
if (hitsExecutionNeeded(context) == false) {
return;
public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) throws IOException {
if (searchContext.highlight() == null) {
return null;
}
List<PercolateQuery> percolateQueries = locatePercolatorQuery(context.query());
List<PercolateQuery> percolateQueries = locatePercolatorQuery(searchContext.query());
if (percolateQueries.isEmpty()) {
// shouldn't happen as we checked for the existence of a percolator query in hitsExecutionNeeded(...)
throw new IllegalStateException("couldn't locate percolator query");
return null;
}
return new FetchSubPhaseProcessor() {

boolean singlePercolateQuery = percolateQueries.size() == 1;
for (PercolateQuery percolateQuery : percolateQueries) {
String fieldName = singlePercolateQuery ? PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX :
PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
List<LeafReaderContext> ctxs = context.searcher().getIndexReader().leaves();
IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore();
LeafReaderContext ctx;

LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0);
FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
this.ctx = readerContext;
}

for (SearchHit hit : hits) {
LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
int segmentDocId = hit.docId() - ctx.docBase;
final Query query = queryStore.getQueries(ctx).apply(segmentDocId);
if (query != null) {
DocumentField field = hit.field(fieldName);
if (field == null) {
// It possible that a hit did not match with a particular percolate query,
// so then continue highlighting with the next hit.
continue;
}
@Override
public void process(HitContext hit) throws IOException {
boolean singlePercolateQuery = percolateQueries.size() == 1;
for (PercolateQuery percolateQuery : percolateQueries) {
String fieldName = singlePercolateQuery ? PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX :
PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore();

for (Object matchedSlot : field.getValues()) {
int slot = (int) matchedSlot;
BytesReference document = percolateQuery.getDocuments().get(slot);
// Enforce highlighting by source, because MemoryIndex doesn't support stored fields.
SearchHighlightContext highlight = new SearchHighlightContext(context.highlight().fields(), true);
QueryShardContext shardContext = new QueryShardContext(context.getQueryShardContext());
shardContext.freezeContext();
hitContext.reset(
new SearchHit(slot, "unknown", new Text(hit.getType()), Collections.emptyMap(), Collections.emptyMap()),
percolatorLeafReaderContext, slot, percolatorIndexSearcher
);
hitContext.sourceLookup().setSource(document);
hitContext.cache().clear();
highlightPhase.hitExecute(context.shardTarget(), shardContext, query, highlight, hitContext);
for (Map.Entry<String, HighlightField> entry : hitContext.hit().getHighlightFields().entrySet()) {
if (percolateQuery.getDocuments().size() == 1) {
String hlFieldName;
if (singlePercolateQuery) {
hlFieldName = entry.getKey();
LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0);
final Query query = queryStore.getQueries(ctx).apply(hit.docId());
if (query != null) {
DocumentField field = hit.hit().field(fieldName);
if (field == null) {
// It possible that a hit did not match with a particular percolate query,
// so then continue highlighting with the next hit.
continue;
}

for (Object matchedSlot : field.getValues()) {
int slot = (int) matchedSlot;
BytesReference document = percolateQuery.getDocuments().get(slot);
HitContext subContext = new HitContext(
new SearchHit(slot, "unknown", new Text(hit.hit().getType()),
Collections.emptyMap(), Collections.emptyMap()),
percolatorLeafReaderContext, slot, percolatorIndexSearcher, new HashMap<>()
);
subContext.sourceLookup().setSource(document);
// force source because MemoryIndex does not store fields
SearchHighlightContext highlight = new SearchHighlightContext(searchContext.highlight().fields(), true);
QueryShardContext shardContext = new QueryShardContext(searchContext.getQueryShardContext());
FetchSubPhaseProcessor processor = highlightPhase.getProcessor(shardContext, searchContext.shardTarget(),
highlight, query);
processor.process(subContext);
for (Map.Entry<String, HighlightField> entry : subContext.hit().getHighlightFields().entrySet()) {
if (percolateQuery.getDocuments().size() == 1) {
String hlFieldName;
if (singlePercolateQuery) {
hlFieldName = entry.getKey();
} else {
hlFieldName = percolateQuery.getName() + "_" + entry.getKey();
}
hit.hit().getHighlightFields().put(hlFieldName,
new HighlightField(hlFieldName, entry.getValue().fragments()));
} else {
hlFieldName = percolateQuery.getName() + "_" + entry.getKey();
// In case multiple documents are being percolated we need to identify to which document
// a highlight belongs to.
String hlFieldName;
if (singlePercolateQuery) {
hlFieldName = slot + "_" + entry.getKey();
} else {
hlFieldName = percolateQuery.getName() + "_" + slot + "_" + entry.getKey();
}
hit.hit().getHighlightFields().put(hlFieldName,
new HighlightField(hlFieldName, entry.getValue().fragments()));
}
hit.getHighlightFields().put(hlFieldName, new HighlightField(hlFieldName, entry.getValue().fragments()));
} else {
// In case multiple documents are being percolated we need to identify to which document
// a highlight belongs to.
String hlFieldName;
if (singlePercolateQuery) {
hlFieldName = slot + "_" + entry.getKey();
} else {
hlFieldName = percolateQuery.getName() + "_" + slot + "_" + entry.getKey();
}
hit.getHighlightFields().put(hlFieldName, new HighlightField(hlFieldName, entry.getValue().fragments()));
}
}
}
}
}
}
};
}

static List<PercolateQuery> locatePercolatorQuery(Query query) {
@@ -19,7 +19,6 @@
package org.elasticsearch.percolator;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;

@@ -35,11 +34,12 @@ import org.apache.lucene.util.BitSetIterator;
import org.elasticsearch.Version;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

@@ -49,7 +49,7 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
import static org.elasticsearch.percolator.PercolatorHighlightSubFetchPhase.locatePercolatorQuery;

/**
 * Adds a special field to the a percolator query hit to indicate which documents matched with the percolator query.
 * Adds a special field to a percolator query hit to indicate which documents matched with the percolator query.
 * This is useful when multiple documents are being percolated in a single request.
 */
final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {

@@ -57,60 +57,89 @@ final class PercolatorMatchedSlotSubFetchPhase implements FetchSubPhase {
static final String FIELD_NAME_PREFIX = "_percolator_document_slot";

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
innerHitsExecute(context.query(), context.searcher(), hits);
}
public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) throws IOException {

static void innerHitsExecute(Query mainQuery, IndexSearcher indexSearcher, SearchHit[] hits) throws IOException {
List<PercolateQuery> percolateQueries = locatePercolatorQuery(mainQuery);
if (percolateQueries.isEmpty()) {
return;
List<PercolateContext> percolateContexts = new ArrayList<>();
List<PercolateQuery> percolateQueries = locatePercolatorQuery(searchContext.query());
boolean singlePercolateQuery = percolateQueries.size() == 1;
for (PercolateQuery pq : percolateQueries) {
percolateContexts.add(new PercolateContext(pq, singlePercolateQuery));
}
if (percolateContexts.isEmpty()) {
return null;
}

boolean singlePercolateQuery = percolateQueries.size() == 1;
for (PercolateQuery percolateQuery : percolateQueries) {
String fieldName = singlePercolateQuery ? FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
return new FetchSubPhaseProcessor() {

LeafReaderContext ctx;

@Override
public void setNextReader(LeafReaderContext readerContext) {
this.ctx = readerContext;
}

@Override
public void process(HitContext hitContext) throws IOException {
for (PercolateContext pc : percolateContexts) {
String fieldName = pc.fieldName();
Query query = pc.percolateQuery.getQueryStore().getQueries(ctx).apply(hitContext.docId());
if (query == null) {
// This is not a document with a percolator field.
continue;
}
query = pc.filterNestedDocs(query);
IndexSearcher percolatorIndexSearcher = pc.percolateQuery.getPercolatorIndexSearcher();
int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC));
if (topDocs.totalHits.value == 0) {
// This hit didn't match with a percolate query,
// likely to happen when percolating multiple documents
continue;
}

IntStream slots = convertTopDocsToSlots(topDocs, pc.rootDocsBySlot);
// _percolator_document_slot fields are document fields and should be under "fields" section in a hit
hitContext.hit().setDocumentField(fieldName, new DocumentField(fieldName, slots.boxed().collect(Collectors.toList())));
}
}
};
}

static class PercolateContext {
final PercolateQuery percolateQuery;
final boolean singlePercolateQuery;
final int[] rootDocsBySlot;

PercolateContext(PercolateQuery pq, boolean singlePercolateQuery) throws IOException {
this.percolateQuery = pq;
this.singlePercolateQuery = singlePercolateQuery;
IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
Query nonNestedFilter = percolatorIndexSearcher.rewrite(Queries.newNonNestedFilter(Version.CURRENT));
Weight weight = percolatorIndexSearcher.createWeight(nonNestedFilter, ScoreMode.COMPLETE_NO_SCORES, 1f);
Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0));
int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc);
int[] rootDocsBySlot = null;
boolean hasNestedDocs = rootDocs.cardinality() != percolatorIndexSearcher.getIndexReader().numDocs();
if (hasNestedDocs) {
rootDocsBySlot = buildRootDocsSlots(rootDocs);
this.rootDocsBySlot = buildRootDocsSlots(rootDocs);
} else {
this.rootDocsBySlot = null;
}
}

PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore();
List<LeafReaderContext> ctxs = indexSearcher.getIndexReader().leaves();
for (SearchHit hit : hits) {
LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
int segmentDocId = hit.docId() - ctx.docBase;
Query query = queryStore.getQueries(ctx).apply(segmentDocId);
if (query == null) {
// This is not a document with a percolator field.
continue;
}
if (hasNestedDocs) {
// Ensures that we filter out nested documents
query = new BooleanQuery.Builder()
.add(query, BooleanClause.Occur.MUST)
.add(nonNestedFilter, BooleanClause.Occur.FILTER)
.build();
}
String fieldName() {
return singlePercolateQuery ? FIELD_NAME_PREFIX : FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
}

TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc, new Sort(SortField.FIELD_DOC));
if (topDocs.totalHits.value == 0) {
// This hit didn't match with a percolate query,
// likely to happen when percolating multiple documents
continue;
}

IntStream slots = convertTopDocsToSlots(topDocs, rootDocsBySlot);
// _percolator_document_slot fields are document fields and should be under "fields" section in a hit
hit.setDocumentField(fieldName, new DocumentField(fieldName, slots.boxed().collect(Collectors.toList())));
Query filterNestedDocs(Query in) {
if (rootDocsBySlot != null) {
// Ensures that we filter out nested documents
return new BooleanQuery.Builder()
.add(in, BooleanClause.Occur.MUST)
.add(Queries.newNonNestedFilter(Version.CURRENT), BooleanClause.Occur.FILTER)
.build();
}
return in;
}
}
@@ -33,18 +33,18 @@ import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;
import org.mockito.Mockito;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;

import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.sameInstance;

public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {

public void testHitsExecutionNeeded() {
public void testHitsExecutionNeeded() throws IOException {
PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")),
new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), null, new MatchAllDocsQuery());
PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(emptyMap());

@@ -52,9 +52,9 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase {
Mockito.when(searchContext.highlight()).thenReturn(new SearchHighlightContext(Collections.emptyList()));
Mockito.when(searchContext.query()).thenReturn(new MatchAllDocsQuery());

assertThat(subFetchPhase.hitsExecutionNeeded(searchContext), is(false));
assertNull(subFetchPhase.getProcessor(searchContext));
Mockito.when(searchContext.query()).thenReturn(percolateQuery);
assertThat(subFetchPhase.hitsExecutionNeeded(searchContext), is(true));
assertNotNull(subFetchPhase.getProcessor(searchContext));
}

public void testLocatePercolatorQuery() {
@@ -22,6 +22,7 @@ import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex;

@@ -36,11 +37,18 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;

import java.util.Collections;
import java.util.HashMap;
import java.util.stream.IntStream;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {

public void testHitsExecute() throws Exception {

@@ -51,12 +59,14 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
indexWriter.addDocument(document);
}

PercolatorMatchedSlotSubFetchPhase phase = new PercolatorMatchedSlotSubFetchPhase();

try (DirectoryReader reader = DirectoryReader.open(directory)) {
IndexSearcher indexSearcher = new IndexSearcher(reader);

LeafReaderContext context = reader.leaves().get(0);
// A match:
{
SearchHit[] hits = new SearchHit[]{new SearchHit(0)};
HitContext hit = new HitContext(new SearchHit(0), context, 0, null, new HashMap<>());
PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value"));
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());

@@ -64,14 +74,20 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());

PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNotNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
assertEquals(0, (int) hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX).getValue());
SearchContext sc = mock(SearchContext.class);
when(sc.query()).thenReturn(percolateQuery);

FetchSubPhaseProcessor processor = phase.getProcessor(sc);
assertNotNull(processor);
processor.process(hit);

assertNotNull(hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
assertEquals(0, (int) hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX).getValue());
}

// No match:
{
SearchHit[] hits = new SearchHit[]{new SearchHit(0)};
HitContext hit = new HitContext(new SearchHit(0), context, 0, null, new HashMap<>());
PercolateQuery.QueryStore queryStore = ctx -> docId -> new TermQuery(new Term("field", "value"));
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value1", new WhitespaceAnalyzer());

@@ -79,13 +95,19 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());

PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
SearchContext sc = mock(SearchContext.class);
when(sc.query()).thenReturn(percolateQuery);

FetchSubPhaseProcessor processor = phase.getProcessor(sc);
assertNotNull(processor);
processor.process(hit);

assertNull(hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
}

// No query:
{
SearchHit[] hits = new SearchHit[]{new SearchHit(0)};
HitContext hit = new HitContext(new SearchHit(0), context, 0, null, new HashMap<>());
PercolateQuery.QueryStore queryStore = ctx -> docId -> null;
MemoryIndex memoryIndex = new MemoryIndex();
memoryIndex.addField("field", "value", new WhitespaceAnalyzer());

@@ -93,8 +115,14 @@ public class PercolatorMatchedSlotSubFetchPhaseTests extends ESTestCase {
PercolateQuery percolateQuery = new PercolateQuery("_name", queryStore, Collections.emptyList(),
new MatchAllDocsQuery(), memoryIndex.createSearcher(), null, new MatchNoDocsQuery());

PercolatorMatchedSlotSubFetchPhase.innerHitsExecute(percolateQuery, indexSearcher, hits);
assertNull(hits[0].field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
SearchContext sc = mock(SearchContext.class);
when(sc.query()).thenReturn(percolateQuery);

FetchSubPhaseProcessor processor = phase.getProcessor(sc);
assertNotNull(processor);
processor.process(hit);

assertNull(hit.hit().field(PercolatorMatchedSlotSubFetchPhase.FIELD_NAME_PREFIX));
}
}
}
@@ -28,6 +28,7 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;

@@ -54,36 +55,22 @@ import org.elasticsearch.test.ESTestCase;
import java.net.URLEncoder;
import java.text.BreakIterator;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Locale;

import static org.apache.lucene.search.uhighlight.CustomUnifiedHighlighter.MULTIVAL_SEP_CHAR;
import static org.hamcrest.CoreMatchers.equalTo;

public class AnnotatedTextHighlighterTests extends ESTestCase {

private void assertHighlightOneDoc(String fieldName, String []markedUpInputs,
Query query, Locale locale, BreakIterator breakIterator,
int noMatchSize, String[] expectedPassages) throws Exception {

// Annotated fields wrap the usual analyzer with one that injects extra tokens
Analyzer wrapperAnalyzer = new AnnotationAnalyzerWrapper(new StandardAnalyzer());
HitContext mockHitContext = new HitContext();
AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer, mockHitContext);

AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length];
for (int i = 0; i < markedUpInputs.length; i++) {
annotations[i] = AnnotatedText.parse(markedUpInputs[i]);
}
mockHitContext.cache().put(AnnotatedText.class.getName(), annotations);

AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(annotations,new DefaultEncoder());

ArrayList<Object> plainTextForHighlighter = new ArrayList<>(annotations.length);
for (int i = 0; i < annotations.length; i++) {
plainTextForHighlighter.add(annotations[i].textMinusMarkup);
}

Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig(wrapperAnalyzer);
iwc.setMergePolicy(newTieredMergePolicy(random()));

@@ -105,10 +92,28 @@ public class AnnotatedTextHighlighterTests extends ESTestCase {
DirectoryReader reader = iw.getReader();
IndexSearcher searcher = newSearcher(reader);
iw.close();

LeafReaderContext context = searcher.getIndexReader().leaves().get(0);
HitContext mockHitContext = new HitContext(null, context, 0, null, new HashMap<>());
AnnotatedHighlighterAnalyzer hiliteAnalyzer = new AnnotatedHighlighterAnalyzer(wrapperAnalyzer, mockHitContext);

AnnotatedText[] annotations = new AnnotatedText[markedUpInputs.length];
for (int i = 0; i < markedUpInputs.length; i++) {
annotations[i] = AnnotatedText.parse(markedUpInputs[i]);
}
mockHitContext.cache().put(AnnotatedText.class.getName(), annotations);

AnnotatedPassageFormatter passageFormatter = new AnnotatedPassageFormatter(annotations,new DefaultEncoder());

ArrayList<Object> plainTextForHighlighter = new ArrayList<>(annotations.length);
for (int i = 0; i < annotations.length; i++) {
plainTextForHighlighter.add(annotations[i].textMinusMarkup);
}

TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 1, Sort.INDEXORDER);
assertThat(topDocs.totalHits.value, equalTo(1L));
String rawValue = Strings.collectionToDelimitedString(plainTextForHighlighter, String.valueOf(MULTIVAL_SEP_CHAR));

CustomUnifiedHighlighter highlighter = new CustomUnifiedHighlighter(searcher, hiliteAnalyzer, null,
passageFormatter, locale,
breakIterator, rawValue, noMatchSize);

@@ -122,7 +127,7 @@ public class AnnotatedTextHighlighterTests extends ESTestCase {
reader.close();
dir.close();
}

public void testAnnotatedTextStructuredMatch() throws Exception {
// Check that a structured token eg a URL can be highlighted in a query

@@ -167,7 +172,7 @@ public class AnnotatedTextHighlighterTests extends ESTestCase {
breakIterator = new SplittingBreakIterator(breakIterator, '.');
assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages);
}

public void testAnnotatedTextSingleFieldWithBreakIterator() throws Exception {
final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore. Kim shook hands with Donald"};
String[] expectedPassages = { "[Donald](_hit_term=donald) Trump visited Singapore",

@@ -176,23 +181,23 @@ public class AnnotatedTextHighlighterTests extends ESTestCase {
BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
breakIterator = new SplittingBreakIterator(breakIterator, '.');
assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages);
}

}

public void testAnnotatedTextSingleFieldWithPhraseQuery() throws Exception {
final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore",
final String[] markedUpInputs = { "[Donald Trump](Donald+Trump) visited Singapore",
"Donald Jr was with Melania Trump"};
String[] expectedPassages = { "[Donald](_hit_term=donald) [Trump](_hit_term=trump) visited Singapore"};
Query query = new PhraseQuery("text", "donald", "trump");
BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages);
}

}

public void testBadAnnotation() throws Exception {
final String[] markedUpInputs = { "Missing bracket for [Donald Trump](Donald+Trump visited Singapore"};
String[] expectedPassages = { "Missing bracket for [Donald Trump](Donald+Trump visited [Singapore](_hit_term=singapore)"};
Query query = new TermQuery(new Term("text", "singapore"));
BreakIterator breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
assertHighlightOneDoc("text", markedUpInputs, query, Locale.ROOT, breakIterator, 0, expectedPassages);
}

}

}
@@ -20,6 +20,7 @@
package org.elasticsearch.search.fetch;

import org.apache.logging.log4j.LogManager;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

@@ -119,7 +120,21 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase {
private static final String NAME = "term_vectors_fetch";

@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) {
return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) {

}

@Override
public void process(HitContext hitContext) {
hitExecute(searchContext, hitContext);
}
};
}

private void hitExecute(SearchContext context, HitContext hitContext) {
TermVectorsFetchBuilder fetchSubPhaseBuilder = (TermVectorsFetchBuilder)context.getSearchExt(NAME);
if (fetchSubPhaseBuilder == null) {
return;
@@ -682,19 +682,19 @@ public class HighlighterSearchIT extends ESIntegTestCase {
.highlighter(
new HighlightBuilder().field(new Field("field1").preTags("<xxx>").postTags("</xxx>").forceSource(true))),
RestStatus.BAD_REQUEST,
containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
containsString("source is forced for fields [field1] but _source is disabled"));

SearchSourceBuilder searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
.highlighter(highlight().forceSource(true).field("field1"));
assertFailures(client().prepareSearch("test").setSource(searchSource),
RestStatus.BAD_REQUEST,
containsString("source is forced for fields [field1] but type [type1] has disabled _source"));
containsString("source is forced for fields [field1] but _source is disabled"));

searchSource = SearchSourceBuilder.searchSource().query(termQuery("field1", "quick"))
.highlighter(highlight().forceSource(true).field("field*"));
assertFailures(client().prepareSearch("test").setSource(searchSource),
RestStatus.BAD_REQUEST,
matches("source is forced for fields \\[field\\d, field\\d\\] but type \\[type1\\] has disabled _source"));
matches("source is forced for fields \\[field\\d, field\\d\\] but _source is disabled"));
}

public void testPlainHighlighter() throws Exception {
@@ -53,6 +53,7 @@ import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsPhase;

@@ -61,6 +62,7 @@ import org.elasticsearch.search.lookup.SourceLookup;
import org.elasticsearch.tasks.TaskCancelledException;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

@@ -107,44 +109,44 @@ public class FetchPhase implements SearchPhase {
Arrays.sort(docs);

SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()];
SearchHit[] sortedHits = new SearchHit[context.docIdsToLoadSize()];
FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
Map<String, Object> sharedCache = new HashMap<>();

List<FetchSubPhaseProcessor> processors = new ArrayList<>();
for (FetchSubPhase fsp : fetchSubPhases) {
FetchSubPhaseProcessor processor = fsp.getProcessor(context);
if (processor != null) {
processors.add(processor);
}
}

int currentReaderIndex = -1;
LeafReaderContext currentReaderContext = null;
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}
int docId = docs[index].docId;
int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
int subDocId = docId - subReaderContext.docBase;

int rootDocId = findRootDocumentIfNested(context, subReaderContext, subDocId);
if (rootDocId == -1) {
prepareHitContext(hitContext, context, fieldsVisitor, docId, subDocId,
storedToRequestedFields, subReaderContext);
} else {
prepareNestedHitContext(hitContext, context, docId, subDocId, rootDocId,
storedToRequestedFields, subReaderContext);
if (currentReaderIndex != readerIndex) {
currentReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
currentReaderIndex = readerIndex;
for (FetchSubPhaseProcessor processor : processors) {
processor.setNextReader(currentReaderContext);
}
}
assert currentReaderContext != null;

SearchHit searchHit = hitContext.hit();
sortedHits[index] = searchHit;
hits[docs[index].index] = searchHit;
for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
fetchSubPhase.hitExecute(context, hitContext);
HitContext hit
= prepareHitContext(context, fieldsVisitor, docId, storedToRequestedFields, currentReaderContext, sharedCache);
for (FetchSubPhaseProcessor processor : processors) {
processor.process(hit);
}
hits[docs[index].index] = hit.hit();
}
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}

for (FetchSubPhase fetchSubPhase : fetchSubPhases) {
fetchSubPhase.hitsExecute(context, sortedHits);
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}
}

TotalHits totalHits = context.queryResult().getTotalHits();
context.fetchResult().hits(new SearchHits(hits, totalHits, context.queryResult().getMaxScore()));
} catch (IOException e) {

@@ -227,26 +229,37 @@ public class FetchPhase implements SearchPhase {
return -1;
}

private HitContext prepareHitContext(SearchContext context, FieldsVisitor fieldsVisitor, int docId,
Map<String, Set<String>> storedToRequestedFields,
LeafReaderContext subReaderContext, Map<String, Object> sharedCache) throws IOException {
int rootDocId = findRootDocumentIfNested(context, subReaderContext, docId - subReaderContext.docBase);
if (rootDocId == -1) {
return prepareNonNestedHitContext(context, fieldsVisitor, docId, storedToRequestedFields, subReaderContext, sharedCache);
} else {
return prepareNestedHitContext(context, docId, rootDocId, storedToRequestedFields, subReaderContext, sharedCache);
}
}

/**
 * Resets the provided {@link FetchSubPhase.HitContext} with information on the current
 * Resets the provided {@link HitContext} with information on the current
 * document. This includes the following:
 *   - Adding an initial {@link SearchHit} instance.
 *   - Loading the document source and setting it on {@link SourceLookup}. This allows
 *     fetch subphases that use the hit context to access the preloaded source.
 */
private void prepareHitContext(FetchSubPhase.HitContext hitContext,
SearchContext context,
private HitContext prepareNonNestedHitContext(SearchContext context,
FieldsVisitor fieldsVisitor,
int docId,
int subDocId,
Map<String, Set<String>> storedToRequestedFields,
LeafReaderContext subReaderContext) {
LeafReaderContext subReaderContext,
Map<String, Object> sharedCache) {
int subDocId = docId - subReaderContext.docBase;
DocumentMapper documentMapper = context.mapperService().documentMapper();
Text typeText = documentMapper.typeText();

if (fieldsVisitor == null) {
SearchHit hit = new SearchHit(docId, null, typeText, null, null);
hitContext.reset(hit, subReaderContext, subDocId, context.searcher());
return new HitContext(hit, subReaderContext, subDocId, context.searcher(), sharedCache);
} else {
SearchHit hit;
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, fieldsVisitor, subDocId);

@@ -260,16 +273,17 @@ public class FetchPhase implements SearchPhase {
hit = new SearchHit(docId, uid.id(), typeText, emptyMap(), emptyMap());
}

hitContext.reset(hit, subReaderContext, subDocId, context.searcher());
HitContext hitContext = new HitContext(hit, subReaderContext, subDocId, context.searcher(), sharedCache);
if (fieldsVisitor.source() != null) {
hitContext.sourceLookup().setSource(fieldsVisitor.source());
}
return hitContext;
}
}

/**
/**
 * Resets the provided {@link FetchSubPhase.HitContext} with information on the current
 * Resets the provided {@link HitContext} with information on the current
 * nested document. This includes the following:
 *   - Adding an initial {@link SearchHit} instance.
 *   - Loading the document source, filtering it based on the nested document ID, then

@@ -277,13 +291,12 @@ public class FetchPhase implements SearchPhase {
 *     context to access the preloaded source.
 */
@SuppressWarnings("unchecked")
private void prepareNestedHitContext(FetchSubPhase.HitContext hitContext,
SearchContext context,
int nestedTopDocId,
int nestedSubDocId,
int rootSubDocId,
Map<String, Set<String>> storedToRequestedFields,
LeafReaderContext subReaderContext) throws IOException {
private HitContext prepareNestedHitContext(SearchContext context,
int nestedTopDocId,
int rootDocId,
Map<String, Set<String>> storedToRequestedFields,
LeafReaderContext subReaderContext,
Map<String, Object> sharedCache) throws IOException {
// Also if highlighting is requested on nested documents we need to fetch the _source from the root document,
// otherwise highlighting will attempt to fetch the _source from the nested doc, which will fail,
// because the entire _source is only stored with the root document.

@@ -293,6 +306,8 @@ public class FetchPhase implements SearchPhase {
Map<String, Object> rootSourceAsMap = null;
XContentType rootSourceContentType = null;

int nestedDocId = nestedTopDocId - subReaderContext.docBase;

if (context instanceof InnerHitsContext.InnerHitSubContext) {
InnerHitsContext.InnerHitSubContext innerHitsContext = (InnerHitsContext.InnerHitSubContext) context;
rootId = innerHitsContext.getRootId();

@@ -304,7 +319,7 @@ public class FetchPhase implements SearchPhase {
}
} else {
FieldsVisitor rootFieldsVisitor = new FieldsVisitor(needSource);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, rootFieldsVisitor, rootSubDocId);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, rootFieldsVisitor, rootDocId);
rootFieldsVisitor.postProcess(context.mapperService());
rootId = rootFieldsVisitor.uid();

@@ -320,7 +335,7 @@ public class FetchPhase implements SearchPhase {
Map<String, DocumentField> metaFields = emptyMap();
if (context.hasStoredFields() && !context.storedFieldsContext().fieldNames().isEmpty()) {
FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(storedToRequestedFields.keySet(), false);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, nestedFieldsVisitor, nestedSubDocId);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, nestedFieldsVisitor, nestedDocId);
if (nestedFieldsVisitor.fields().isEmpty() == false) {
docFields = new HashMap<>();
metaFields = new HashMap<>();

@@ -331,13 +346,14 @@ public class FetchPhase implements SearchPhase {
DocumentMapper documentMapper = context.mapperService().documentMapper();
Text typeText = documentMapper.typeText();

ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context, subReaderContext);
ObjectMapper nestedObjectMapper
    = documentMapper.findNestedObjectMapper(nestedDocId, context, subReaderContext);
assert nestedObjectMapper != null;
SearchHit.NestedIdentity nestedIdentity =
getInternalNestedIdentity(context, nestedSubDocId, subReaderContext, context.mapperService(), nestedObjectMapper);
getInternalNestedIdentity(context, nestedDocId, subReaderContext, context.mapperService(), nestedObjectMapper);

SearchHit hit = new SearchHit(nestedTopDocId, rootId.id(), typeText, nestedIdentity, docFields, metaFields);
hitContext.reset(hit, subReaderContext, nestedSubDocId, context.searcher());
HitContext hitContext = new HitContext(hit, subReaderContext, nestedDocId, context.searcher(), sharedCache);

if (rootSourceAsMap != null) {
// Isolate the nested json array object that matches with nested hit and wrap it back into the same json

@@ -384,7 +400,7 @@ public class FetchPhase implements SearchPhase {
hitContext.sourceLookup().setSource(nestedSourceAsMap);
hitContext.sourceLookup().setSourceContentType(rootSourceContentType);
}

return hitContext;
}

private SearchHit.NestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId,
@@ -27,7 +27,6 @@ import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**

@@ -36,19 +35,21 @@ import java.util.Map;
public interface FetchSubPhase {

class HitContext {
private SearchHit hit;
private IndexSearcher searcher;
private LeafReaderContext readerContext;
private int docId;
private final SearchHit hit;
private final IndexSearcher searcher;
private final LeafReaderContext readerContext;
private final int docId;
private final SourceLookup sourceLookup = new SourceLookup();
private Map<String, Object> cache;
private final Map<String, Object> cache;

public void reset(SearchHit hit, LeafReaderContext context, int docId, IndexSearcher searcher) {
public HitContext(SearchHit hit, LeafReaderContext context, int docId, IndexSearcher searcher,
Map<String, Object> cache) {
this.hit = hit;
this.readerContext = context;
this.docId = docId;
this.searcher = searcher;
this.sourceLookup.setSegmentAndDocument(context, docId);
this.cache = cache;
}

public SearchHit hit() {

@@ -63,6 +64,9 @@ public interface FetchSubPhase {
return readerContext;
}

/**
 * @return the docId of this hit relative to the leaf reader context
 */
public int docId() {
return docId;
}

@@ -82,21 +86,17 @@ public interface FetchSubPhase {
return searcher.getIndexReader();
}

// TODO move this into Highlighter
public Map<String, Object> cache() {
if (cache == null) {
cache = new HashMap<>();
}
return cache;
}
}

/**
 * Executes the hit level phase, with a reader and doc id (note, its a low level reader, and the matching doc).
 * Returns a {@link FetchSubPhaseProcessor} for this sub phase.
 *
 * If nothing should be executed for the provided {@link SearchContext}, then the
 * implementation should return {@code null}
 */
default void hitExecute(SearchContext context, HitContext hitContext) throws IOException {}

/**
 * Executes the hits level phase (note, hits are sorted by doc ids).
 */
default void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {}
FetchSubPhaseProcessor getProcessor(SearchContext searchContext) throws IOException;
}
@@ -0,0 +1,42 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.fetch;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;

import java.io.IOException;

/**
 * Executes the logic for a {@link FetchSubPhase} against a particular leaf reader and hit
 */
public interface FetchSubPhaseProcessor {

    /**
     * Called when moving to the next {@link LeafReaderContext} for a set of hits
     */
    void setNextReader(LeafReaderContext readerContext) throws IOException;

    /**
     * Called in doc id order for each hit in a leaf reader
     */
    void process(HitContext hitContext) throws IOException;

}
@@ -18,9 +18,11 @@
 */
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Explanation;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.rescore.RescoreContext;

@@ -32,24 +34,33 @@ import java.io.IOException;
public final class ExplainPhase implements FetchSubPhase {

@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
public FetchSubPhaseProcessor getProcessor(SearchContext context) {
if (context.explain() == false || context.hasOnlySuggest()) {
return;
return null;
}
try {
final int topLevelDocId = hitContext.hit().docId();
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) {

for (RescoreContext rescore : context.rescore()) {
explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
}
// we use the top level doc id, since we work with the top level searcher
hitContext.hit().explanation(explanation);
} catch (IOException e) {
throw new FetchPhaseExecutionException(context.shardTarget(), "Failed to explain doc [" + hitContext.hit().getType() + "#"
+ hitContext.hit().getId() + "]", e);
} finally {
context.clearReleasables(SearchContext.Lifetime.COLLECTION);
}

@Override
public void process(HitContext hitContext) {
try {
final int topLevelDocId = hitContext.hit().docId();
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);

for (RescoreContext rescore : context.rescore()) {
explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
}
// we use the top level doc id, since we work with the top level searcher
hitContext.hit().explanation(explanation);
}
catch (IOException e) { // TODO move this try-catch up into FetchPhase
throw new FetchPhaseExecutionException(context.shardTarget(),
"Failed to explain doc [" + hitContext.hit().getId() + "]", e);
}
}
};
}
}
@@ -19,12 +19,9 @@
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SortedNumericDocValues;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.index.fielddata.LeafFieldData;
import org.elasticsearch.index.fielddata.LeafNumericFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
@@ -32,8 +29,8 @@ import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
@@ -56,117 +53,203 @@ public final class FetchDocValuesPhase implements FetchSubPhase {
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(FetchDocValuesPhase.class);

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {

public FetchSubPhaseProcessor getProcessor(SearchContext context) throws IOException {
if (context.collapse() != null) {
// retrieve the `doc_value` associated with the collapse field
String name = context.collapse().getFieldName();
if (context.docValuesContext() == null) {
context.docValuesContext(new FetchDocValuesContext(
Collections.singletonList(new FieldAndFormat(name, null))));
Collections.singletonList(new FieldAndFormat(name, null))));
} else if (context.docValuesContext().fields().stream().map(ff -> ff.field).anyMatch(name::equals) == false) {
context.docValuesContext().fields().add(new FieldAndFormat(name, null));
}
}

if (context.docValuesContext() == null) {
return;
return null;
}

if (context.docValuesContext().fields().stream()
.map(f -> f.format)
.filter(USE_DEFAULT_FORMAT::equals)
.findAny()
.isPresent()) {
.anyMatch(USE_DEFAULT_FORMAT::equals)) {
DEPRECATION_LOGGER.deprecate("explicit_default_format",
"[" + USE_DEFAULT_FORMAT + "] is a special format that was only used to " +
"ease the transition to 7.x. It has become the default and shouldn't be set explicitly anymore.");
}

List<DocValueField> fields = new ArrayList<>();
for (FieldAndFormat fieldAndFormat : context.docValuesContext().fields()) {
String field = fieldAndFormat.field;
MappedFieldType fieldType = context.mapperService().fieldType(field);
if (fieldType != null) {
final IndexFieldData<?> indexFieldData = context.getForField(fieldType);
final boolean isNanosecond;
if (indexFieldData instanceof IndexNumericFieldData) {
isNanosecond = ((IndexNumericFieldData) indexFieldData).getNumericType() == NumericType.DATE_NANOSECONDS;
} else {
isNanosecond = false;
DocValueField f = buildField(context, fieldAndFormat);
if (f != null) {
fields.add(f);
}
}

return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
for (DocValueField f : fields) {
f.setNextReader(readerContext);
}
final DocValueFormat format;
String formatDesc = fieldAndFormat.format;
if (Objects.equals(formatDesc, USE_DEFAULT_FORMAT)) {
// TODO: Remove in 8.x
formatDesc = null;
}
if (isNanosecond) {
format = withNanosecondResolution(fieldType.docValueFormat(formatDesc, null));
} else {
format = fieldType.docValueFormat(formatDesc, null);
}
LeafReaderContext subReaderContext = null;
LeafFieldData data = null;
SortedBinaryDocValues binaryValues = null; // binary / string / ip fields
SortedNumericDocValues longValues = null; // int / date fields
SortedNumericDoubleValues doubleValues = null; // floating-point fields
for (SearchHit hit : hits) {
// if the reader index has changed we need to get a new doc values reader instance
if (subReaderContext == null || hit.docId() >= subReaderContext.docBase + subReaderContext.reader().maxDoc()) {
int readerIndex = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
subReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
data = indexFieldData.load(subReaderContext);
if (indexFieldData instanceof IndexNumericFieldData) {
NumericType numericType = ((IndexNumericFieldData) indexFieldData).getNumericType();
if (numericType.isFloatingPoint()) {
doubleValues = ((LeafNumericFieldData) data).getDoubleValues();
} else {
// by default nanoseconds are cut to milliseconds within aggregations
// however for doc value fields we need the original nanosecond longs
if (isNanosecond) {
longValues = ((SortedNumericIndexFieldData.NanoSecondFieldData) data).getLongValuesAsNanos();
} else {
longValues = ((LeafNumericFieldData) data).getLongValues();
}
}
} else {
data = indexFieldData.load(subReaderContext);
binaryValues = data.getBytesValues();
}
}
DocumentField hitField = hit.field(field);
}

@Override
public void process(HitContext hit) throws IOException {
for (DocValueField f : fields) {
DocumentField hitField = hit.hit().field(f.field);
if (hitField == null) {
hitField = new DocumentField(field, new ArrayList<>(2));
hitField = new DocumentField(f.field, new ArrayList<>(2));
// even if we request a doc values of a meta-field (e.g. _routing),
// docValues fields will still be document fields, and put under "fields" section of a hit.
hit.setDocumentField(field, hitField);
hit.hit().setDocumentField(f.field, hitField);
}
final List<Object> values = hitField.getValues();
f.setValues(hit.docId(), hitField);
}
}
};
}

int subDocId = hit.docId() - subReaderContext.docBase;
if (binaryValues != null) {
if (binaryValues.advanceExact(subDocId)) {
for (int i = 0, count = binaryValues.docValueCount(); i < count; ++i) {
values.add(format.format(binaryValues.nextValue()));
}
}
} else if (longValues != null) {
if (longValues.advanceExact(subDocId)) {
for (int i = 0, count = longValues.docValueCount(); i < count; ++i) {
values.add(format.format(longValues.nextValue()));
}
}
} else if (doubleValues != null) {
if (doubleValues.advanceExact(subDocId)) {
for (int i = 0, count = doubleValues.docValueCount(); i < count; ++i) {
values.add(format.format(doubleValues.nextValue()));
}
}
} else {
throw new AssertionError("Unreachable code");
}
private abstract static class DocValueField {

final String field;
final DocValueFormat format;

protected DocValueField(String field, DocValueFormat format) {
this.field = field;
this.format = format;
}

abstract void setNextReader(LeafReaderContext context);
abstract void setValues(int doc, DocumentField hitField) throws IOException;

}

private static class DoubleDocValueField extends DocValueField {

SortedNumericDoubleValues doubleValues;
IndexNumericFieldData fieldData;

DoubleDocValueField(String field, IndexNumericFieldData fieldData, DocValueFormat format) {
super(field, format);
this.fieldData = fieldData;
}

@Override
void setNextReader(LeafReaderContext context) {
doubleValues = fieldData.load(context).getDoubleValues();
}

@Override
void setValues(int doc, DocumentField hitField) throws IOException {
final List<Object> values = hitField.getValues();
if (doubleValues.advanceExact(doc)) {
for (int i = 0, count = doubleValues.docValueCount(); i < count; ++i) {
values.add(format.format(doubleValues.nextValue()));
}
}
}
}

private static class NanoDocValueField extends DocValueField {

SortedNumericDocValues longValues;
IndexNumericFieldData fieldData;

NanoDocValueField(String field, IndexNumericFieldData fieldData, DocValueFormat format) {
super(field, withNanosecondResolution(format));
this.fieldData = fieldData;
}

@Override
void setNextReader(LeafReaderContext context) {
longValues = ((SortedNumericIndexFieldData.NanoSecondFieldData) fieldData.load(context)).getLongValuesAsNanos();
}

@Override
void setValues(int doc, DocumentField hitField) throws IOException {
final List<Object> values = hitField.getValues();
if (longValues.advanceExact(doc)) {
for (int i = 0, count = longValues.docValueCount(); i < count; ++i) {
values.add(format.format(longValues.nextValue()));
}
}
}
}

private static class LongDocValueField extends DocValueField {

SortedNumericDocValues longValues;
IndexNumericFieldData fieldData;

LongDocValueField(String field, IndexNumericFieldData fieldData, DocValueFormat format) {
super(field, format);
this.fieldData = fieldData;
}

@Override
void setNextReader(LeafReaderContext context) {
longValues = fieldData.load(context).getLongValues();
}

@Override
void setValues(int doc, DocumentField hitField) throws IOException {
final List<Object> values = hitField.getValues();
if (longValues.advanceExact(doc)) {
for (int i = 0, count = longValues.docValueCount(); i < count; ++i) {
values.add(format.format(longValues.nextValue()));
}
}
}

}

private static class BinaryDocValueField extends DocValueField {

SortedBinaryDocValues byteValues;
IndexFieldData<?> fieldData;

BinaryDocValueField(String field, IndexFieldData<?> fieldData, DocValueFormat format) {
super(field, format);
this.fieldData = fieldData;
}

@Override
void setNextReader(LeafReaderContext context) {
byteValues = fieldData.load(context).getBytesValues();
}

@Override
void setValues(int doc, DocumentField hitField) throws IOException {
final List<Object> values = hitField.getValues();
if (byteValues.advanceExact(doc)) {
for (int i = 0, count = byteValues.docValueCount(); i < count; ++i) {
values.add(format.format(byteValues.nextValue()));
}
}
}

}

private static DocValueField buildField(SearchContext context, FieldAndFormat fieldAndFormat) {
MappedFieldType fieldType = context.mapperService().fieldType(fieldAndFormat.field);
if (fieldType == null) {
return null;
}
final IndexFieldData<?> indexFieldData = context.getForField(fieldType);
String formatDesc = fieldAndFormat.format;
if (Objects.equals(formatDesc, USE_DEFAULT_FORMAT)) {
formatDesc = null;
}
DocValueFormat format = fieldType.docValueFormat(formatDesc, null);
if (indexFieldData instanceof IndexNumericFieldData) {
if (((IndexNumericFieldData) indexFieldData).getNumericType().isFloatingPoint()) {
return new DoubleDocValueField(fieldAndFormat.field, (IndexNumericFieldData) indexFieldData, format);
}
if (((IndexNumericFieldData) indexFieldData).getNumericType() == NumericType.DATE_NANOSECONDS) {
return new NanoDocValueField(fieldAndFormat.field, (IndexNumericFieldData) indexFieldData, format);
}
return new LongDocValueField(fieldAndFormat.field, (IndexNumericFieldData) indexFieldData, format);
}
return new BinaryDocValueField(fieldAndFormat.field, indexFieldData, format);
}
}

@@ -19,10 +19,12 @@
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.index.mapper.IgnoredFieldMapper;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

@@ -37,21 +39,30 @@ import java.util.Set;
public final class FetchFieldsPhase implements FetchSubPhase {

@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
FetchFieldsContext fetchFieldsContext = context.fetchFieldsContext();
public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) {
FetchFieldsContext fetchFieldsContext = searchContext.fetchFieldsContext();
if (fetchFieldsContext == null) {
return;
return null;
}
return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) {

SearchHit hit = hitContext.hit();
SourceLookup sourceLookup = hitContext.sourceLookup();
FieldValueRetriever fieldValueRetriever = fetchFieldsContext.fieldValueRetriever();
}

Set<String> ignoredFields = getIgnoredFields(hit);
Map<String, DocumentField> documentFields = fieldValueRetriever.retrieve(sourceLookup, ignoredFields);
for (Map.Entry<String, DocumentField> entry : documentFields.entrySet()) {
hit.setDocumentField(entry.getKey(), entry.getValue());
}
@Override
public void process(HitContext hitContext) {
SearchHit hit = hitContext.hit();
SourceLookup sourceLookup = hitContext.sourceLookup();
FieldValueRetriever fieldValueRetriever = fetchFieldsContext.fieldValueRetriever();

Set<String> ignoredFields = getIgnoredFields(hit);
Map<String, DocumentField> documentFields = fieldValueRetriever.retrieve(sourceLookup, ignoredFields);
for (Map.Entry<String, DocumentField> entry : documentFields.entrySet()) {
hit.setDocumentField(entry.getKey(), entry.getValue());
}
}
};
}

private Set<String> getIgnoredFields(SearchHit hit) {

@@ -25,48 +25,43 @@ import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.Weight;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.Iterator;

public class FetchScorePhase implements FetchSubPhase {

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
if (context.trackScores() == false || hits.length == 0 ||
// scores were already computed since they are needed on the coordinated node to merge top hits
context.sort() == null) {
return;
public FetchSubPhaseProcessor getProcessor(SearchContext context) throws IOException {
if (context.trackScores() == false || context.docIdsToLoadSize() == 0 ||
// scores were already computed since they are needed on the coordinated node to merge top hits
context.sort() == null) {
return null;
}

final IndexSearcher searcher = context.searcher();
final Weight weight = searcher.createWeight(searcher.rewrite(context.query()), ScoreMode.COMPLETE, 1);
Iterator<LeafReaderContext> leafContextIterator = searcher.getIndexReader().leaves().iterator();
LeafReaderContext leafContext = null;
Scorer scorer = null;
for (SearchHit hit : hits) {
if (leafContext == null || leafContext.docBase + leafContext.reader().maxDoc() <= hit.docId()) {
do {
leafContext = leafContextIterator.next();
} while (leafContext == null || leafContext.docBase + leafContext.reader().maxDoc() <= hit.docId());
ScorerSupplier scorerSupplier = weight.scorerSupplier(leafContext);
return new FetchSubPhaseProcessor() {

Scorer scorer;

@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
ScorerSupplier scorerSupplier = weight.scorerSupplier(readerContext);
if (scorerSupplier == null) {
throw new IllegalStateException("Can't compute score on document " + hit + " as it doesn't match the query");
throw new IllegalStateException("Can't compute score on document as it doesn't match the query");
}
scorer = scorerSupplier.get(1L); // random-access
}

final int leafDocID = hit.docId() - leafContext.docBase;
assert leafDocID >= 0 && leafDocID < leafContext.reader().maxDoc();
int advanced = scorer.iterator().advance(leafDocID);
if (advanced != leafDocID) {
throw new IllegalStateException("Can't compute score on document " + hit + " as it doesn't match the query");
@Override
public void process(HitContext hitContext) throws IOException {
if (scorer == null || scorer.iterator().advance(hitContext.docId()) != hitContext.docId()) {
throw new IllegalStateException("Can't compute score on document " + hitContext + " as it doesn't match the query");
}
hitContext.hit().score(scorer.score());
}
hit.score(scorer.score());
}
};
}

}

@@ -19,12 +19,14 @@
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

@@ -34,20 +36,37 @@ import java.util.Map;
public final class FetchSourcePhase implements FetchSubPhase {

@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
if (context.sourceRequested() == false) {
return;
public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) {
if (searchContext.sourceRequested() == false) {
return null;
}
String index = searchContext.indexShard().shardId().getIndexName();
FetchSourceContext fetchSourceContext = searchContext.fetchSourceContext();
assert fetchSourceContext.fetchSource();

return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) {

}

@Override
public void process(HitContext hitContext) {
hitExecute(index, fetchSourceContext, hitContext);
}
};
}

private void hitExecute(String index, FetchSourceContext fetchSourceContext, HitContext hitContext) {

final boolean nestedHit = hitContext.hit().getNestedIdentity() != null;
SourceLookup source = hitContext.sourceLookup();
FetchSourceContext fetchSourceContext = context.fetchSourceContext();
assert fetchSourceContext.fetchSource();

// If source is disabled in the mapping, then attempt to return early.
if (source.source() == null && source.internalSourceRef() == null) {
if (containsFilters(fetchSourceContext)) {
throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " +
"for index [" + context.indexShard().shardId().getIndexName() + "]");
"for index [" + index + "]");
}
return;
}

@@ -20,38 +20,39 @@ package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.ReaderUtil;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.index.mapper.VersionFieldMapper;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;

public final class FetchVersionPhase implements FetchSubPhase {

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
public FetchSubPhaseProcessor getProcessor(SearchContext context) {
if (context.version() == false ||
(context.storedFieldsContext() != null && context.storedFieldsContext().fetchFields() == false)) {
return;
return null;
}
return new FetchSubPhaseProcessor() {

int lastReaderId = -1;
NumericDocValues versions = null;
for (SearchHit hit : hits) {
int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
if (lastReaderId != readerId) {
versions = subReaderContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
lastReaderId = readerId;
NumericDocValues versions = null;

@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
versions = readerContext.reader().getNumericDocValues(VersionFieldMapper.NAME);
}
int docId = hit.docId() - subReaderContext.docBase;
long version = Versions.NOT_FOUND;
if (versions != null && versions.advanceExact(docId)) {
version = versions.longValue();

@Override
public void process(HitContext hitContext) throws IOException {
long version = Versions.NOT_FOUND;
if (versions != null && versions.advanceExact(hitContext.docId())) {
version = versions.longValue();
}
hitContext.hit().version(version < 0 ? -1 : version);
}
hit.version(version < 0 ? -1 : version);
}
};
}
}

@@ -19,6 +19,7 @@
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
@@ -28,6 +29,7 @@ import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.lookup.SourceLookup;

@@ -44,33 +46,48 @@ public final class InnerHitsPhase implements FetchSubPhase {
}

@Override
public void hitExecute(SearchContext context, HitContext hitContext) throws IOException {
if (context.innerHits() == null) {
return;
public FetchSubPhaseProcessor getProcessor(SearchContext searchContext) {
if (searchContext.innerHits() == null) {
return null;
}
Map<String, InnerHitsContext.InnerHitSubContext> innerHits = searchContext.innerHits().getInnerHits();
return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) {

}

@Override
public void process(HitContext hitContext) throws IOException {
hitExecute(innerHits, hitContext);
}
};
}

private void hitExecute(Map<String, InnerHitsContext.InnerHitSubContext> innerHits, HitContext hitContext) throws IOException {

SearchHit hit = hitContext.hit();
SourceLookup sourceLookup = hitContext.sourceLookup();

for (Map.Entry<String, InnerHitsContext.InnerHitSubContext> entry : context.innerHits().getInnerHits().entrySet()) {
InnerHitsContext.InnerHitSubContext innerHits = entry.getValue();
TopDocsAndMaxScore topDoc = innerHits.topDocs(hit);
for (Map.Entry<String, InnerHitsContext.InnerHitSubContext> entry : innerHits.entrySet()) {
InnerHitsContext.InnerHitSubContext innerHitsContext = entry.getValue();
TopDocsAndMaxScore topDoc = innerHitsContext.topDocs(hit);

Map<String, SearchHits> results = hit.getInnerHits();
if (results == null) {
hit.setInnerHits(results = new HashMap<>());
}
innerHits.queryResult().topDocs(topDoc, innerHits.sort() == null ? null : innerHits.sort().formats);
innerHitsContext.queryResult().topDocs(topDoc, innerHitsContext.sort() == null ? null : innerHitsContext.sort().formats);
int[] docIdsToLoad = new int[topDoc.topDocs.scoreDocs.length];
for (int j = 0; j < topDoc.topDocs.scoreDocs.length; j++) {
docIdsToLoad[j] = topDoc.topDocs.scoreDocs[j].doc;
}
innerHits.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
innerHits.setRootId(new Uid(hit.getType(), hit.getId()));
innerHits.setRootLookup(sourceLookup);
innerHitsContext.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length);
innerHitsContext.setRootId(new Uid(hit.getType(), hit.getId()));
innerHitsContext.setRootLookup(sourceLookup);

fetchPhase.execute(innerHits);
FetchSearchResult fetchResult = innerHits.fetchResult();
fetchPhase.execute(innerHitsContext);
FetchSearchResult fetchResult = innerHitsContext.fetchResult();
SearchHit[] internalHits = fetchResult.fetchResult().hits().getHits();
for (int j = 0; j < internalHits.length; j++) {
ScoreDoc scoreDoc = topDoc.topDocs.scoreDocs[j];
@@ -78,7 +95,7 @@ public final class InnerHitsPhase implements FetchSubPhase {
searchHitFields.score(scoreDoc.score);
if (scoreDoc instanceof FieldDoc) {
FieldDoc fieldDoc = (FieldDoc) scoreDoc;
searchHitFields.sortValues(fieldDoc.fields, innerHits.sort().formats);
searchHitFields.sortValues(fieldDoc.fields, innerHitsContext.sort().formats);
}
}
results.put(entry.getKey(), fetchResult.hits());

@@ -18,20 +18,16 @@
*/
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;

import java.io.IOException;
import java.util.ArrayList;
@@ -42,55 +38,52 @@ import java.util.Map;
public final class MatchedQueriesPhase implements FetchSubPhase {

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) {
if (hits.length == 0 ||
public FetchSubPhaseProcessor getProcessor(SearchContext context) throws IOException {
if (context.docIdsToLoadSize() == 0 ||
// in case the request has only suggest, parsed query is null
context.parsedQuery() == null) {
return;
return null;
}
@SuppressWarnings("unchecked")
List<String>[] matchedQueries = new List[hits.length];
for (int i = 0; i < matchedQueries.length; ++i) {
matchedQueries[i] = new ArrayList<>();
}

Map<String, Query> namedQueries = new HashMap<>(context.parsedQuery().namedFilters());
if (context.parsedPostFilter() != null) {
namedQueries.putAll(context.parsedPostFilter().namedFilters());
}
if (namedQueries.isEmpty()) {
return null;
}
Map<String, Weight> weights = new HashMap<>();
for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
weights.put(entry.getKey(),
context.searcher().createWeight(context.searcher().rewrite(entry.getValue()), ScoreMode.COMPLETE_NO_SCORES, 1));
}
return new FetchSubPhaseProcessor() {

try {
for (Map.Entry<String, Query> entry : namedQueries.entrySet()) {
String name = entry.getKey();
Query query = entry.getValue();
int readerIndex = -1;
int docBase = -1;
Weight weight = context.searcher().createWeight(context.searcher().rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
Bits matchingDocs = null;
final IndexReader indexReader = context.searcher().getIndexReader();
for (int i = 0; i < hits.length; ++i) {
SearchHit hit = hits[i];
int hitReaderIndex = ReaderUtil.subIndex(hit.docId(), indexReader.leaves());
if (readerIndex != hitReaderIndex) {
readerIndex = hitReaderIndex;
LeafReaderContext ctx = indexReader.leaves().get(readerIndex);
docBase = ctx.docBase;
// scorers can be costly to create, so reuse them across docs of the same segment
ScorerSupplier scorerSupplier = weight.scorerSupplier(ctx);
matchingDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), scorerSupplier);
}
if (matchingDocs.get(hit.docId() - docBase)) {
matchedQueries[i].add(name);
final Map<String, Bits> matchingIterators = new HashMap<>();

@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
matchingIterators.clear();
for (Map.Entry<String, Weight> entry : weights.entrySet()) {
ScorerSupplier ss = entry.getValue().scorerSupplier(readerContext);
if (ss != null) {
Bits matchingBits = Lucene.asSequentialAccessBits(readerContext.reader().maxDoc(), ss);
matchingIterators.put(entry.getKey(), matchingBits);
}
}
}
for (int i = 0; i < hits.length; ++i) {
hits[i].matchedQueries(matchedQueries[i].toArray(new String[matchedQueries[i].size()]));

@Override
public void process(HitContext hitContext) {
List<String> matches = new ArrayList<>();
int doc = hitContext.docId();
for (Map.Entry<String, Bits> iterator : matchingIterators.entrySet()) {
if (iterator.getValue().get(doc)) {
matches.add(iterator.getKey());
}
}
hitContext.hit().matchedQueries(matches.toArray(new String[0]));
}
} catch (IOException e) {
throw ExceptionsHelper.convertToElastic(e);
} finally {
context.clearReleasables(Lifetime.COLLECTION);
}
};
}

}

@@ -18,14 +18,12 @@
*/
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.elasticsearch.common.document.DocumentField;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.script.FieldScript;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
@@ -37,51 +35,51 @@ import java.util.List;
public final class ScriptFieldsPhase implements FetchSubPhase {

@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
public FetchSubPhaseProcessor getProcessor(SearchContext context) {
if (context.hasScriptFields() == false) {
return;
return null;
}

int lastReaderId = -1;
FieldScript[] leafScripts = null;
List<ScriptFieldsContext.ScriptField> scriptFields = context.scriptFields().fields();
final IndexReader reader = context.searcher().getIndexReader();
for (SearchHit hit : hits) {
int readerId = ReaderUtil.subIndex(hit.docId(), reader.leaves());
LeafReaderContext leafReaderContext = reader.leaves().get(readerId);
if (readerId != lastReaderId) {
leafScripts = createLeafScripts(leafReaderContext, scriptFields);
lastReaderId = readerId;
}
int docId = hit.docId() - leafReaderContext.docBase;
for (int i = 0; i < leafScripts.length; i++) {
leafScripts[i].setDocument(docId);
final Object value;
try {
value = leafScripts[i].execute();
CollectionUtils.ensureNoSelfReferences(value, "ScriptFieldsPhase leaf script " + i);
} catch (RuntimeException e) {
if (scriptFields.get(i).ignoreException()) {
continue;
}
throw e;
}
String scriptFieldName = scriptFields.get(i).name();
DocumentField hitField = hit.field(scriptFieldName);
if (hitField == null) {
final List<Object> values;
if (value instanceof Collection) {
values = new ArrayList<>((Collection<?>) value);
} else {
values = Collections.singletonList(value);
}
hitField = new DocumentField(scriptFieldName, values);
// script fields are never meta-fields
hit.setDocumentField(scriptFieldName, hitField);
return new FetchSubPhaseProcessor() {

FieldScript[] leafScripts = null;

@Override
public void setNextReader(LeafReaderContext readerContext) {
leafScripts = createLeafScripts(readerContext, scriptFields);
}

@Override
public void process(HitContext hitContext) {
int docId = hitContext.docId();
for (int i = 0; i < leafScripts.length; i++) {
leafScripts[i].setDocument(docId);
final Object value;
try {
value = leafScripts[i].execute();
CollectionUtils.ensureNoSelfReferences(value, "ScriptFieldsPhase leaf script " + i);
} catch (RuntimeException e) {
if (scriptFields.get(i).ignoreException()) {
continue;
}
throw e;
}
String scriptFieldName = scriptFields.get(i).name();
DocumentField hitField = hitContext.hit().field(scriptFieldName);
if (hitField == null) {
final List<Object> values;
if (value instanceof Collection) {
values = new ArrayList<>((Collection<?>) value);
} else {
values = Collections.singletonList(value);
}
hitField = new DocumentField(scriptFieldName, values);
// script fields are never meta-fields
hitContext.hit().setDocumentField(scriptFieldName, hitField);
}
}
}
}
};
}

private FieldScript[] createLeafScripts(LeafReaderContext context,

@@ -20,45 +20,47 @@ package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.ReaderUtil;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;

public final class SeqNoPrimaryTermPhase implements FetchSubPhase {
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
if (context.seqNoAndPrimaryTerm() == false) {
return;
}

int lastReaderId = -1;
NumericDocValues seqNoField = null;
NumericDocValues primaryTermField = null;
for (SearchHit hit : hits) {
int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves());
LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId);
if (lastReaderId != readerId) {
seqNoField = subReaderContext.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
primaryTermField = subReaderContext.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
lastReaderId = readerId;
}
int docId = hit.docId() - subReaderContext.docBase;
long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
long primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
// we have to check the primary term field as it is only assigned for non-nested documents
if (primaryTermField != null && primaryTermField.advanceExact(docId)) {
boolean found = seqNoField.advanceExact(docId);
assert found: "found seq no for " + docId + " but not a primary term";
seqNo = seqNoField.longValue();
primaryTerm = primaryTermField.longValue();
}
hit.setSeqNo(seqNo);
hit.setPrimaryTerm(primaryTerm);
@Override
public FetchSubPhaseProcessor getProcessor(SearchContext context) throws IOException {
if (context.seqNoAndPrimaryTerm() == false) {
return null;
}
return new FetchSubPhaseProcessor() {

NumericDocValues seqNoField = null;
NumericDocValues primaryTermField = null;

@Override
public void setNextReader(LeafReaderContext readerContext) throws IOException {
seqNoField = readerContext.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
primaryTermField = readerContext.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
}

@Override
public void process(HitContext hitContext) throws IOException {
int docId = hitContext.docId();
long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
long primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM;
// we have to check the primary term field as it is only assigned for non-nested documents
if (primaryTermField != null && primaryTermField.advanceExact(docId)) {
boolean found = seqNoField.advanceExact(docId);
assert found: "found seq no for " + docId + " but not a primary term";
seqNo = seqNoField.longValue();
primaryTerm = primaryTermField.longValue();
}
hitContext.hit().setSeqNo(seqNo);
hitContext.hit().setPrimaryTerm(primaryTerm);
}
};
}
}

@@ -19,6 +19,7 @@
package org.elasticsearch.search.fetch.subphase.highlight;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
@@ -28,14 +29,18 @@ import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Function;

public class HighlightPhase implements FetchSubPhase {

private final Map<String, Highlighter> highlighters;

public HighlightPhase(Map<String, Highlighter> highlighters) {
@@ -43,20 +48,62 @@ public class HighlightPhase implements FetchSubPhase {
}

@Override
public void hitExecute(SearchContext context, HitContext hitContext) {
public FetchSubPhaseProcessor getProcessor(SearchContext context) {
if (context.highlight() == null) {
return;
return null;
}
hitExecute(context.shardTarget(), context.getQueryShardContext(), context.parsedQuery().query(), context.highlight(), hitContext);

return getProcessor(context.getQueryShardContext(), context.shardTarget(), context.highlight(), context.parsedQuery().query());
}

public void hitExecute(SearchShardTarget shardTarget,
QueryShardContext context,
Query query,
SearchHighlightContext highlight,
HitContext hitContext) {
Map<String, HighlightField> highlightFields = new HashMap<>();
public FetchSubPhaseProcessor getProcessor(QueryShardContext qsc, SearchShardTarget target, SearchHighlightContext hc, Query query) {
Map<String, Function<HitContext, FieldHighlightContext>> contextBuilders = contextBuilders(qsc, target, hc, query);
return new FetchSubPhaseProcessor() {
@Override
public void setNextReader(LeafReaderContext readerContext) {

}

@Override
public void process(HitContext hitContext) {
Map<String, HighlightField> highlightFields = new HashMap<>();
for (String field : contextBuilders.keySet()) {
FieldHighlightContext fieldContext = contextBuilders.get(field).apply(hitContext);
Highlighter highlighter = getHighlighter(fieldContext.field);
HighlightField highlightField = highlighter.highlight(fieldContext);
if (highlightField != null) {
// Note that we make sure to use the original field name in the response. This is because the
// original field could be an alias, and highlighter implementations may instead reference the
// concrete field it points to.
highlightFields.put(field,
new HighlightField(field, highlightField.fragments()));
}
}
hitContext.hit().highlightFields(highlightFields);
}
};
}

private Highlighter getHighlighter(SearchHighlightContext.Field field) {
String highlighterType = field.fieldOptions().highlighterType();
if (highlighterType == null) {
highlighterType = "unified";
}
Highlighter highlighter = highlighters.get(highlighterType);
if (highlighter == null) {
throw new IllegalArgumentException("unknown highlighter type [" + highlighterType
+ "] for the field [" + field.field() + "]");
}
return highlighter;
}

private Map<String, Function<HitContext, FieldHighlightContext>> contextBuilders(QueryShardContext context,
SearchShardTarget shardTarget,
SearchHighlightContext highlight,
Query query) {
Map<String, Function<HitContext, FieldHighlightContext>> builders = new LinkedHashMap<>();
for (SearchHighlightContext.Field field : highlight.fields()) {
Highlighter highlighter = getHighlighter(field);
Collection<String> fieldNamesToHighlight;
if (Regex.isSimpleMatchPattern(field.field())) {
fieldNamesToHighlight = context.getMapperService().simpleMatchToFullName(field.field());
@@ -65,10 +112,10 @@ public class HighlightPhase implements FetchSubPhase {
}

if (highlight.forceSource(field)) {
SourceFieldMapper sourceFieldMapper = context.getMapperService().documentMapper(hitContext.hit().getType()).sourceMapper();
if (!sourceFieldMapper.enabled()) {
throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight
+ " but type [" + hitContext.hit().getType() + "] has disabled _source");
SourceFieldMapper sourceFieldMapper = context.getMapperService().documentMapper().sourceMapper();
if (sourceFieldMapper.enabled() == false) {
throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight
+ " but _source is disabled");
}
}

@@ -92,40 +139,19 @@ public class HighlightPhase implements FetchSubPhase {
fieldType.typeName().equals(KeywordFieldMapper.CONTENT_TYPE) == false) {
continue;
}
}
String highlighterType = field.fieldOptions().highlighterType();
if (highlighterType == null) {
highlighterType = "unified";
}
Highlighter highlighter = highlighters.get(highlighterType);
if (highlighter == null) {
throw new IllegalArgumentException("unknown highlighter type [" + highlighterType
+ "] for the field [" + fieldName + "]");
if (highlighter.canHighlight(fieldType) == false) {
continue;
}
}

Query highlightQuery = field.fieldOptions().highlightQuery();
if (highlightQuery == null) {
highlightQuery = query;
}

boolean forceSource = highlight.forceSource(field);
FieldHighlightContext fieldContext = new FieldHighlightContext(fieldType.name(),
field, fieldType, shardTarget, context, hitContext, highlightQuery, forceSource);

if ((highlighter.canHighlight(fieldType) == false) && fieldNameContainsWildcards) {
// if several fieldnames matched the wildcard then we want to skip those that we cannot highlight
continue;
}
HighlightField highlightField = highlighter.highlight(fieldContext);
if (highlightField != null) {
// Note that we make sure to use the original field name in the response. This is because the
// original field could be an alias, and highlighter implementations may instead reference the
// concrete field it points to.
highlightFields.put(fieldName,
new HighlightField(fieldName, highlightField.fragments()));
}
builders.put(fieldName,
hc -> new FieldHighlightContext(fieldType.name(), field, fieldType, shardTarget, context, hc,
highlightQuery == null ? query : highlightQuery, forceSource));
}
}
hitContext.hit().highlightFields(highlightFields);
return builders;
}
}

@@ -27,14 +27,16 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.TestSearchContext;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import static org.mockito.Mockito.mock;
@@ -46,7 +48,7 @@ public class FetchSourcePhaseTests extends ESTestCase {
XContentBuilder source = XContentFactory.jsonBuilder().startObject()
.field("field", "value")
.endObject();
FetchSubPhase.HitContext hitContext = hitExecute(source, true, null, null);
HitContext hitContext = hitExecute(source, true, null, null);
assertEquals(Collections.singletonMap("field","value"), hitContext.hit().getSourceAsMap());
}

@@ -55,7 +57,7 @@ public class FetchSourcePhaseTests extends ESTestCase {
.field("field1", "value")
.field("field2", "value2")
.endObject();
FetchSubPhase.HitContext hitContext = hitExecute(source, false, null, null);
HitContext hitContext = hitExecute(source, false, null, null);
assertNull(hitContext.hit().getSourceAsMap());

hitContext = hitExecute(source, true, "field1", null);
@@ -73,7 +75,7 @@ public class FetchSourcePhaseTests extends ESTestCase {
.field("field", "value")
.field("field2", "value2")
.endObject();
FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, new String[]{"*.notexisting", "field"}, null);
HitContext hitContext = hitExecuteMultiple(source, true, new String[]{"*.notexisting", "field"}, null);
assertEquals(Collections.singletonMap("field","value"), hitContext.hit().getSourceAsMap());

hitContext = hitExecuteMultiple(source, true, new String[]{"field.notexisting.*", "field"}, null);
@@ -87,7 +89,7 @@ public class FetchSourcePhaseTests extends ESTestCase {
.field("field2", "value2")
.field("nested1", expectedNested)
.endObject();
FetchSubPhase.HitContext hitContext = hitExecuteMultiple(source, true, null, null,
HitContext hitContext = hitExecuteMultiple(source, true, null, null,
new SearchHit.NestedIdentity("nested1", 0,null));
assertEquals(expectedNested, hitContext.hit().getSourceAsMap());
hitContext = hitExecuteMultiple(source, true, new String[]{"invalid"}, null,
@@ -104,7 +106,7 @@ public class FetchSourcePhaseTests extends ESTestCase {
}

public void testSourceDisabled() throws IOException {
FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null);
HitContext hitContext = hitExecute(null, true, null, null);
assertNull(hitContext.hit().getSourceAsMap());

hitContext = hitExecute(null, false, null, null);
@@ -120,8 +122,8 @@ public class FetchSourcePhaseTests extends ESTestCase {
"for index [index]", exception.getMessage());
}

public void testNestedSourceWithSourceDisabled() {
FetchSubPhase.HitContext hitContext = hitExecute(null, true, null, null,
public void testNestedSourceWithSourceDisabled() throws IOException {
HitContext hitContext = hitExecute(null, true, null, null,
new SearchHit.NestedIdentity("nested1", 0, null));
assertNull(hitContext.hit().getSourceAsMap());

@@ -131,37 +133,43 @@ public class FetchSourcePhaseTests extends ESTestCase {
"for index [index]", e.getMessage());
}

private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) {
private HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) throws IOException {
return hitExecute(source, fetchSource, include, exclude, null);
}

private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude,
SearchHit.NestedIdentity nestedIdentity) {
private HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude,
SearchHit.NestedIdentity nestedIdentity) throws IOException {
return hitExecuteMultiple(source, fetchSource,
include == null ? Strings.EMPTY_ARRAY : new String[]{include},
exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}, nestedIdentity);
}

private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes) {
private HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes)
throws IOException {
return hitExecuteMultiple(source, fetchSource, includes, excludes, null);
}

private FetchSubPhase.HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes,
SearchHit.NestedIdentity nestedIdentity) {
private HitContext hitExecuteMultiple(XContentBuilder source, boolean fetchSource, String[] includes, String[] excludes,
SearchHit.NestedIdentity nestedIdentity) throws IOException {
FetchSourceContext fetchSourceContext = new FetchSourceContext(fetchSource, includes, excludes);
SearchContext searchContext = new FetchSourcePhaseTestSearchContext(fetchSourceContext);

FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
final SearchHit searchHit = new SearchHit(1, null, null, nestedIdentity, null, null);

// We don't need a real index, just a LeafReaderContext which cannot be mocked.
MemoryIndex index = new MemoryIndex();
LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0);
hitContext.reset(searchHit, leafReaderContext, 1, null);
HitContext hitContext = new HitContext(searchHit, leafReaderContext, 1, null, new HashMap<>());
hitContext.sourceLookup().setSource(source == null ? null : BytesReference.bytes(source));

FetchSourcePhase phase = new FetchSourcePhase();
phase.hitExecute(searchContext, hitContext);
FetchSubPhaseProcessor processor = phase.getProcessor(searchContext);
if (fetchSource == false) {
assertNull(processor);
} else {
assertNotNull(processor);
processor.process(hitContext);
}
return hitContext;
}
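
Note: the sketch below is not part of this commit; it only illustrates the getProcessor/setNextReader/process contract that the diffs above converge on. The DummyTagPhase class name and the constant score it assigns are made up for illustration; the API calls (getProcessor, docIdsToLoadSize, setNextReader, process, hitContext.docId(), hit().score()) are the ones used in the changed sub-phases.

// Illustrative only: a hypothetical sub-phase written against the new processor API.
package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

public final class DummyTagPhase implements FetchSubPhase {

    @Override
    public FetchSubPhaseProcessor getProcessor(SearchContext context) {
        if (context.docIdsToLoadSize() == 0) {
            return null; // nothing to do for this request: FetchPhase skips the phase entirely
        }
        // per-request structures would be built here, once
        return new FetchSubPhaseProcessor() {
            @Override
            public void setNextReader(LeafReaderContext readerContext) {
                // per-segment structures (doc values, scorers, scripts) would be rebuilt here
            }

            @Override
            public void process(HitContext hitContext) {
                // per-document work; hitContext.docId() is segment-relative by this point
                hitContext.hit().score(1.0f); // placeholder per-hit side effect for illustration
            }
        };
    }
}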