Rationalise fetch phase exceptions (#62230)

We have a special FetchPhaseExecutionException which carries useful information
about the shard and doc on which the fetch phase failed. However, it is thrown in
only a few places - currently just the ExplainPhase and the highlighters - while
FetchPhase itself catches IOExceptions and passes them straight to
ExceptionsHelper with no extra context.

This commit changes FetchPhase to throw FetchPhaseExecutionException if it
encounters problems in any of its subphases, and removes the special handling
from the explain and highlight phases. It also removes the need to pass shard ids
around when building HitContext objects.
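For illustration, the sketch below uses simplified stand-in types (not the actual Elasticsearch classes) to show the wrapping pattern this commit centralises: sub-phase processors may simply declare IOException, and the fetch loop wraps any failure in a single exception that records the shard and the doc id.

import java.io.IOException;
import java.util.List;

// Minimal sketch under assumed, simplified types; the real classes are
// FetchSubPhaseProcessor and FetchPhaseExecutionException in the diff below.
class FetchWrappingSketch {

    // Stand-in for FetchSubPhaseProcessor: sub-phases can now propagate
    // IOException instead of wrapping it themselves.
    interface SubPhaseProcessor {
        void process(int docId) throws IOException;
    }

    // Stand-in for FetchPhaseExecutionException: carries shard and doc context.
    static class FetchPhaseException extends RuntimeException {
        FetchPhaseException(String shard, String message, Exception cause) {
            super("[" + shard + "] " + message, cause);
        }
    }

    // The fetch loop adds shard/doc context once, so ExplainPhase and the
    // highlighters no longer need their own exception handling.
    static void fetch(String shard, int[] docIds, List<SubPhaseProcessor> processors) {
        for (int docId : docIds) {
            try {
                for (SubPhaseProcessor processor : processors) {
                    processor.process(docId);
                }
            } catch (Exception e) {
                throw new FetchPhaseException(shard, "Error running fetch phase for doc [" + docId + "]", e);
            }
        }
    }
}

The same pattern appears in the new getProcessors helper in the diff, which wraps failures that occur while building the sub-phase processors.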
Alan Woodward 2020-09-15 09:07:35 +01:00 committed by Alan Woodward
parent 84ac72dced
commit a68f7077c7
7 changed files with 255 additions and 283 deletions

org/elasticsearch/search/fetch/FetchPhase.java

@@ -30,7 +30,6 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
@@ -52,7 +51,6 @@ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.search.SearchContextSourcePrinter;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;
@@ -96,7 +94,6 @@ public class FetchPhase {
Map<String, Set<String>> storedToRequestedFields = new HashMap<>();
FieldsVisitor fieldsVisitor = createStoredFieldsVisitor(context, storedToRequestedFields);
try {
DocIdToIndex[] docs = new DocIdToIndex[context.docIdsToLoadSize()];
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
docs[index] = new DocIdToIndex(context.docIdsToLoad()[context.docIdsToLoadFrom() + index], index);
@@ -106,13 +103,7 @@
SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()];
Map<String, Object> sharedCache = new HashMap<>();
List<FetchSubPhaseProcessor> processors = new ArrayList<>();
for (FetchSubPhase fsp : fetchSubPhases) {
FetchSubPhaseProcessor processor = fsp.getProcessor(context);
if (processor != null) {
processors.add(processor);
}
}
List<FetchSubPhaseProcessor> processors = getProcessors(context);
int currentReaderIndex = -1;
LeafReaderContext currentReaderContext = null;
@@ -121,6 +112,7 @@
throw new TaskCancelledException("cancelled");
}
int docId = docs[index].docId;
try {
int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
if (currentReaderIndex != readerIndex) {
currentReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);
@@ -130,13 +122,15 @@
}
}
assert currentReaderContext != null;
HitContext hit
= prepareHitContext(context, fieldsVisitor, docId, storedToRequestedFields, currentReaderContext, sharedCache);
for (FetchSubPhaseProcessor processor : processors) {
processor.process(hit);
}
hits[docs[index].index] = hit.hit();
} catch (Exception e) {
throw new FetchPhaseExecutionException(context.shardTarget(), "Error running fetch phase for doc [" + docId + "]", e);
}
}
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
@@ -144,8 +138,21 @@
TotalHits totalHits = context.queryResult().getTotalHits();
context.fetchResult().hits(new SearchHits(hits, totalHits, context.queryResult().getMaxScore()));
} catch (IOException e) {
throw ExceptionsHelper.convertToElastic(e);
}
List<FetchSubPhaseProcessor> getProcessors(SearchContext context) {
try {
List<FetchSubPhaseProcessor> processors = new ArrayList<>();
for (FetchSubPhase fsp : fetchSubPhases) {
FetchSubPhaseProcessor processor = fsp.getProcessor(context);
if (processor != null) {
processors.add(processor);
}
}
return processors;
} catch (Exception e) {
throw new FetchPhaseExecutionException(context.shardTarget(), "Error building fetch sub-phases", e);
}
}
@@ -251,7 +258,7 @@ public class FetchPhase {
int docId,
Map<String, Set<String>> storedToRequestedFields,
LeafReaderContext subReaderContext,
Map<String, Object> sharedCache) {
Map<String, Object> sharedCache) throws IOException {
int subDocId = docId - subReaderContext.docBase;
DocumentMapper documentMapper = context.mapperService().documentMapper();
Text typeText = documentMapper.typeText();
@@ -261,7 +268,7 @@
return new HitContext(hit, subReaderContext, subDocId, context.searcher(), sharedCache);
} else {
SearchHit hit;
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, fieldsVisitor, subDocId);
loadStoredFields(context.mapperService(), subReaderContext, fieldsVisitor, subDocId);
Uid uid = fieldsVisitor.uid();
if (fieldsVisitor.fields().isEmpty() == false) {
Map<String, DocumentField> docFields = new HashMap<>();
@@ -318,7 +325,7 @@
}
} else {
FieldsVisitor rootFieldsVisitor = new FieldsVisitor(needSource);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, rootFieldsVisitor, rootDocId);
loadStoredFields(context.mapperService(), subReaderContext, rootFieldsVisitor, rootDocId);
rootFieldsVisitor.postProcess(context.mapperService());
rootId = rootFieldsVisitor.uid();
@@ -334,7 +341,7 @@
Map<String, DocumentField> metaFields = emptyMap();
if (context.hasStoredFields() && !context.storedFieldsContext().fieldNames().isEmpty()) {
FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(storedToRequestedFields.keySet(), false);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, nestedFieldsVisitor, nestedDocId);
loadStoredFields(context.mapperService(), subReaderContext, nestedFieldsVisitor, nestedDocId);
if (nestedFieldsVisitor.fields().isEmpty() == false) {
docFields = new HashMap<>();
metaFields = new HashMap<>();
@@ -478,16 +485,11 @@
return nestedIdentity;
}
private void loadStoredFields(SearchShardTarget shardTarget,
MapperService mapperService,
private void loadStoredFields(MapperService mapperService,
LeafReaderContext readerContext,
FieldsVisitor fieldVisitor, int docId) {
FieldsVisitor fieldVisitor, int docId) throws IOException {
fieldVisitor.reset();
try {
readerContext.reader().document(docId, fieldVisitor);
} catch (IOException e) {
throw new FetchPhaseExecutionException(shardTarget, "Failed to fetch doc id [" + docId + "]", e);
}
fieldVisitor.postProcess(mapperService);
}

org/elasticsearch/search/fetch/subphase/ExplainPhase.java

@@ -20,7 +20,6 @@ package org.elasticsearch.search.fetch.subphase;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Explanation;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
@@ -45,8 +44,7 @@ public final class ExplainPhase implements FetchSubPhase {
}
@Override
public void process(HitContext hitContext) {
try {
public void process(HitContext hitContext) throws IOException {
final int topLevelDocId = hitContext.hit().docId();
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
@@ -56,11 +54,6 @@ public final class ExplainPhase implements FetchSubPhase {
// we use the top level doc id, since we work with the top level searcher
hitContext.hit().explanation(explanation);
}
catch (IOException e) { // TODO move this try-catch up into FetchPhase
throw new FetchPhaseExecutionException(context.shardTarget(),
"Failed to explain doc [" + hitContext.hit().getId() + "]", e);
}
}
};
}
}

org/elasticsearch/search/fetch/subphase/highlight/FastVectorHighlighter.java

@@ -24,7 +24,6 @@ import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
import org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScanner;
import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
import org.apache.lucene.search.vectorhighlight.FieldFragList;
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo;
import org.apache.lucene.search.vectorhighlight.FieldQuery;
import org.apache.lucene.search.vectorhighlight.FragListBuilder;
import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;
@@ -39,11 +38,11 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.TextSearchInfo;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.Field;
import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.FieldOptions;
import java.io.IOException;
import java.text.BreakIterator;
import java.util.Collections;
import java.util.HashMap;
@@ -68,7 +67,7 @@ public class FastVectorHighlighter implements Highlighter {
}
@Override
public HighlightField highlight(FieldHighlightContext fieldContext) {
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
SearchHighlightContext.Field field = fieldContext.field;
FetchSubPhase.HitContext hitContext = fieldContext.hitContext;
MappedFieldType fieldType = fieldContext.fieldType;
@@ -87,8 +86,6 @@ public class FastVectorHighlighter implements Highlighter {
hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
}
HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
try {
FieldHighlightEntry entry = cache.fields.get(fieldType);
if (entry == null) {
FragListBuilder fragListBuilder;
@@ -130,14 +127,14 @@
fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue);
entry = new FieldHighlightEntry();
if (field.fieldOptions().requireFieldMatch()) {
/**
/*
* we use top level reader to rewrite the query against all readers,
* with use caching it across hits (and across readers...)
*/
entry.fieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query,
hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
} else {
/**
/*
* we use top level reader to rewrite the query against all readers,
* with use caching it across hits (and across readers...)
*/
@@ -192,7 +189,7 @@
// Essentially we just request that a fragment is built from 0 to noMatchSize using
// the normal fragmentsBuilder
FieldFragList fieldFragList = new SimpleFieldFragList(-1 /*ignored*/);
fieldFragList.add(0, noMatchSize, Collections.<WeightedPhraseInfo>emptyList());
fieldFragList.add(0, noMatchSize, Collections.emptyList());
fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(),
fieldType.name(), fieldFragList, 1, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), encoder);
@@ -202,11 +199,6 @@
}
return null;
} catch (Exception e) {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
}
}
@Override
@@ -244,14 +236,14 @@
}
}
private class FieldHighlightEntry {
private static class FieldHighlightEntry {
public FragListBuilder fragListBuilder;
public FragmentsBuilder fragmentsBuilder;
public FieldQuery noFieldMatchFieldQuery;
public FieldQuery fieldMatchFieldQuery;
}
private class HighlighterEntry {
private static class HighlighterEntry {
public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh;
public Map<MappedFieldType, FieldHighlightEntry> fields = new HashMap<>();
}

org/elasticsearch/search/fetch/subphase/highlight/HighlightPhase.java

@@ -32,6 +32,7 @@ import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -65,7 +66,7 @@ public class HighlightPhase implements FetchSubPhase {
}
@Override
public void process(HitContext hitContext) {
public void process(HitContext hitContext) throws IOException {
Map<String, HighlightField> highlightFields = new HashMap<>();
for (String field : contextBuilders.keySet()) {
FieldHighlightContext fieldContext = contextBuilders.get(field).apply(hitContext);

org/elasticsearch/search/fetch/subphase/highlight/Highlighter.java

@@ -20,12 +20,14 @@ package org.elasticsearch.search.fetch.subphase.highlight;
import org.elasticsearch.index.mapper.MappedFieldType;
import java.io.IOException;
/**
* Highlights a search result.
*/
public interface Highlighter {
HighlightField highlight(FieldHighlightContext fieldContext);
HighlightField highlight(FieldHighlightContext fieldContext) throws IOException;
boolean canHighlight(MappedFieldType fieldType);
}

org/elasticsearch/search/fetch/subphase/highlight/PlainHighlighter.java

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.NullFragmenter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
@@ -33,18 +34,15 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -55,7 +53,7 @@ public class PlainHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-plain";
@Override
public HighlightField highlight(FieldHighlightContext fieldContext) {
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
SearchHighlightContext.Field field = fieldContext.field;
QueryShardContext context = fieldContext.context;
FetchSubPhase.HitContext hitContext = fieldContext.hitContext;
@@ -111,7 +109,6 @@
};
final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset();
try {
textsToHighlight = HighlightUtils.loadFieldValues(fieldType, hitContext, fieldContext.forceSource);
for (Object textToHighlight : textsToHighlight) {
@@ -141,26 +138,18 @@
fragsList.add(bestTextFragment);
}
}
}
}
} catch (Exception e) {
if (ExceptionsHelper.unwrap(e, BytesRefHash.MaxBytesLengthExceededException.class) != null) {
} catch (BytesRefHash.MaxBytesLengthExceededException e) {
// this can happen if for example a field is not_analyzed and ignore_above option is set.
// the field will be ignored when indexing but the huge term is still in the source and
// the plain highlighter will parse the source and try to analyze it.
return null;
} else {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
// ignore and continue to the next value
} catch (InvalidTokenOffsetsException e) {
throw new IllegalArgumentException(e);
}
}
if (field.fieldOptions().scoreOrdered()) {
CollectionUtil.introSort(fragsList, new Comparator<TextFragment>() {
@Override
public int compare(TextFragment o1, TextFragment o2) {
return Math.round(o2.getScore() - o1.getScore());
}
});
CollectionUtil.introSort(fragsList, (o1, o2) -> Math.round(o2.getScore() - o1.getScore()));
}
String[] fragments;
// number_of_fragments is set to 0 but we have a multivalued field
@@ -171,7 +160,7 @@
}
} else {
// refine numberOfFragments if needed
numberOfFragments = fragsList.size() < numberOfFragments ? fragsList.size() : numberOfFragments;
numberOfFragments = Math.min(fragsList.size(), numberOfFragments);
fragments = new String[numberOfFragments];
for (int i = 0; i < fragments.length; i++) {
fragments[i] = fragsList.get(i).toString();
@@ -186,13 +175,7 @@
if (noMatchSize > 0 && textsToHighlight.size() > 0) {
// Pull an excerpt from the beginning of the string but make sure to split the string on a term boundary.
String fieldContents = textsToHighlight.get(0).toString();
int end;
try {
end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, fieldType.name(), fieldContents);
} catch (Exception e) {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
}
int end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, fieldType.name(), fieldContents);
if (end > 0) {
return new HighlightField(fieldContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
}

org/elasticsearch/search/fetch/subphase/highlight/UnifiedHighlighter.java

@@ -61,66 +61,14 @@ public class UnifiedHighlighter implements Highlighter {
}
@Override
public HighlightField highlight(FieldHighlightContext fieldContext) {
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
@SuppressWarnings("unchecked")
Map<String, CustomUnifiedHighlighter> cache = (Map<String, CustomUnifiedHighlighter>) fieldContext.hitContext.cache()
.computeIfAbsent(UnifiedHighlighter.class.getName(), k -> new HashMap<>());
CustomUnifiedHighlighter highlighter = (CustomUnifiedHighlighter) cache.computeIfAbsent(fieldContext.fieldName, f -> {
Encoder encoder = fieldContext.field.fieldOptions().encoder().equals("html")
? HighlightUtils.Encoders.HTML
: HighlightUtils.Encoders.DEFAULT;
int maxAnalyzedOffset = fieldContext.context.getIndexSettings().getHighlightMaxAnalyzedOffset();
int keywordIgnoreAbove = Integer.MAX_VALUE;
if (fieldContext.fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
KeywordFieldMapper mapper = (KeywordFieldMapper) fieldContext.context.getMapperService().documentMapper()
.mappers().getMapper(fieldContext.fieldName);
keywordIgnoreAbove = mapper.ignoreAbove();
if (cache.containsKey(fieldContext.fieldName) == false) {
cache.put(fieldContext.fieldName, buildHighlighter(fieldContext));
}
int numberOfFragments = fieldContext.field.fieldOptions().numberOfFragments();
Analyzer analyzer = getAnalyzer(fieldContext.context.getMapperService().documentMapper());
PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder);
IndexSearcher searcher = fieldContext.context.searcher();
OffsetSource offsetSource = getOffsetSource(fieldContext.fieldType);
BreakIterator breakIterator;
int higlighterNumberOfFragments;
if (numberOfFragments == 0
// non-tokenized fields should not use any break iterator (ignore boundaryScannerType)
|| fieldContext.fieldType.getTextSearchInfo().isTokenized() == false) {
/*
* We use a control char to separate values, which is the
* only char that the custom break iterator breaks the text
* on, so we don't lose the distinction between the different
* values of a field and we get back a snippet per value
*/
breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
higlighterNumberOfFragments = numberOfFragments == 0 ? Integer.MAX_VALUE - 1 : numberOfFragments;
} else {
//using paragraph separator we make sure that each field value holds a discrete passage for highlighting
breakIterator = getBreakIterator(fieldContext.field);
higlighterNumberOfFragments = numberOfFragments;
}
try {
return new CustomUnifiedHighlighter(
searcher,
analyzer,
offsetSource,
passageFormatter,
fieldContext.field.fieldOptions().boundaryScannerLocale(),
breakIterator,
fieldContext.context.getFullyQualifiedIndex().getName(),
fieldContext.fieldName,
fieldContext.query,
fieldContext.field.fieldOptions().noMatchSize(),
higlighterNumberOfFragments,
fieldMatcher(fieldContext),
keywordIgnoreAbove,
maxAnalyzedOffset
);
} catch (IOException e) {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
}
});
CustomUnifiedHighlighter highlighter = cache.get(fieldContext.fieldName);
MappedFieldType fieldType = fieldContext.fieldType;
SearchHighlightContext.Field field = fieldContext.field;
FetchSubPhase.HitContext hitContext = fieldContext.hitContext;
@@ -166,10 +114,61 @@ public class UnifiedHighlighter implements Highlighter {
return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
}
CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) throws IOException {
Encoder encoder = fieldContext.field.fieldOptions().encoder().equals("html")
? HighlightUtils.Encoders.HTML
: HighlightUtils.Encoders.DEFAULT;
int maxAnalyzedOffset = fieldContext.context.getIndexSettings().getHighlightMaxAnalyzedOffset();
int keywordIgnoreAbove = Integer.MAX_VALUE;
if (fieldContext.fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
KeywordFieldMapper mapper = (KeywordFieldMapper) fieldContext.context.getMapperService().documentMapper()
.mappers().getMapper(fieldContext.fieldName);
keywordIgnoreAbove = mapper.ignoreAbove();
}
int numberOfFragments = fieldContext.field.fieldOptions().numberOfFragments();
Analyzer analyzer = getAnalyzer(fieldContext.context.getMapperService().documentMapper());
PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder);
IndexSearcher searcher = fieldContext.context.searcher();
OffsetSource offsetSource = getOffsetSource(fieldContext.fieldType);
BreakIterator breakIterator;
int higlighterNumberOfFragments;
if (numberOfFragments == 0
// non-tokenized fields should not use any break iterator (ignore boundaryScannerType)
|| fieldContext.fieldType.getTextSearchInfo().isTokenized() == false) {
/*
* We use a control char to separate values, which is the
* only char that the custom break iterator breaks the text
* on, so we don't lose the distinction between the different
* values of a field and we get back a snippet per value
*/
breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
higlighterNumberOfFragments = numberOfFragments == 0 ? Integer.MAX_VALUE - 1 : numberOfFragments;
} else {
//using paragraph separator we make sure that each field value holds a discrete passage for highlighting
breakIterator = getBreakIterator(fieldContext.field);
higlighterNumberOfFragments = numberOfFragments;
}
return new CustomUnifiedHighlighter(
searcher,
analyzer,
offsetSource,
passageFormatter,
fieldContext.field.fieldOptions().boundaryScannerLocale(),
breakIterator,
fieldContext.context.getFullyQualifiedIndex().getName(),
fieldContext.fieldName,
fieldContext.query,
fieldContext.field.fieldOptions().noMatchSize(),
higlighterNumberOfFragments,
fieldMatcher(fieldContext),
keywordIgnoreAbove,
maxAnalyzedOffset
);
}
protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchHighlightContext.Field field, Encoder encoder) {
CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0],
return new CustomPassageFormatter(field.fieldOptions().preTags()[0],
field.fieldOptions().postTags()[0], encoder);
return passageFormatter;
}
@@ -244,7 +243,7 @@
private Predicate<String> fieldMatcher(FieldHighlightContext fieldContext) {
if (fieldContext.field.fieldOptions().requireFieldMatch()) {
String fieldName = fieldContext.fieldName;
return name -> fieldName.equals(name);
return fieldName::equals;
}
// ignore terms that targets the _id field since they use a different encoding
// that is not compatible with utf8