Rationalise fetch phase exceptions (#62230)
We have a special FetchPhaseExecutionException which contains some useful information about which shard and doc a fetch phase has failed in. However, this is not used in many places - currently only the ExplainPhase and the highlighters throw one, and the FetchPhase itself catches IOExceptions and just passes them to the ExceptionsHelper with no extra context. This commit changes FetchPhase to throw FetchPhaseExecutionException if it encounters problems in any of its subphases, and removes the special handling from the explain and highlight phases. It also removes the need to pass shard ids around when building HitContext objects.
parent 84ac72dced
commit a68f7077c7
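To summarise the shape of the change, the sketch below shows the pattern the fetch phase moves to. It is a condensed illustration based on the diff that follows, not the exact production code; surrounding variables (docs, hit, context) are abbreviated and the error message is taken from the diff. Sub-phase processors are built once per shard, and any per-document failure is wrapped in a single place with the shard target and doc id attached.

    // Sub-phase processors are built up front; a failure here is reported as
    // a FetchPhaseExecutionException attributed to the shard.
    List<FetchSubPhaseProcessor> processors = getProcessors(context);

    for (int index = 0; index < context.docIdsToLoadSize(); index++) {
        int docId = docs[index].docId;
        try {
            // run every sub-phase (source, explain, highlight, ...) on this hit
            for (FetchSubPhaseProcessor processor : processors) {
                processor.process(hit);
            }
        } catch (Exception e) {
            // one wrapping point instead of per-subphase try/catch blocks:
            // the exception carries the shard target and the failing doc id
            throw new FetchPhaseExecutionException(context.shardTarget(),
                "Error running fetch phase for doc [" + docId + "]", e);
        }
    }

This replaces the previous behaviour where only individual sub-phases (explain, highlighting) threw FetchPhaseExecutionException and FetchPhase itself converted IOExceptions with no context.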
FetchPhase.java

@@ -30,7 +30,6 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;

@@ -52,7 +51,6 @@ import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.search.SearchContextSourcePrinter;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.InnerHitsContext;

@@ -96,31 +94,25 @@ public class FetchPhase {
Map<String, Set<String>> storedToRequestedFields = new HashMap<>();
FieldsVisitor fieldsVisitor = createStoredFieldsVisitor(context, storedToRequestedFields);

try {
DocIdToIndex[] docs = new DocIdToIndex[context.docIdsToLoadSize()];
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
docs[index] = new DocIdToIndex(context.docIdsToLoad()[context.docIdsToLoadFrom() + index], index);
DocIdToIndex[] docs = new DocIdToIndex[context.docIdsToLoadSize()];
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
docs[index] = new DocIdToIndex(context.docIdsToLoad()[context.docIdsToLoadFrom() + index], index);
}
Arrays.sort(docs);

SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()];
Map<String, Object> sharedCache = new HashMap<>();

List<FetchSubPhaseProcessor> processors = getProcessors(context);

int currentReaderIndex = -1;
LeafReaderContext currentReaderContext = null;
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}
Arrays.sort(docs);

SearchHit[] hits = new SearchHit[context.docIdsToLoadSize()];
Map<String, Object> sharedCache = new HashMap<>();

List<FetchSubPhaseProcessor> processors = new ArrayList<>();
for (FetchSubPhase fsp : fetchSubPhases) {
FetchSubPhaseProcessor processor = fsp.getProcessor(context);
if (processor != null) {
processors.add(processor);
}
}

int currentReaderIndex = -1;
LeafReaderContext currentReaderContext = null;
for (int index = 0; index < context.docIdsToLoadSize(); index++) {
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}
int docId = docs[index].docId;
int docId = docs[index].docId;
try {
int readerIndex = ReaderUtil.subIndex(docId, context.searcher().getIndexReader().leaves());
if (currentReaderIndex != readerIndex) {
currentReaderContext = context.searcher().getIndexReader().leaves().get(readerIndex);

@@ -130,22 +122,37 @@ public class FetchPhase {
}
}
assert currentReaderContext != null;

HitContext hit
= prepareHitContext(context, fieldsVisitor, docId, storedToRequestedFields, currentReaderContext, sharedCache);
for (FetchSubPhaseProcessor processor : processors) {
processor.process(hit);
}
hits[docs[index].index] = hit.hit();
} catch (Exception e) {
throw new FetchPhaseExecutionException(context.shardTarget(), "Error running fetch phase for doc [" + docId + "]", e);
}
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}
}
if (context.isCancelled()) {
throw new TaskCancelledException("cancelled");
}

TotalHits totalHits = context.queryResult().getTotalHits();
context.fetchResult().hits(new SearchHits(hits, totalHits, context.queryResult().getMaxScore()));
} catch (IOException e) {
throw ExceptionsHelper.convertToElastic(e);
TotalHits totalHits = context.queryResult().getTotalHits();
context.fetchResult().hits(new SearchHits(hits, totalHits, context.queryResult().getMaxScore()));

}

List<FetchSubPhaseProcessor> getProcessors(SearchContext context) {
try {
List<FetchSubPhaseProcessor> processors = new ArrayList<>();
for (FetchSubPhase fsp : fetchSubPhases) {
FetchSubPhaseProcessor processor = fsp.getProcessor(context);
if (processor != null) {
processors.add(processor);
}
}
return processors;
} catch (Exception e) {
throw new FetchPhaseExecutionException(context.shardTarget(), "Error building fetch sub-phases", e);
}
}

@@ -251,7 +258,7 @@ public class FetchPhase {
int docId,
Map<String, Set<String>> storedToRequestedFields,
LeafReaderContext subReaderContext,
Map<String, Object> sharedCache) {
Map<String, Object> sharedCache) throws IOException {
int subDocId = docId - subReaderContext.docBase;
DocumentMapper documentMapper = context.mapperService().documentMapper();
Text typeText = documentMapper.typeText();

@@ -261,7 +268,7 @@ public class FetchPhase {
return new HitContext(hit, subReaderContext, subDocId, context.searcher(), sharedCache);
} else {
SearchHit hit;
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, fieldsVisitor, subDocId);
loadStoredFields(context.mapperService(), subReaderContext, fieldsVisitor, subDocId);
Uid uid = fieldsVisitor.uid();
if (fieldsVisitor.fields().isEmpty() == false) {
Map<String, DocumentField> docFields = new HashMap<>();

@@ -318,7 +325,7 @@ public class FetchPhase {
}
} else {
FieldsVisitor rootFieldsVisitor = new FieldsVisitor(needSource);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, rootFieldsVisitor, rootDocId);
loadStoredFields(context.mapperService(), subReaderContext, rootFieldsVisitor, rootDocId);
rootFieldsVisitor.postProcess(context.mapperService());
rootId = rootFieldsVisitor.uid();

@@ -334,7 +341,7 @@ public class FetchPhase {
Map<String, DocumentField> metaFields = emptyMap();
if (context.hasStoredFields() && !context.storedFieldsContext().fieldNames().isEmpty()) {
FieldsVisitor nestedFieldsVisitor = new CustomFieldsVisitor(storedToRequestedFields.keySet(), false);
loadStoredFields(context.shardTarget(), context.mapperService(), subReaderContext, nestedFieldsVisitor, nestedDocId);
loadStoredFields(context.mapperService(), subReaderContext, nestedFieldsVisitor, nestedDocId);
if (nestedFieldsVisitor.fields().isEmpty() == false) {
docFields = new HashMap<>();
metaFields = new HashMap<>();

@@ -478,16 +485,11 @@ public class FetchPhase {
return nestedIdentity;
}

private void loadStoredFields(SearchShardTarget shardTarget,
MapperService mapperService,
private void loadStoredFields(MapperService mapperService,
LeafReaderContext readerContext,
FieldsVisitor fieldVisitor, int docId) {
FieldsVisitor fieldVisitor, int docId) throws IOException {
fieldVisitor.reset();
try {
readerContext.reader().document(docId, fieldVisitor);
} catch (IOException e) {
throw new FetchPhaseExecutionException(shardTarget, "Failed to fetch doc id [" + docId + "]", e);
}
readerContext.reader().document(docId, fieldVisitor);
fieldVisitor.postProcess(mapperService);
}

ExplainPhase.java

@@ -20,7 +20,6 @@ package org.elasticsearch.search.fetch.subphase;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Explanation;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

@@ -45,21 +44,15 @@ public final class ExplainPhase implements FetchSubPhase {
}

@Override
public void process(HitContext hitContext) {
try {
final int topLevelDocId = hitContext.hit().docId();
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
public void process(HitContext hitContext) throws IOException {
final int topLevelDocId = hitContext.hit().docId();
Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);

for (RescoreContext rescore : context.rescore()) {
explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
}
// we use the top level doc id, since we work with the top level searcher
hitContext.hit().explanation(explanation);
}
catch (IOException e) { // TODO move this try-catch up into FetchPhase
throw new FetchPhaseExecutionException(context.shardTarget(),
"Failed to explain doc [" + hitContext.hit().getId() + "]", e);
for (RescoreContext rescore : context.rescore()) {
explanation = rescore.rescorer().explain(topLevelDocId, context.searcher(), rescore, explanation);
}
// we use the top level doc id, since we work with the top level searcher
hitContext.hit().explanation(explanation);
}
};
}

FastVectorHighlighter.java

@@ -24,7 +24,6 @@ import org.apache.lucene.search.vectorhighlight.BoundaryScanner;
import org.apache.lucene.search.vectorhighlight.BreakIteratorBoundaryScanner;
import org.apache.lucene.search.vectorhighlight.CustomFieldQuery;
import org.apache.lucene.search.vectorhighlight.FieldFragList;
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo;
import org.apache.lucene.search.vectorhighlight.FieldQuery;
import org.apache.lucene.search.vectorhighlight.FragListBuilder;
import org.apache.lucene.search.vectorhighlight.FragmentsBuilder;

@@ -39,11 +38,11 @@ import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.TextSearchInfo;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.Field;
import org.elasticsearch.search.fetch.subphase.highlight.SearchHighlightContext.FieldOptions;

import java.io.IOException;
import java.text.BreakIterator;
import java.util.Collections;
import java.util.HashMap;

@@ -68,7 +67,7 @@ public class FastVectorHighlighter implements Highlighter {
}

@Override
public HighlightField highlight(FieldHighlightContext fieldContext) {
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
SearchHighlightContext.Field field = fieldContext.field;
FetchSubPhase.HitContext hitContext = fieldContext.hitContext;
MappedFieldType fieldType = fieldContext.fieldType;

@@ -87,126 +86,119 @@ public class FastVectorHighlighter implements Highlighter {
hitContext.cache().put(CACHE_KEY, new HighlighterEntry());
}
HighlighterEntry cache = (HighlighterEntry) hitContext.cache().get(CACHE_KEY);
FieldHighlightEntry entry = cache.fields.get(fieldType);
if (entry == null) {
FragListBuilder fragListBuilder;
BaseFragmentsBuilder fragmentsBuilder;

try {
FieldHighlightEntry entry = cache.fields.get(fieldType);
if (entry == null) {
FragListBuilder fragListBuilder;
BaseFragmentsBuilder fragmentsBuilder;

final BoundaryScanner boundaryScanner = getBoundaryScanner(field);
if (field.fieldOptions().numberOfFragments() == 0) {
fragListBuilder = new SingleFragListBuilder();
final BoundaryScanner boundaryScanner = getBoundaryScanner(field);
if (field.fieldOptions().numberOfFragments() == 0) {
fragListBuilder = new SingleFragListBuilder();

if (!forceSource && tsi.isStored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(fieldType, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceSimpleFragmentsBuilder(fieldType, hitContext.sourceLookup(),
field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
} else {
fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ?
new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
if (field.fieldOptions().scoreOrdered()) {
if (!forceSource && tsi.isStored()) {
fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.fieldOptions().preTags(),
field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(fieldType, hitContext.sourceLookup(),
field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
} else {
if (!forceSource && tsi.isStored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(fieldType, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), boundaryScanner);
field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceSimpleFragmentsBuilder(fieldType, hitContext.sourceLookup(),
fragmentsBuilder =
new SourceSimpleFragmentsBuilder(fieldType, hitContext.sourceLookup(),
field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
} else {
fragListBuilder = field.fieldOptions().fragmentOffset() == -1 ?
new SimpleFragListBuilder() : new SimpleFragListBuilder(field.fieldOptions().fragmentOffset());
if (field.fieldOptions().scoreOrdered()) {
if (!forceSource && tsi.isStored()) {
fragmentsBuilder = new ScoreOrderFragmentsBuilder(field.fieldOptions().preTags(),
field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder = new SourceScoreOrderFragmentsBuilder(fieldType, hitContext.sourceLookup(),
field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
} else {
if (!forceSource && tsi.isStored()) {
fragmentsBuilder = new SimpleFragmentsBuilder(fieldType, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), boundaryScanner);
} else {
fragmentsBuilder =
new SourceSimpleFragmentsBuilder(fieldType, hitContext.sourceLookup(),
field.fieldOptions().preTags(), field.fieldOptions().postTags(), boundaryScanner);
}
}
}
fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue);
entry = new FieldHighlightEntry();
if (field.fieldOptions().requireFieldMatch()) {
/**
* we use top level reader to rewrite the query against all readers,
* with use caching it across hits (and across readers...)
*/
entry.fieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query,
hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
} else {
/**
* we use top level reader to rewrite the query against all readers,
* with use caching it across hits (and across readers...)
*/
entry.noFieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query,
hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
}
entry.fragListBuilder = fragListBuilder;
entry.fragmentsBuilder = fragmentsBuilder;
if (cache.fvh == null) {
// parameters to FVH are not requires since:
// first two booleans are not relevant since they are set on the CustomFieldQuery
// (phrase and fieldMatch) fragment builders are used explicitly
cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
}
CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter());
cache.fields.put(fieldType, entry);
}
final FieldQuery fieldQuery;
fragmentsBuilder.setDiscreteMultiValueHighlighting(termVectorMultiValue);
entry = new FieldHighlightEntry();
if (field.fieldOptions().requireFieldMatch()) {
fieldQuery = entry.fieldMatchFieldQuery;
/*
* we use top level reader to rewrite the query against all readers,
* with use caching it across hits (and across readers...)
*/
entry.fieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query,
hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
} else {
fieldQuery = entry.noFieldMatchFieldQuery;
/*
* we use top level reader to rewrite the query against all readers,
* with use caching it across hits (and across readers...)
*/
entry.noFieldMatchFieldQuery = new CustomFieldQuery(fieldContext.query,
hitContext.topLevelReader(), true, field.fieldOptions().requireFieldMatch());
}
cache.fvh.setPhraseLimit(field.fieldOptions().phraseLimit());

String[] fragments;

// a HACK to make highlighter do highlighting, even though its using the single frag list builder
int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ?
Integer.MAX_VALUE : field.fieldOptions().numberOfFragments();
int fragmentCharSize = field.fieldOptions().numberOfFragments() == 0 ?
Integer.MAX_VALUE : field.fieldOptions().fragmentCharSize();
// we highlight against the low level reader and docId, because if we load source, we want to reuse it if possible
// Only send matched fields if they were requested to save time.
if (field.fieldOptions().matchedFields() != null && !field.fieldOptions().matchedFields().isEmpty()) {
fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(),
fieldType.name(), field.fieldOptions().matchedFields(), fragmentCharSize,
numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), encoder);
} else {
fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(),
fieldType.name(), fragmentCharSize, numberOfFragments, entry.fragListBuilder,
entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
entry.fragListBuilder = fragListBuilder;
entry.fragmentsBuilder = fragmentsBuilder;
if (cache.fvh == null) {
// parameters to FVH are not requires since:
// first two booleans are not relevant since they are set on the CustomFieldQuery
// (phrase and fieldMatch) fragment builders are used explicitly
cache.fvh = new org.apache.lucene.search.vectorhighlight.FastVectorHighlighter();
}
CustomFieldQuery.highlightFilters.set(field.fieldOptions().highlightFilter());
cache.fields.put(fieldType, entry);
}
final FieldQuery fieldQuery;
if (field.fieldOptions().requireFieldMatch()) {
fieldQuery = entry.fieldMatchFieldQuery;
} else {
fieldQuery = entry.noFieldMatchFieldQuery;
}
cache.fvh.setPhraseLimit(field.fieldOptions().phraseLimit());

String[] fragments;

// a HACK to make highlighter do highlighting, even though its using the single frag list builder
int numberOfFragments = field.fieldOptions().numberOfFragments() == 0 ?
Integer.MAX_VALUE : field.fieldOptions().numberOfFragments();
int fragmentCharSize = field.fieldOptions().numberOfFragments() == 0 ?
Integer.MAX_VALUE : field.fieldOptions().fragmentCharSize();
// we highlight against the low level reader and docId, because if we load source, we want to reuse it if possible
// Only send matched fields if they were requested to save time.
if (field.fieldOptions().matchedFields() != null && !field.fieldOptions().matchedFields().isEmpty()) {
fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(),
fieldType.name(), field.fieldOptions().matchedFields(), fragmentCharSize,
numberOfFragments, entry.fragListBuilder, entry.fragmentsBuilder, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), encoder);
} else {
fragments = cache.fvh.getBestFragments(fieldQuery, hitContext.reader(), hitContext.docId(),
fieldType.name(), fragmentCharSize, numberOfFragments, entry.fragListBuilder,
entry.fragmentsBuilder, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
}

if (CollectionUtils.isEmpty(fragments) == false) {
return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
}

int noMatchSize = fieldContext.field.fieldOptions().noMatchSize();
if (noMatchSize > 0) {
// Essentially we just request that a fragment is built from 0 to noMatchSize using
// the normal fragmentsBuilder
FieldFragList fieldFragList = new SimpleFieldFragList(-1 /*ignored*/);
fieldFragList.add(0, noMatchSize, Collections.emptyList());
fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(),
fieldType.name(), fieldFragList, 1, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), encoder);
if (CollectionUtils.isEmpty(fragments) == false) {
return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
}

int noMatchSize = fieldContext.field.fieldOptions().noMatchSize();
if (noMatchSize > 0) {
// Essentially we just request that a fragment is built from 0 to noMatchSize using
// the normal fragmentsBuilder
FieldFragList fieldFragList = new SimpleFieldFragList(-1 /*ignored*/);
fieldFragList.add(0, noMatchSize, Collections.<WeightedPhraseInfo>emptyList());
fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(),
fieldType.name(), fieldFragList, 1, field.fieldOptions().preTags(),
field.fieldOptions().postTags(), encoder);
if (CollectionUtils.isEmpty(fragments) == false) {
return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
}
}

return null;

} catch (Exception e) {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
}

return null;
}

@Override

@@ -244,14 +236,14 @@ public class FastVectorHighlighter implements Highlighter {
}
}

private class FieldHighlightEntry {
private static class FieldHighlightEntry {
public FragListBuilder fragListBuilder;
public FragmentsBuilder fragmentsBuilder;
public FieldQuery noFieldMatchFieldQuery;
public FieldQuery fieldMatchFieldQuery;
}

private class HighlighterEntry {
private static class HighlighterEntry {
public org.apache.lucene.search.vectorhighlight.FastVectorHighlighter fvh;
public Map<MappedFieldType, FieldHighlightEntry> fields = new HashMap<>();
}

HighlightPhase.java

@@ -32,6 +32,7 @@ import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.FetchSubPhaseProcessor;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;

@@ -65,7 +66,7 @@ public class HighlightPhase implements FetchSubPhase {
}

@Override
public void process(HitContext hitContext) {
public void process(HitContext hitContext) throws IOException {
Map<String, HighlightField> highlightFields = new HashMap<>();
for (String field : contextBuilders.keySet()) {
FieldHighlightContext fieldContext = contextBuilders.get(field).apply(hitContext);

Highlighter.java

@@ -20,12 +20,14 @@ package org.elasticsearch.search.fetch.subphase.highlight;

import org.elasticsearch.index.mapper.MappedFieldType;

import java.io.IOException;

/**
 * Highlights a search result.
 */
public interface Highlighter {

HighlightField highlight(FieldHighlightContext fieldContext);
HighlightField highlight(FieldHighlightContext fieldContext) throws IOException;

boolean canHighlight(MappedFieldType fieldType);
}

PlainHighlighter.java

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.search.highlight.Encoder;
import org.apache.lucene.search.highlight.Formatter;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.NullFragmenter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;

@@ -33,18 +34,15 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.search.highlight.TextFragment;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
import org.elasticsearch.search.fetch.FetchSubPhase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@@ -55,7 +53,7 @@ public class PlainHighlighter implements Highlighter {
private static final String CACHE_KEY = "highlight-plain";

@Override
public HighlightField highlight(FieldHighlightContext fieldContext) {
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
SearchHighlightContext.Field field = fieldContext.field;
QueryShardContext context = fieldContext.context;
FetchSubPhase.HitContext hitContext = fieldContext.hitContext;

@@ -111,56 +109,47 @@ public class PlainHighlighter implements Highlighter {
};
final int maxAnalyzedOffset = context.getIndexSettings().getHighlightMaxAnalyzedOffset();

try {
textsToHighlight = HighlightUtils.loadFieldValues(fieldType, hitContext, fieldContext.forceSource);
textsToHighlight = HighlightUtils.loadFieldValues(fieldType, hitContext, fieldContext.forceSource);

for (Object textToHighlight : textsToHighlight) {
String text = convertFieldValue(fieldType, textToHighlight);
int textLength = text.length();
if (keywordIgnoreAbove != null && textLength > keywordIgnoreAbove) {
continue; // skip highlighting keyword terms that were ignored during indexing
}
if (textLength > maxAnalyzedOffset) {
throw new IllegalArgumentException(
"The length of [" + fieldContext.fieldName + "] field of [" + hitContext.hit().getId() +
"] doc of [" + context.index().getName() + "] index " +
"has exceeded [" + maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
"] index level setting. " + "For large texts, indexing with offsets or term vectors, and highlighting " +
"with unified or fvh highlighter is recommended!");
}

try (TokenStream tokenStream = analyzer.tokenStream(fieldType.name(), text)) {
if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
// can't perform highlighting if the stream has no terms (binary token stream) or no offsets
continue;
}
TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
for (TextFragment bestTextFragment : bestTextFragments) {
if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
fragsList.add(bestTextFragment);
}
}
}
for (Object textToHighlight : textsToHighlight) {
String text = convertFieldValue(fieldType, textToHighlight);
int textLength = text.length();
if (keywordIgnoreAbove != null && textLength > keywordIgnoreAbove) {
continue; // skip highlighting keyword terms that were ignored during indexing
}
} catch (Exception e) {
if (ExceptionsHelper.unwrap(e, BytesRefHash.MaxBytesLengthExceededException.class) != null) {
if (textLength > maxAnalyzedOffset) {
throw new IllegalArgumentException(
"The length of [" + fieldContext.fieldName + "] field of [" + hitContext.hit().getId() +
"] doc of [" + context.index().getName() + "] index " +
"has exceeded [" + maxAnalyzedOffset + "] - maximum allowed to be analyzed for highlighting. " +
"This maximum can be set by changing the [" + IndexSettings.MAX_ANALYZED_OFFSET_SETTING.getKey() +
"] index level setting. " + "For large texts, indexing with offsets or term vectors, and highlighting " +
"with unified or fvh highlighter is recommended!");
}

try (TokenStream tokenStream = analyzer.tokenStream(fieldType.name(), text)) {
if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
// can't perform highlighting if the stream has no terms (binary token stream) or no offsets
continue;
}
TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
for (TextFragment bestTextFragment : bestTextFragments) {
if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
fragsList.add(bestTextFragment);
}
}
} catch (BytesRefHash.MaxBytesLengthExceededException e) {
// this can happen if for example a field is not_analyzed and ignore_above option is set.
// the field will be ignored when indexing but the huge term is still in the source and
// the plain highlighter will parse the source and try to analyze it.
return null;
} else {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
// ignore and continue to the next value
} catch (InvalidTokenOffsetsException e) {
throw new IllegalArgumentException(e);
}
}

if (field.fieldOptions().scoreOrdered()) {
CollectionUtil.introSort(fragsList, new Comparator<TextFragment>() {
@Override
public int compare(TextFragment o1, TextFragment o2) {
return Math.round(o2.getScore() - o1.getScore());
}
});
CollectionUtil.introSort(fragsList, (o1, o2) -> Math.round(o2.getScore() - o1.getScore()));
}
String[] fragments;
// number_of_fragments is set to 0 but we have a multivalued field

@@ -171,7 +160,7 @@ public class PlainHighlighter implements Highlighter {
}
} else {
// refine numberOfFragments if needed
numberOfFragments = fragsList.size() < numberOfFragments ? fragsList.size() : numberOfFragments;
numberOfFragments = Math.min(fragsList.size(), numberOfFragments);
fragments = new String[numberOfFragments];
for (int i = 0; i < fragments.length; i++) {
fragments[i] = fragsList.get(i).toString();

@@ -186,13 +175,7 @@ public class PlainHighlighter implements Highlighter {
if (noMatchSize > 0 && textsToHighlight.size() > 0) {
// Pull an excerpt from the beginning of the string but make sure to split the string on a term boundary.
String fieldContents = textsToHighlight.get(0).toString();
int end;
try {
end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, fieldType.name(), fieldContents);
} catch (Exception e) {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
}
int end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, fieldType.name(), fieldContents);
if (end > 0) {
return new HighlightField(fieldContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
}

UnifiedHighlighter.java

@@ -61,66 +61,14 @@ public class UnifiedHighlighter implements Highlighter {
}

@Override
public HighlightField highlight(FieldHighlightContext fieldContext) {
public HighlightField highlight(FieldHighlightContext fieldContext) throws IOException {
@SuppressWarnings("unchecked")
Map<String, CustomUnifiedHighlighter> cache = (Map<String, CustomUnifiedHighlighter>) fieldContext.hitContext.cache()
.computeIfAbsent(UnifiedHighlighter.class.getName(), k -> new HashMap<>());
CustomUnifiedHighlighter highlighter = (CustomUnifiedHighlighter) cache.computeIfAbsent(fieldContext.fieldName, f -> {
Encoder encoder = fieldContext.field.fieldOptions().encoder().equals("html")
? HighlightUtils.Encoders.HTML
: HighlightUtils.Encoders.DEFAULT;
int maxAnalyzedOffset = fieldContext.context.getIndexSettings().getHighlightMaxAnalyzedOffset();
int keywordIgnoreAbove = Integer.MAX_VALUE;
if (fieldContext.fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
KeywordFieldMapper mapper = (KeywordFieldMapper) fieldContext.context.getMapperService().documentMapper()
.mappers().getMapper(fieldContext.fieldName);
keywordIgnoreAbove = mapper.ignoreAbove();
}
int numberOfFragments = fieldContext.field.fieldOptions().numberOfFragments();
Analyzer analyzer = getAnalyzer(fieldContext.context.getMapperService().documentMapper());
PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder);
IndexSearcher searcher = fieldContext.context.searcher();
OffsetSource offsetSource = getOffsetSource(fieldContext.fieldType);
BreakIterator breakIterator;
int higlighterNumberOfFragments;
if (numberOfFragments == 0
// non-tokenized fields should not use any break iterator (ignore boundaryScannerType)
|| fieldContext.fieldType.getTextSearchInfo().isTokenized() == false) {
/*
 * We use a control char to separate values, which is the
 * only char that the custom break iterator breaks the text
 * on, so we don't lose the distinction between the different
 * values of a field and we get back a snippet per value
 */
breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
higlighterNumberOfFragments = numberOfFragments == 0 ? Integer.MAX_VALUE - 1 : numberOfFragments;
} else {
//using paragraph separator we make sure that each field value holds a discrete passage for highlighting
breakIterator = getBreakIterator(fieldContext.field);
higlighterNumberOfFragments = numberOfFragments;
}
try {
return new CustomUnifiedHighlighter(
searcher,
analyzer,
offsetSource,
passageFormatter,
fieldContext.field.fieldOptions().boundaryScannerLocale(),
breakIterator,
fieldContext.context.getFullyQualifiedIndex().getName(),
fieldContext.fieldName,
fieldContext.query,
fieldContext.field.fieldOptions().noMatchSize(),
higlighterNumberOfFragments,
fieldMatcher(fieldContext),
keywordIgnoreAbove,
maxAnalyzedOffset
);
} catch (IOException e) {
throw new FetchPhaseExecutionException(fieldContext.shardTarget,
"Failed to highlight field [" + fieldContext.fieldName + "]", e);
}
});
if (cache.containsKey(fieldContext.fieldName) == false) {
cache.put(fieldContext.fieldName, buildHighlighter(fieldContext));
}
CustomUnifiedHighlighter highlighter = cache.get(fieldContext.fieldName);
MappedFieldType fieldType = fieldContext.fieldType;
SearchHighlightContext.Field field = fieldContext.field;
FetchSubPhase.HitContext hitContext = fieldContext.hitContext;

@@ -166,10 +114,61 @@ public class UnifiedHighlighter implements Highlighter {
return new HighlightField(fieldContext.fieldName, Text.convertFromStringArray(fragments));
}

CustomUnifiedHighlighter buildHighlighter(FieldHighlightContext fieldContext) throws IOException {
Encoder encoder = fieldContext.field.fieldOptions().encoder().equals("html")
? HighlightUtils.Encoders.HTML
: HighlightUtils.Encoders.DEFAULT;
int maxAnalyzedOffset = fieldContext.context.getIndexSettings().getHighlightMaxAnalyzedOffset();
int keywordIgnoreAbove = Integer.MAX_VALUE;
if (fieldContext.fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
KeywordFieldMapper mapper = (KeywordFieldMapper) fieldContext.context.getMapperService().documentMapper()
.mappers().getMapper(fieldContext.fieldName);
keywordIgnoreAbove = mapper.ignoreAbove();
}
int numberOfFragments = fieldContext.field.fieldOptions().numberOfFragments();
Analyzer analyzer = getAnalyzer(fieldContext.context.getMapperService().documentMapper());
PassageFormatter passageFormatter = getPassageFormatter(fieldContext.hitContext, fieldContext.field, encoder);
IndexSearcher searcher = fieldContext.context.searcher();
OffsetSource offsetSource = getOffsetSource(fieldContext.fieldType);
BreakIterator breakIterator;
int higlighterNumberOfFragments;
if (numberOfFragments == 0
// non-tokenized fields should not use any break iterator (ignore boundaryScannerType)
|| fieldContext.fieldType.getTextSearchInfo().isTokenized() == false) {
/*
 * We use a control char to separate values, which is the
 * only char that the custom break iterator breaks the text
 * on, so we don't lose the distinction between the different
 * values of a field and we get back a snippet per value
 */
breakIterator = new CustomSeparatorBreakIterator(MULTIVAL_SEP_CHAR);
higlighterNumberOfFragments = numberOfFragments == 0 ? Integer.MAX_VALUE - 1 : numberOfFragments;
} else {
//using paragraph separator we make sure that each field value holds a discrete passage for highlighting
breakIterator = getBreakIterator(fieldContext.field);
higlighterNumberOfFragments = numberOfFragments;
}
return new CustomUnifiedHighlighter(
searcher,
analyzer,
offsetSource,
passageFormatter,
fieldContext.field.fieldOptions().boundaryScannerLocale(),
breakIterator,
fieldContext.context.getFullyQualifiedIndex().getName(),
fieldContext.fieldName,
fieldContext.query,
fieldContext.field.fieldOptions().noMatchSize(),
higlighterNumberOfFragments,
fieldMatcher(fieldContext),
keywordIgnoreAbove,
maxAnalyzedOffset
);
}

protected PassageFormatter getPassageFormatter(HitContext hitContext, SearchHighlightContext.Field field, Encoder encoder) {
CustomPassageFormatter passageFormatter = new CustomPassageFormatter(field.fieldOptions().preTags()[0],
return new CustomPassageFormatter(field.fieldOptions().preTags()[0],
field.fieldOptions().postTags()[0], encoder);
return passageFormatter;
}

@@ -244,7 +243,7 @@ public class UnifiedHighlighter implements Highlighter {
private Predicate<String> fieldMatcher(FieldHighlightContext fieldContext) {
if (fieldContext.field.fieldOptions().requireFieldMatch()) {
String fieldName = fieldContext.fieldName;
return name -> fieldName.equals(name);
return fieldName::equals;
}
// ignore terms that targets the _id field since they use a different encoding
// that is not compatible with utf8