diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index f4fa8ba5c9b..e7e3e28bb3a 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -126,6 +126,8 @@ Bug Fixes
other ranges had more than one clause matching (Ahmet Arslan,
hossman, Mike McCandless)
+* LUCENE-7286: Added support for highlighting SynonymQuery. (Adrien Grand)
+
Other
* LUCENE-7295: TermAutomatonQuery.hashCode calculates Automaton.toDot().hash,
diff --git a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
index 43f75ccb335..7507bdde6e8 100644
--- a/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
+++ b/lucene/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
@@ -53,6 +53,7 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
+import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ToChildBlockJoinQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
@@ -138,7 +139,7 @@ public class WeightedSpanTermExtractor {
SpanNearQuery sp = new SpanNearQuery(clauses, phraseQuery.getSlop() + positionGaps, inorder);
extractWeightedSpanTerms(terms, sp, boost);
}
- } else if (query instanceof TermQuery) {
+ } else if (query instanceof TermQuery || query instanceof SynonymQuery) {
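+ // SynonymQuery, like TermQuery, is built from plain terms, so the ordinary
+ // term-extraction path (rather than span extraction) is sufficient for it.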
extractWeightedTerms(terms, query, boost);
} else if (query instanceof SpanQuery) {
extractWeightedSpanTerms(terms, (SpanQuery) query, boost);
diff --git a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
index 3c6f1086d7e..cf727d7154a 100644
--- a/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
+++ b/lucene/highlighter/src/test/org/apache/lucene/search/highlight/HighlighterTest.java
@@ -71,6 +71,7 @@ import org.apache.lucene.search.PhraseQuery.Builder;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
+import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
@@ -223,6 +224,24 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
assertEquals("This piece of text refers to Kennedy at the beginning then has a longer piece of text that is very", fragment);
}
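+ // LUCENE-7286: each term wrapped in a SynonymQuery ("jfk" and "kennedy" here)
+ // should be extracted and highlighted just like a plain TermQuery term.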
+ public void testHighlightingSynonymQuery() throws Exception {
+ searcher = newSearcher(reader);
+ Query query = new SynonymQuery(new Term(FIELD_NAME, "jfk"), new Term(FIELD_NAME, "kennedy"));
+ QueryScorer scorer = new QueryScorer(query, FIELD_NAME);
+ Highlighter highlighter = new Highlighter(scorer);
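+ // doc 2 stores "JFK has been shot"; the default formatter should wrap "JFK" in <B> tags.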
+ TokenStream stream = getAnyTokenStream(FIELD_NAME, 2);
+ Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);
+ highlighter.setTextFragmenter(fragmenter);
+ String storedField = searcher.doc(2).get(FIELD_NAME);
+ String fragment = highlighter.getBestFragment(stream, storedField);
+ assertEquals("JFK has been shot", fragment);
+
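+ // The same query should also highlight the "kennedy" synonym in doc 3.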
+ stream = getAnyTokenStream(FIELD_NAME, 3);
+ storedField = searcher.doc(3).get(FIELD_NAME);
+ fragment = highlighter.getBestFragment(stream, storedField);
+ assertEquals("John Kennedy has been shot", fragment);
+ }
+
public void testHighlightUnknownQueryAfterRewrite() throws IOException, InvalidTokenOffsetsException {
Query query = new Query() {
@@ -2093,7 +2112,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
ramDir = newDirectory();
fieldType = random().nextBoolean() ? FIELD_TYPE_TV : TextField.TYPE_STORED;
- IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
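+ // A log merge policy merges adjacent segments and so keeps doc IDs in insertion
+ // order, which the doc(2)/doc(3) lookups in testHighlightingSynonymQuery rely on.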
+ IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()));
for (String text : texts) {
writer.addDocument(doc(FIELD_NAME, text));