LUCENE-8874: Show SPI names instead of class names in Luke Analysis tab.

Tomoko Uchida 2019-06-30 13:27:24 +09:00
parent 7ac77ce441
commit 299bfe55b3
4 changed files with 15 additions and 9 deletions


@@ -136,6 +136,8 @@ Improvements
* LUCENE-8793: Luke enhanced UI for CustomAnalyzer: show detailed analysis steps. (Jun Ohtani via Tomoko Uchida)
+* LUCENE-8874: Show SPI names instead of class names in Luke Analysis tab. (Tomoko Uchida)
* LUCENE-8855: Add Accountable to some Query implementations (ab, Adrien Grand)
+* LUCENE-8894: Add APIs to find SPI names for Tokenizer/CharFilter/TokenFilter factory classes. (Tomoko Uchida)

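A side note on the SPI-name lookup this commit relies on (the LUCENE-8894 APIs referenced above): a minimal, illustrative sketch, not part of the patch, showing how a factory class maps to its registered SPI name, assuming the analysis-common factories are on the classpath. The expected values match the updated test assertions later in this commit.

    import org.apache.lucene.analysis.charfilter.HTMLStripCharFilterFactory;
    import org.apache.lucene.analysis.core.KeywordTokenizerFactory;
    import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
    import org.apache.lucene.analysis.util.CharFilterFactory;
    import org.apache.lucene.analysis.util.TokenFilterFactory;
    import org.apache.lucene.analysis.util.TokenizerFactory;

    public class SpiNameLookupSketch {
      public static void main(String[] args) {
        // Resolve the short SPI name each factory is registered under,
        // instead of showing its fully qualified class name.
        System.out.println(CharFilterFactory.findSPIName(HTMLStripCharFilterFactory.class)); // htmlStrip
        System.out.println(TokenizerFactory.findSPIName(KeywordTokenizerFactory.class));     // keyword
        System.out.println(TokenFilterFactory.findSPIName(LowerCaseFilterFactory.class));    // lowercase
      }
    }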

@@ -37,6 +37,9 @@ import java.awt.Window;
import java.io.IOException;
import org.apache.lucene.analysis.custom.CustomAnalyzer;
+import org.apache.lucene.analysis.util.CharFilterFactory;
+import org.apache.lucene.analysis.util.TokenFilterFactory;
+import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.luke.app.desktop.Preferences;
import org.apache.lucene.luke.app.desktop.PreferencesFactory;
import org.apache.lucene.luke.app.desktop.util.DialogOpener;
@@ -109,7 +112,7 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory {
c.weighty = 0.5;
panel.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.charfilters")), c);
-String[] charFilters = analyzer.getCharFilterFactories().stream().map(f -> f.getClass().getName()).toArray(String[]::new);
+String[] charFilters = analyzer.getCharFilterFactories().stream().map(f -> CharFilterFactory.findSPIName(f.getClass())).toArray(String[]::new);
JList<String> charFilterList = new JList<>(charFilters);
charFilterList.setVisibleRowCount(charFilters.length == 0 ? 1 : Math.min(charFilters.length, 5));
c.gridx = 1;
@@ -124,7 +127,7 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory {
c.weighty = 0.1;
panel.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.tokenizer")), c);
-String tokenizer = analyzer.getTokenizerFactory().getClass().getName();
+String tokenizer = TokenizerFactory.findSPIName(analyzer.getTokenizerFactory().getClass());
JTextField tokenizerTF = new JTextField(tokenizer);
tokenizerTF.setColumns(30);
tokenizerTF.setEditable(false);
@@ -142,7 +145,7 @@ public class AnalysisChainDialogFactory implements DialogOpener.DialogFactory {
c.weighty = 0.5;
panel.add(new JLabel(MessageUtils.getLocalizedMessage("analysis.dialog.chain.label.tokenfilters")), c);
-String[] tokenFilters = analyzer.getTokenFilterFactories().stream().map(f -> f.getClass().getName()).toArray(String[]::new);
+String[] tokenFilters = analyzer.getTokenFilterFactories().stream().map(f -> TokenFilterFactory.findSPIName(f.getClass())).toArray(String[]::new);
JList<String> tokenFilterList = new JList<>(tokenFilters);
tokenFilterList.setVisibleRowCount(tokenFilters.length == 0 ? 1 : Math.min(tokenFilters.length, 5));
tokenFilterList.setMinimumSize(new Dimension(300, 25));

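For context on the dialog change above: the chain labels now come from mapping each component factory of the CustomAnalyzer to its SPI name. A rough, self-contained sketch of the same mapping outside the UI (illustrative only; it assumes the "standard" tokenizer and "lowercase" filter are available on the classpath) might look like:

    import org.apache.lucene.analysis.custom.CustomAnalyzer;
    import org.apache.lucene.analysis.util.TokenFilterFactory;
    import org.apache.lucene.analysis.util.TokenizerFactory;

    public class ChainLabelsSketch {
      public static void main(String[] args) throws Exception {
        CustomAnalyzer analyzer = CustomAnalyzer.builder()
            .withTokenizer("standard")
            .addTokenFilter("lowercase")
            .build();

        // Same mapping the dialog now performs: factory class -> registered SPI name.
        String tokenizer = TokenizerFactory.findSPIName(analyzer.getTokenizerFactory().getClass());
        String[] tokenFilters = analyzer.getTokenFilterFactories().stream()
            .map(f -> TokenFilterFactory.findSPIName(f.getClass()))
            .toArray(String[]::new);

        System.out.println(tokenizer);                       // standard
        System.out.println(String.join(", ", tokenFilters)); // lowercase
      }
    }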

@@ -246,7 +246,7 @@ public final class AnalysisImpl implements Analysis {
Reader readerForWriteOut = new StringReader(charFilteredSource);
readerForWriteOut = charFilterFactory.create(readerForWriteOut);
charFilteredSource = writeCharStream(readerForWriteOut);
-charfilteredTexts.add(new CharfilteredText(readerForWriteOut.getClass().getName(), charFilteredSource));
+charfilteredTexts.add(new CharfilteredText(CharFilterFactory.findSPIName(charFilterFactory.getClass()), charFilteredSource));
}
reader = cs;
}
@@ -258,13 +258,14 @@ public final class AnalysisImpl implements Analysis {
((Tokenizer)tokenStream).setReader(reader);
List<Token> tokens = new ArrayList<>();
List<AttributeSource> attributeSources = analyzeTokenStream(tokenStream, tokens);
-namedTokens.add(new NamedTokens(tokenStream.getClass().getName(), tokens));
+namedTokens.add(new NamedTokens(TokenizerFactory.findSPIName(tokenizerFactory.getClass()), tokens));
ListBasedTokenStream listBasedTokenStream = new ListBasedTokenStream(tokenStream, attributeSources);
for (TokenFilterFactory tokenFilterFactory : tokenFilterFactories) {
tokenStream = tokenFilterFactory.create(listBasedTokenStream);
tokens = new ArrayList<>();
attributeSources = analyzeTokenStream(tokenStream, tokens);
-namedTokens.add(new NamedTokens(tokenStream.getClass().getName(), tokens));
+namedTokens.add(new NamedTokens(TokenFilterFactory.findSPIName(tokenFilterFactory.getClass()), tokens));
try {
listBasedTokenStream.close();
} catch (IOException e) {

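The hunk above also changes what the step name is derived from: previously it was the concrete TokenStream class the factory produced, now it is the SPI name of the factory itself. A small sketch of that difference for the lowercase filter (illustrative only; the factory is instantiated directly here, which the Luke code does not do):

    import java.util.HashMap;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.KeywordTokenizer;
    import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
    import org.apache.lucene.analysis.util.TokenFilterFactory;

    public class StepNameSketch {
      public static void main(String[] args) {
        LowerCaseFilterFactory factory = new LowerCaseFilterFactory(new HashMap<>());
        TokenStream stream = factory.create(new KeywordTokenizer());

        // Old label: the TokenStream implementation created by the factory.
        System.out.println(stream.getClass().getName());                        // org.apache.lucene.analysis.core.LowerCaseFilter
        // New label: the SPI name the factory is registered under.
        System.out.println(TokenFilterFactory.findSPIName(factory.getClass())); // lowercase
      }
    }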

@@ -165,12 +165,12 @@ public class AnalysisImplTest extends LuceneTestCase {
assertNotNull(result);
assertNotNull(result.getCharfilteredTexts());
assertEquals(1,result.getCharfilteredTexts().size());
assertEquals("org.apache.lucene.analysis.charfilter.HTMLStripCharFilter", result.getCharfilteredTexts().get(0).getName());
assertEquals("htmlStrip", result.getCharfilteredTexts().get(0).getName());
assertNotNull(result.getNamedTokens());
assertEquals(2, result.getNamedTokens().size());
//FIXME check each namedTokensList
assertEquals("org.apache.lucene.analysis.core.KeywordTokenizer", result.getNamedTokens().get(0).getName());
assertEquals("org.apache.lucene.analysis.core.LowerCaseFilter", result.getNamedTokens().get(1).getName());
assertEquals("keyword", result.getNamedTokens().get(0).getName());
assertEquals("lowercase", result.getNamedTokens().get(1).getName());
}
}
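
A closing note on the updated assertions: the short names are the SPI names these factories register under, and they can be resolved back to factory classes. A sketch (assuming the analysis-common module is on the classpath):

    import org.apache.lucene.analysis.util.CharFilterFactory;
    import org.apache.lucene.analysis.util.TokenFilterFactory;
    import org.apache.lucene.analysis.util.TokenizerFactory;

    public class SpiRoundTripSketch {
      public static void main(String[] args) {
        // Each SPI name asserted in the test resolves to a concrete factory class.
        System.out.println(CharFilterFactory.lookupClass("htmlStrip").getSimpleName());  // HTMLStripCharFilterFactory
        System.out.println(TokenizerFactory.lookupClass("keyword").getSimpleName());     // KeywordTokenizerFactory
        System.out.println(TokenFilterFactory.lookupClass("lowercase").getSimpleName()); // LowerCaseFilterFactory
      }
    }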