mirror of https://github.com/apache/lucene.git
LUCENE-4255: clean up analysis factory exception handling
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1365865 13f79535-47bb-0310-9956-ffa450edef68
parent 64dcd8855d
commit ca736cea8e
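The recurring pattern in every hunk below: configuration errors that used to be wrapped in Lucene's custom unchecked InitializationException (deleted outright by this commit; see the @@ -1,32 +0,0 @@ hunk for InitializationException.java) now throw the standard IllegalArgumentException, while resource-loading failures propagate as plain IOException, which ResourceLoaderAware.inform(ResourceLoader) now declares. A condensed before/after sketch of the change, modeled on the KeepWordFilterFactory hunk further down (the "words" parameter and field names are taken from that factory):

  // Before: the factory caught IOException and rethrew the custom unchecked type.
  public void inform(ResourceLoader loader) {
    String wordFiles = args.get("words");
    if (wordFiles != null) {
      try {
        words = getWordSet(loader, wordFiles, ignoreCase);
      } catch (IOException e) {
        throw new InitializationException("IOException thrown while loading words", e);
      }
    }
  }

  // After: inform() declares the checked IOException and lets loading failures
  // propagate; invalid or missing parameters use IllegalArgumentException instead.
  public void inform(ResourceLoader loader) throws IOException {
    String wordFiles = args.get("words");
    if (wordFiles != null) {
      words = getWordSet(loader, wordFiles, ignoreCase);
    }
  }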
@@ -49,30 +49,25 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
   protected NormalizeCharMap normMap;
   private String mapping;
 
-  public void inform(ResourceLoader loader) {
-    mapping = args.get( "mapping" );
+  // TODO: this should use inputstreams from the loader, not File!
+  public void inform(ResourceLoader loader) throws IOException {
+    mapping = args.get("mapping");
 
-    if( mapping != null ){
+    if (mapping != null) {
       List<String> wlist = null;
-      try{
-        File mappingFile = new File( mapping );
-        if( mappingFile.exists() ){
-          wlist = loader.getLines( mapping );
-        }
-        else{
-          List<String> files = splitFileNames( mapping );
-          wlist = new ArrayList<String>();
-          for( String file : files ){
-            List<String> lines = loader.getLines( file.trim() );
-            wlist.addAll( lines );
-          }
-        }
-      }
-      catch( IOException e ){
-        throw new InitializationException("IOException thrown while loading mappings", e);
+      File mappingFile = new File(mapping);
+      if (mappingFile.exists()) {
+        wlist = loader.getLines(mapping);
+      } else {
+        List<String> files = splitFileNames(mapping);
+        wlist = new ArrayList<String>();
+        for (String file : files) {
+          List<String> lines = loader.getLines(file.trim());
+          wlist.addAll(lines);
+        }
       }
       final NormalizeCharMap.Builder builder = new NormalizeCharMap.Builder();
-      parseRules( wlist, builder );
+      parseRules(wlist, builder);
       normMap = builder.build();
       if (normMap.map == null) {
         // if the inner FST is null, it means it accepts nothing (e.g. the file is empty)
@@ -95,7 +90,7 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
     for( String rule : rules ){
       Matcher m = p.matcher( rule );
       if( !m.find() )
-        throw new InitializationException("Invalid Mapping Rule : [" + rule + "], file = " + mapping);
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "], file = " + mapping);
       builder.add( parseString( m.group( 1 ) ), parseString( m.group( 2 ) ) );
     }
   }
@@ -110,7 +105,7 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
     char c = s.charAt( readPos++ );
     if( c == '\\' ){
       if( readPos >= len )
-        throw new InitializationException("Invalid escaped char in [" + s + "]");
+        throw new IllegalArgumentException("Invalid escaped char in [" + s + "]");
       c = s.charAt( readPos++ );
       switch( c ) {
         case '\\' : c = '\\'; break;
@@ -122,7 +117,7 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
         case 'f' : c = '\f'; break;
         case 'u' :
           if( readPos + 3 >= len )
-            throw new InitializationException("Invalid escaped char in [" + s + "]");
+            throw new IllegalArgumentException("Invalid escaped char in [" + s + "]");
           c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
           readPos += 4;
           break;
@@ -42,19 +42,15 @@ import org.apache.lucene.analysis.util.*;
 public class CommonGramsFilterFactory extends TokenFilterFactory implements
     ResourceLoaderAware {
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String commonWordFiles = args.get("words");
     ignoreCase = getBoolean("ignoreCase", false);
 
     if (commonWordFiles != null) {
-      try {
-        if ("snowball".equalsIgnoreCase(args.get("format"))) {
-          commonWords = getSnowballWordSet(loader, commonWordFiles, ignoreCase);
-        } else {
-          commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
-        }
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading common word file", e);
+      if ("snowball".equalsIgnoreCase(args.get("format"))) {
+        commonWords = getSnowballWordSet(loader, commonWordFiles, ignoreCase);
+      } else {
+        commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
       }
     } else {
       commonWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
@@ -50,19 +50,15 @@ public class CommonGramsQueryFilterFactory extends TokenFilterFactory
     assureMatchVersion();
   }
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String commonWordFiles = args.get("words");
     ignoreCase = getBoolean("ignoreCase", false);
 
     if (commonWordFiles != null) {
-      try {
-        if ("snowball".equalsIgnoreCase(args.get("format"))) {
-          commonWords = getSnowballWordSet(loader, commonWordFiles, ignoreCase);
-        } else {
-          commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
-        }
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading common word file", e);
+      if ("snowball".equalsIgnoreCase(args.get("format"))) {
+        commonWords = getSnowballWordSet(loader, commonWordFiles, ignoreCase);
+      } else {
+        commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
       }
     } else {
       commonWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
@@ -48,7 +48,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends TokenFilterFactory
     assureMatchVersion();
     dictFile = args.get("dictionary");
     if (null == dictFile) {
-      throw new InitializationException("Missing required parameter: dictionary");
+      throw new IllegalArgumentException("Missing required parameter: dictionary");
     }
 
     minWordSize= getInt("minWordSize",CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
@@ -56,12 +56,8 @@ public class DictionaryCompoundWordTokenFilterFactory extends TokenFilterFactory
     maxSubwordSize= getInt("maxSubwordSize",CompoundWordTokenFilterBase.DEFAULT_MAX_SUBWORD_SIZE);
     onlyLongestMatch = getBoolean("onlyLongestMatch",true);
   }
-  public void inform(ResourceLoader loader) {
-    try {
-      dictionary = super.getWordSet(loader, dictFile, false);
-    } catch (IOException e) {
-      throw new InitializationException("IOException thrown while loading dictionary", e);
-    }
+  public void inform(ResourceLoader loader) throws IOException {
+    dictionary = super.getWordSet(loader, dictFile, false);
   }
   public TokenStream create(TokenStream input) {
     // if the dictionary is null, it means it was empty
@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.compound;
  */
 
 import java.io.File;
+import java.io.IOException;
 
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
@@ -134,7 +135,7 @@ public class HyphenationCompoundWordTokenFilter extends
    * @throws Exception
    */
   public static HyphenationTree getHyphenationTree(String hyphenationFilename)
-      throws Exception {
+      throws IOException {
     return getHyphenationTree(new InputSource(hyphenationFilename));
   }
 
@@ -146,7 +147,7 @@ public class HyphenationCompoundWordTokenFilter extends
    * @throws Exception
    */
   public static HyphenationTree getHyphenationTree(File hyphenationFile)
-      throws Exception {
+      throws IOException {
     return getHyphenationTree(new InputSource(hyphenationFile.toURL().toExternalForm()));
   }
 
@@ -158,7 +159,7 @@ public class HyphenationCompoundWordTokenFilter extends
    * @throws Exception
    */
   public static HyphenationTree getHyphenationTree(InputSource hyphenationSource)
-      throws Exception {
+      throws IOException {
     HyphenationTree tree = new HyphenationTree();
     tree.loadPatterns(hyphenationSource);
     return tree;
@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.util.*;
 import org.apache.lucene.util.IOUtils;
 
 import java.util.Map;
+import java.io.IOException;
 import java.io.InputStream;
 import org.xml.sax.InputSource;
 
@@ -75,7 +76,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends TokenFilterFactor
     encoding = args.get("encoding");
     hypFile = args.get("hyphenator");
     if (null == hypFile) {
-      throw new InitializationException("Missing required parameter: hyphenator");
+      throw new IllegalArgumentException("Missing required parameter: hyphenator");
     }
 
     minWordSize = getInt("minWordSize", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
@@ -84,7 +85,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends TokenFilterFactor
     onlyLongestMatch = getBoolean("onlyLongestMatch", false);
   }
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     InputStream stream = null;
     try {
       if (dictFile != null) // the dictionary can be empty.
@@ -96,8 +97,6 @@ public class HyphenationCompoundWordTokenFilterFactory extends TokenFilterFactor
       is.setEncoding(encoding); // if it's null let xml parser decide
       is.setSystemId(hypFile);
       hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
-    } catch (Exception e) { // TODO: getHyphenationTree really shouldn't throw "Exception"
-      throw new InitializationException("Exception thrown while loading dictionary and hyphenation file", e);
     } finally {
       IOUtils.closeWhileHandlingException(stream);
     }
@@ -1,32 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.lucene.analysis.compound.hyphenation;
-
-/**
- * This class has been taken from the Apache FOP project (http://xmlgraphics.apache.org/fop/). They have been slightly modified.
- */
-public class HyphenationException extends Exception {
-
-  /**
-   * @see java.lang.Throwable#Throwable(String)
-   */
-  public HyphenationException(String msg) {
-    super(msg);
-  }
-
-}
@@ -18,8 +18,8 @@
 package org.apache.lucene.analysis.compound.hyphenation;
 
 import java.io.File;
+import java.io.IOException;
 import java.io.PrintStream;
-import java.net.MalformedURLException;
 import java.util.ArrayList;
 import java.util.HashMap;
 
@@ -108,25 +108,20 @@ public class HyphenationTree extends TernaryTree implements PatternConsumer {
    * Read hyphenation patterns from an XML file.
    *
    * @param f the filename
-   * @throws HyphenationException In case the parsing fails
+   * @throws IOException In case the parsing fails
    */
-  public void loadPatterns(File f) throws HyphenationException {
-    try {
-      InputSource src = new InputSource(f.toURL().toExternalForm());
-      loadPatterns(src);
-    } catch (MalformedURLException e) {
-      throw new HyphenationException("Error converting the File '" + f
-          + "' to a URL: " + e.getMessage());
-    }
+  public void loadPatterns(File f) throws IOException {
+    InputSource src = new InputSource(f.toURL().toExternalForm());
+    loadPatterns(src);
   }
 
   /**
    * Read hyphenation patterns from an XML file.
    *
    * @param source the InputSource for the file
-   * @throws HyphenationException In case the parsing fails
+   * @throws IOException In case the parsing fails
    */
-  public void loadPatterns(InputSource source) throws HyphenationException {
+  public void loadPatterns(InputSource source) throws IOException {
     PatternParser pp = new PatternParser(this);
     ivalues = new TernaryTree();
 
|
@ -27,9 +27,7 @@ import org.xml.sax.Attributes;
|
|||
|
||||
// Java
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import javax.xml.parsers.SAXParserFactory;
|
||||
|
@ -87,9 +85,9 @@ public class PatternParser extends DefaultHandler {
|
|||
* Parses a hyphenation pattern file.
|
||||
*
|
||||
* @param filename the filename
|
||||
* @throws HyphenationException In case of an exception while parsing
|
||||
* @throws IOException In case of an exception while parsing
|
||||
*/
|
||||
public void parse(String filename) throws HyphenationException {
|
||||
public void parse(String filename) throws IOException {
|
||||
parse(new InputSource(filename));
|
||||
}
|
||||
|
||||
|
@ -97,33 +95,24 @@ public class PatternParser extends DefaultHandler {
|
|||
* Parses a hyphenation pattern file.
|
||||
*
|
||||
* @param file the pattern file
|
||||
* @throws HyphenationException In case of an exception while parsing
|
||||
* @throws IOException In case of an exception while parsing
|
||||
*/
|
||||
public void parse(File file) throws HyphenationException {
|
||||
try {
|
||||
InputSource src = new InputSource(file.toURL().toExternalForm());
|
||||
parse(src);
|
||||
} catch (MalformedURLException e) {
|
||||
throw new HyphenationException("Error converting the File '" + file
|
||||
+ "' to a URL: " + e.getMessage());
|
||||
}
|
||||
public void parse(File file) throws IOException {
|
||||
InputSource src = new InputSource(file.toURL().toExternalForm());
|
||||
parse(src);
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a hyphenation pattern file.
|
||||
*
|
||||
* @param source the InputSource for the file
|
||||
* @throws HyphenationException In case of an exception while parsing
|
||||
* @throws IOException In case of an exception while parsing
|
||||
*/
|
||||
public void parse(InputSource source) throws HyphenationException {
|
||||
public void parse(InputSource source) throws IOException {
|
||||
try {
|
||||
parser.parse(source);
|
||||
} catch (FileNotFoundException fnfe) {
|
||||
throw new HyphenationException("File not found: " + fnfe.getMessage());
|
||||
} catch (IOException ioe) {
|
||||
throw new HyphenationException(ioe.getMessage());
|
||||
} catch (SAXException e) {
|
||||
throw new HyphenationException(errMsg);
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -46,20 +46,16 @@ public class StopFilterFactory extends TokenFilterFactory implements ResourceLoa
   }
 
   @Override
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String stopWordFiles = args.get("words");
     ignoreCase = getBoolean("ignoreCase",false);
     enablePositionIncrements = getBoolean("enablePositionIncrements",false);
 
     if (stopWordFiles != null) {
-      try {
-        if ("snowball".equalsIgnoreCase(args.get("format"))) {
-          stopWords = getSnowballWordSet(loader, stopWordFiles, ignoreCase);
-        } else {
-          stopWords = getWordSet(loader, stopWordFiles, ignoreCase);
-        }
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading stopwords", e);
+      if ("snowball".equalsIgnoreCase(args.get("format"))) {
+        stopWords = getSnowballWordSet(loader, stopWordFiles, ignoreCase);
+      } else {
+        stopWords = getWordSet(loader, stopWordFiles, ignoreCase);
       }
     } else {
       stopWords = new CharArraySet(luceneMatchVersion, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.core;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.TypeTokenFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
@@ -43,25 +42,21 @@ import java.util.Set;
 public class TypeTokenFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {
 
   @Override
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String stopTypesFiles = args.get("types");
     enablePositionIncrements = getBoolean("enablePositionIncrements", false);
     useWhitelist = getBoolean("useWhitelist", false);
     if (stopTypesFiles != null) {
-      try {
-        List<String> files = splitFileNames(stopTypesFiles);
-        if (files.size() > 0) {
-          stopTypes = new HashSet<String>();
-          for (String file : files) {
-            List<String> typesLines = loader.getLines(file.trim());
-            stopTypes.addAll(typesLines);
-          }
+      List<String> files = splitFileNames(stopTypesFiles);
+      if (files.size() > 0) {
+        stopTypes = new HashSet<String>();
+        for (String file : files) {
+          List<String> typesLines = loader.getLines(file.trim());
+          stopTypes.addAll(typesLines);
         }
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading types", e);
       }
     } else {
-      throw new InitializationException("Missing required parameter: types.");
+      throw new IllegalArgumentException("Missing required parameter: types.");
     }
   }
 
@@ -22,7 +22,6 @@ import java.util.Map;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
 import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
@@ -44,7 +43,7 @@ public class GreekLowerCaseFilterFactory extends TokenFilterFactory implements M
     super.init(args);
     assureMatchVersion();
     if (args.containsKey("charset"))
-      throw new InitializationException(
+      throw new IllegalArgumentException(
           "The charset parameter is no longer supported. "
           + "Please process your documents as Unicode instead.");
   }
@@ -39,16 +39,12 @@ public class ElisionFilterFactory extends TokenFilterFactory implements Resource
 
   private CharArraySet articles;
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String articlesFile = args.get("articles");
     boolean ignoreCase = getBoolean("ignoreCase", false);
 
     if (articlesFile != null) {
-      try {
-        articles = getWordSet(loader, articlesFile, ignoreCase);
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading articles", e);
-      }
+      articles = getWordSet(loader, articlesFile, ignoreCase);
     }
   }
 
@@ -17,14 +17,15 @@ package org.apache.lucene.analysis.hunspell;
  * limitations under the License.
  */
 
+import java.io.IOException;
 import java.io.InputStream;
+import java.text.ParseException;
 import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.hunspell.HunspellDictionary;
 import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
@@ -66,11 +67,11 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
    *
    * @param loader ResourceLoader used to load the files
    */
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     assureMatchVersion();
     String dictionaryArg = args.get(PARAM_DICTIONARY);
     if (dictionaryArg == null) {
-      throw new InitializationException("Parameter " + PARAM_DICTIONARY + " is mandatory.");
+      throw new IllegalArgumentException("Parameter " + PARAM_DICTIONARY + " is mandatory.");
     }
     String dictionaryFiles[] = args.get(PARAM_DICTIONARY).split(",");
     String affixFile = args.get(PARAM_AFFIX);
@@ -78,7 +79,7 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
     if(pic != null) {
       if(pic.equalsIgnoreCase(TRUE)) ignoreCase = true;
       else if(pic.equalsIgnoreCase(FALSE)) ignoreCase = false;
-      else throw new InitializationException("Unknown value for " + PARAM_IGNORE_CASE + ": " + pic + ". Must be true or false");
+      else throw new IllegalArgumentException("Unknown value for " + PARAM_IGNORE_CASE + ": " + pic + ". Must be true or false");
     }
 
     String strictAffixParsingParam = args.get(PARAM_STRICT_AFFIX_PARSING);
@@ -86,7 +87,7 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
     if(strictAffixParsingParam != null) {
       if(strictAffixParsingParam.equalsIgnoreCase(FALSE)) strictAffixParsing = false;
       else if(strictAffixParsingParam.equalsIgnoreCase(TRUE)) strictAffixParsing = true;
-      else throw new InitializationException("Unknown value for " + PARAM_STRICT_AFFIX_PARSING + ": " + strictAffixParsingParam + ". Must be true or false");
+      else throw new IllegalArgumentException("Unknown value for " + PARAM_STRICT_AFFIX_PARSING + ": " + strictAffixParsingParam + ". Must be true or false");
     }
 
     InputStream affix = null;
@@ -100,8 +101,8 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
       affix = loader.openResource(affixFile);
 
       this.dictionary = new HunspellDictionary(affix, dictionaries, luceneMatchVersion, ignoreCase, strictAffixParsing);
-    } catch (Exception e) {
-      throw new InitializationException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
+    } catch (ParseException e) {
+      throw new IOException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
     } finally {
       IOUtils.closeWhileHandlingException(affix);
       IOUtils.closeWhileHandlingException(dictionaries);
@@ -44,17 +44,13 @@ public class KeepWordFilterFactory extends TokenFilterFactory implements Resourc
     assureMatchVersion();
   }
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String wordFiles = args.get("words");
     ignoreCase = getBoolean("ignoreCase", false);
     enablePositionIncrements = getBoolean("enablePositionIncrements",false);
 
     if (wordFiles != null) {
-      try {
-        words = getWordSet(loader, wordFiles, ignoreCase);
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading words", e);
-      }
+      words = getWordSet(loader, wordFiles, ignoreCase);
     }
   }
 
@@ -39,15 +39,11 @@ public class KeywordMarkerFilterFactory extends TokenFilterFactory implements Re
   private CharArraySet protectedWords;
   private boolean ignoreCase;
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String wordFiles = args.get(PROTECTED_TOKENS);
     ignoreCase = getBoolean("ignoreCase", false);
     if (wordFiles != null) {
-      try {
-        protectedWords = getWordSet(loader, wordFiles, ignoreCase);
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading protected words", e);
-      }
+      protectedWords = getWordSet(loader, wordFiles, ignoreCase);
     }
   }
 
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.miscellaneous;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.LengthFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
 import java.util.Map;
@@ -47,7 +46,7 @@ public class LengthFilterFactory extends TokenFilterFactory {
     String minKey = args.get(MIN_KEY);
     String maxKey = args.get(MAX_KEY);
     if (minKey == null || maxKey == null) {
-      throw new InitializationException("Both " + MIN_KEY + " and " + MAX_KEY + " are mandatory");
+      throw new IllegalArgumentException("Both " + MIN_KEY + " and " + MAX_KEY + " are mandatory");
    }
    min=Integer.parseInt(minKey);
    max=Integer.parseInt(maxKey);
@@ -21,7 +21,6 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
 /**
@@ -44,7 +43,7 @@ public class LimitTokenCountFilterFactory extends TokenFilterFactory {
     super.init( args );
     String maxTokenCountArg = args.get("maxTokenCount");
     if (maxTokenCountArg == null) {
-      throw new InitializationException("maxTokenCount is mandatory.");
+      throw new IllegalArgumentException("maxTokenCount is mandatory.");
     }
     maxTokenCount = Integer.parseInt(args.get(maxTokenCountArg));
   }
@@ -39,26 +39,22 @@ public class StemmerOverrideFilterFactory extends TokenFilterFactory implements
   private CharArrayMap<String> dictionary = null;
   private boolean ignoreCase;
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String dictionaryFiles = args.get("dictionary");
     ignoreCase = getBoolean("ignoreCase", false);
     if (dictionaryFiles != null) {
       assureMatchVersion();
       List<String> files = splitFileNames(dictionaryFiles);
-      try {
-        if (files.size() > 0) {
-          dictionary = new CharArrayMap<String>(luceneMatchVersion,
-              files.size() * 10, ignoreCase);
-          for (String file : files) {
-            List<String> list = loader.getLines(file.trim());
-            for (String line : list) {
-              String[] mapping = line.split("\t", 2);
-              dictionary.put(mapping[0], mapping[1]);
-            }
+      if (files.size() > 0) {
+        dictionary = new CharArrayMap<String>(luceneMatchVersion,
+            files.size() * 10, ignoreCase);
+        for (String file : files) {
+          List<String> list = loader.getLines(file.trim());
+          for (String line : list) {
+            String[] mapping = line.split("\t", 2);
+            dictionary.put(mapping[0], mapping[1]);
           }
         }
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading dictionary", e);
       }
     }
   }
@@ -21,7 +21,6 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.TrimFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
 /**
@@ -45,13 +44,8 @@ public class TrimFilterFactory extends TokenFilterFactory {
     super.init( args );
 
     String v = args.get( "updateOffsets" );
-    if( v != null ) {
-      try {
-        updateOffsets = Boolean.valueOf( v );
-      }
-      catch( Exception ex ) {
-        throw new InitializationException("Error reading updateOffsets value. Must be true or false.", ex);
-      }
+    if (v != null) {
+      updateOffsets = Boolean.valueOf( v );
     }
   }
 
@@ -53,28 +53,20 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
   public static final String PROTECTED_TOKENS = "protected";
   public static final String TYPES = "types";
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String wordFiles = args.get(PROTECTED_TOKENS);
     if (wordFiles != null) {
-      try {
-        protectedWords = getWordSet(loader, wordFiles, false);
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading protected words", e);
-      }
+      protectedWords = getWordSet(loader, wordFiles, false);
     }
     String types = args.get(TYPES);
     if (types != null) {
-      try {
-        List<String> files = splitFileNames( types );
-        List<String> wlist = new ArrayList<String>();
-        for( String file : files ){
-          List<String> lines = loader.getLines( file.trim() );
-          wlist.addAll( lines );
-        }
-        typeTable = parseTypes(wlist);
-      } catch (IOException e) {
-        throw new InitializationException("IOException while loading types", e);
+      List<String> files = splitFileNames( types );
+      List<String> wlist = new ArrayList<String>();
+      for( String file : files ){
+        List<String> lines = loader.getLines( file.trim() );
+        wlist.addAll( lines );
       }
+      typeTable = parseTypes(wlist);
     }
   }
 
@@ -128,13 +120,13 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
     for( String rule : rules ){
       Matcher m = typePattern.matcher(rule);
       if( !m.find() )
-        throw new InitializationException("Invalid Mapping Rule : [" + rule + "]");
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "]");
       String lhs = parseString(m.group(1).trim());
       Byte rhs = parseType(m.group(2).trim());
       if (lhs.length() != 1)
-        throw new InitializationException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
       if (rhs == null)
-        throw new InitializationException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
+        throw new IllegalArgumentException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
       typeMap.put(lhs.charAt(0), rhs);
     }
 
@@ -174,7 +166,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
     char c = s.charAt( readPos++ );
     if( c == '\\' ){
       if( readPos >= len )
-        throw new InitializationException("Invalid escaped char in [" + s + "]");
+        throw new IllegalArgumentException("Invalid escaped char in [" + s + "]");
       c = s.charAt( readPos++ );
       switch( c ) {
         case '\\' : c = '\\'; break;
@@ -185,7 +177,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
         case 'f' : c = '\f'; break;
         case 'u' :
           if( readPos + 3 >= len )
-            throw new InitializationException("Invalid escaped char in [" + s + "]");
+            throw new IllegalArgumentException("Invalid escaped char in [" + s + "]");
           c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
           readPos += 4;
           break;
@@ -23,7 +23,6 @@ import java.util.Map;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
 import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 
 /**
@@ -53,7 +52,7 @@ public class PathHierarchyTokenizerFactory extends TokenizerFactory {
     String v = args.get( "delimiter" );
     if( v != null ){
       if( v.length() != 1 ){
-        throw new InitializationException("delimiter should be a char. \"" + v + "\" is invalid");
+        throw new IllegalArgumentException("delimiter should be a char. \"" + v + "\" is invalid");
       }
       else{
         delimiter = v.charAt(0);
@@ -66,7 +65,7 @@ public class PathHierarchyTokenizerFactory extends TokenizerFactory {
     v = args.get( "replace" );
     if( v != null ){
       if( v.length() != 1 ){
-        throw new InitializationException("replace should be a char. \"" + v + "\" is invalid");
+        throw new IllegalArgumentException("replace should be a char. \"" + v + "\" is invalid");
       }
       else{
         replacement = v.charAt(0);
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.pattern;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.pattern.PatternReplaceFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
 import java.util.Map;
@@ -58,7 +57,7 @@ public class PatternReplaceFilterFactory extends TokenFilterFactory {
       if (r.equals("first")) {
         all = false;
       } else {
-        throw new InitializationException
+        throw new IllegalArgumentException
           ("Configuration Error: 'replace' must be 'first' or 'all' in "
            + this.getClass().getName());
       }
@@ -24,7 +24,6 @@ import java.util.regex.Pattern;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.pattern.PatternTokenizer;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenizerFactory;
 
 /**
@@ -84,12 +83,7 @@ public class PatternTokenizerFactory extends TokenizerFactory
     group = -1; // use 'split'
     String g = args.get( GROUP );
     if( g != null ) {
-      try {
-        group = Integer.parseInt( g );
-      }
-      catch( Exception ex ) {
-        throw new InitializationException("invalid group argument: " + g);
-      }
+      group = Integer.parseInt( g );
     }
   }
 
@@ -100,7 +94,7 @@ public class PatternTokenizerFactory extends TokenizerFactory
     try {
       return new PatternTokenizer(in, pattern, group);
     } catch( IOException ex ) {
-      throw new InitializationException("IOException thrown creating PatternTokenizer instance", ex);
+      throw new RuntimeException("IOException thrown creating PatternTokenizer instance", ex);
     }
   }
 }
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.payloads.PayloadEncoder;
 import org.apache.lucene.analysis.payloads.FloatEncoder;
 import org.apache.lucene.analysis.payloads.IntegerEncoder;
 import org.apache.lucene.analysis.payloads.IdentityEncoder;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
@@ -62,7 +61,7 @@ public class DelimitedPayloadTokenFilterFactory extends TokenFilterFactory imple
   public void inform(ResourceLoader loader) {
     String encoderClass = args.get(ENCODER_ATTR);
     if (encoderClass == null) {
-      throw new InitializationException("Parameter " + ENCODER_ATTR + " is mandatory");
+      throw new IllegalArgumentException("Parameter " + ENCODER_ATTR + " is mandatory");
     }
     if (encoderClass.equals("float")){
       encoder = new FloatEncoder();
@@ -79,7 +78,7 @@ public class DelimitedPayloadTokenFilterFactory extends TokenFilterFactory imple
       if (delim.length() == 1) {
         delimiter = delim.charAt(0);
       } else{
-        throw new InitializationException("Delimiter must be one character only");
+        throw new IllegalArgumentException("Delimiter must be one character only");
       }
     }
   }
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.payloads;
 
 import org.apache.lucene.analysis.payloads.NumericPayloadTokenFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import java.util.Map;
 
@@ -43,7 +42,7 @@ public class NumericPayloadTokenFilterFactory extends TokenFilterFactory {
     String payloadArg = args.get("payload");
     typeMatch = args.get("typeMatch");
     if (payloadArg == null || typeMatch == null) {
-      throw new InitializationException("Both payload and typeMatch are required");
+      throw new IllegalArgumentException("Both payload and typeMatch are required");
     }
     payload = Float.parseFloat(payloadArg);
   }
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.shingle;
 
 import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
 import java.util.Map;
@@ -49,17 +48,17 @@ public class ShingleFilterFactory extends TokenFilterFactory {
     maxShingleSize = getInt("maxShingleSize",
                             ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
     if (maxShingleSize < 2) {
-      throw new InitializationException("Invalid maxShingleSize (" + maxShingleSize
+      throw new IllegalArgumentException("Invalid maxShingleSize (" + maxShingleSize
                                         + ") - must be at least 2");
     }
     minShingleSize = getInt("minShingleSize",
                             ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
     if (minShingleSize < 2) {
-      throw new InitializationException("Invalid minShingleSize (" + minShingleSize
+      throw new IllegalArgumentException("Invalid minShingleSize (" + minShingleSize
                                         + ") - must be at least 2");
     }
     if (minShingleSize > maxShingleSize) {
-      throw new InitializationException("Invalid minShingleSize (" + minShingleSize
+      throw new IllegalArgumentException("Invalid minShingleSize (" + minShingleSize
                                         + ") - must be no greater than maxShingleSize ("
                                         + maxShingleSize + ")");
     }
@@ -49,14 +49,10 @@ public class SnowballPorterFilterFactory extends TokenFilterFactory implements R
   private Class<?> stemClass;
 
 
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     String wordFiles = args.get(PROTECTED_TOKENS);
     if (wordFiles != null) {
-      try {
-        protectedWords = getWordSet(loader, wordFiles, false);
-      } catch (IOException e) {
-        throw new InitializationException("IOException thrown while loading protected words", e);
-      }
+      protectedWords = getWordSet(loader, wordFiles, false);
     }
   }
 
@@ -71,7 +67,7 @@ public class SnowballPorterFilterFactory extends TokenFilterFactory implements R
     try {
      stemClass = Class.forName("org.tartarus.snowball.ext." + language + "Stemmer");
    } catch (ClassNotFoundException e) {
-      throw new InitializationException("Can't find class for stemmer language " + language, e);
+      throw new IllegalArgumentException("Can't find class for stemmer language " + language, e);
    }
  }
 
@@ -80,7 +76,7 @@ public class SnowballPorterFilterFactory extends TokenFilterFactory implements R
     try {
       program = (SnowballProgram)stemClass.newInstance();
     } catch (Exception e) {
-      throw new InitializationException("Error instantiating stemmer for language " + language + "from class " + stemClass, e);
+      throw new RuntimeException("Error instantiating stemmer for language " + language + "from class " + stemClass, e);
     }
 
     if (protectedWords != null)
@@ -63,7 +63,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
   }
 
   @Override
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
     final boolean ignoreCase = getBoolean("ignoreCase", false);
     this.ignoreCase = ignoreCase;
 
@@ -89,10 +89,10 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
         map = loadWordnetSynonyms(loader, true, analyzer);
       } else {
         // TODO: somehow make this more pluggable
-        throw new InitializationException("Unrecognized synonyms format: " + format);
+        throw new IllegalArgumentException("Unrecognized synonyms format: " + format);
       }
-    } catch (Exception e) {
-      throw new InitializationException("Exception thrown while loading synonyms", e);
+    } catch (ParseException e) {
+      throw new IOException("Error parsing synonyms file:", e);
     }
   }
 
@@ -103,7 +103,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
     final boolean expand = getBoolean("expand", true);
     String synonyms = args.get("synonyms");
     if (synonyms == null)
-      throw new InitializationException("Missing required argument 'synonyms'.");
+      throw new IllegalArgumentException("Missing required argument 'synonyms'.");
 
     CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
       .onMalformedInput(CodingErrorAction.REPORT)
@@ -131,7 +131,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
     final boolean expand = getBoolean("expand", true);
     String synonyms = args.get("synonyms");
     if (synonyms == null)
-      throw new InitializationException("Missing required argument 'synonyms'.");
+      throw new IllegalArgumentException("Missing required argument 'synonyms'.");
 
     CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
       .onMalformedInput(CodingErrorAction.REPORT)
@@ -153,7 +153,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
   }
 
   // (there are no tests for this functionality)
-  private TokenizerFactory loadTokenizerFactory(ResourceLoader loader, String cname){
+  private TokenizerFactory loadTokenizerFactory(ResourceLoader loader, String cname) throws IOException {
     TokenizerFactory tokFactory = loader.newInstance(cname, TokenizerFactory.class);
     tokFactory.setLuceneMatchVersion(luceneMatchVersion);
     tokFactory.init(args);
@@ -59,7 +59,7 @@ public abstract class AbstractAnalysisFactory {
    * to inform user, that for this factory a {@link #luceneMatchVersion} is required */
   protected final void assureMatchVersion() {
     if (luceneMatchVersion == null) {
-      throw new InitializationException("Configuration Error: Factory '" + this.getClass().getName() +
+      throw new IllegalArgumentException("Configuration Error: Factory '" + this.getClass().getName() +
         "' needs a 'luceneMatchVersion' parameter");
     }
   }
@@ -86,7 +86,7 @@ public abstract class AbstractAnalysisFactory {
       if (useDefault) {
         return defaultVal;
       }
-      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
+      throw new IllegalArgumentException("Configuration Error: missing parameter '" + name + "'");
     }
     return Integer.parseInt(s);
   }
@@ -99,7 +99,7 @@ public abstract class AbstractAnalysisFactory {
     String s = args.get(name);
     if (s==null) {
       if (useDefault) return defaultVal;
-      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
+      throw new IllegalArgumentException("Configuration Error: missing parameter '" + name + "'");
     }
     return Boolean.parseBoolean(s);
   }
@@ -108,11 +108,11 @@ public abstract class AbstractAnalysisFactory {
     try {
       String pat = args.get(name);
       if (null == pat) {
-        throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
+        throw new IllegalArgumentException("Configuration Error: missing parameter '" + name + "'");
      }
      return Pattern.compile(args.get(name));
    } catch (PatternSyntaxException e) {
-      throw new InitializationException
+      throw new IllegalArgumentException
         ("Configuration Error: '" + name + "' can not be parsed in " +
          this.getClass().getSimpleName(), e);
     }
@@ -1,32 +0,0 @@
-package org.apache.lucene.analysis.util;
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * Exception representing an error occurring during the initialization of a Factory.
- */
-public class InitializationException extends RuntimeException {
-
-  public InitializationException(String message) {
-    super(message);
-  }
-
-  public InitializationException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}
@@ -17,6 +17,8 @@
 
 package org.apache.lucene.analysis.util;
 
+import java.io.IOException;
+
 /**
  * Interface for a component that needs to be initialized by
  * an implementation of {@link ResourceLoader}.
@@ -25,5 +27,5 @@ package org.apache.lucene.analysis.util;
  */
 public interface ResourceLoaderAware {
 
-  void inform(ResourceLoader loader);
+  void inform(ResourceLoader loader) throws IOException;
 }
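Because inform(ResourceLoader) now declares a checked IOException, any code that wires up a ResourceLoaderAware component must handle or rethrow it (the TestFactories hunk further down adds exactly such a catch). A hypothetical host-application sketch, not taken from this diff — the loader, argsMap, and matchVersion variables are assumed to exist:

  TokenFilterFactory factory = new StopFilterFactory();
  factory.setLuceneMatchVersion(matchVersion);
  factory.init(argsMap);
  if (factory instanceof ResourceLoaderAware) {
    try {
      ((ResourceLoaderAware) factory).inform(loader);
    } catch (IOException e) {
      // a resource file (stopwords, mappings, ...) could not be read
      throw new RuntimeException("failed to initialize analysis factory", e);
    }
  }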
@@ -17,7 +17,6 @@ package org.apache.lucene.analysis.charfilter;
  * limitations under the License.
  */
 
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.util.LuceneTestCase;
 
 public class TestMappingCharFilterFactory extends LuceneTestCase {
@@ -29,7 +28,7 @@ public class TestMappingCharFilterFactory extends LuceneTestCase {
       f.parseString( "\\" );
       fail( "escape character cannot be alone." );
     }
-    catch (InitializationException expected) {}
+    catch (IllegalArgumentException expected) {}
 
     assertEquals( "unexpected escaped characters",
         "\\\"\n\t\r\b\f", f.parseString( "\\\\\\\"\\n\\t\\r\\b\\f" ) );
@@ -42,7 +41,7 @@ public class TestMappingCharFilterFactory extends LuceneTestCase {
       f.parseString( "\\u000" );
       fail( "invalid length check." );
     }
-    catch (InitializationException expected) {}
+    catch (IllegalArgumentException expected) {}
 
     try {
       f.parseString( "\\u123x" );
@@ -50,7 +50,6 @@ import org.apache.lucene.analysis.util.CharFilterFactory;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.util.LuceneTestCase;
 
 /**
@@ -127,7 +126,7 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
           if (!(instance instanceof ResourceLoaderAware)) {
             assertSame(c, instance.create(new StringReader("")).getClass());
           }
-        } catch (InitializationException e) {
+        } catch (IllegalArgumentException e) {
           // TODO: For now pass because some factories have not yet a default config that always works, some require ResourceLoader
         }
       } else if (TokenFilter.class.isAssignableFrom(c)) {
@@ -147,7 +146,7 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
             assertSame(c, createdClazz);
           }
         }
-        } catch (InitializationException e) {
+        } catch (IllegalArgumentException e) {
           // TODO: For now pass because some factories have not yet a default config that always works, some require ResourceLoader
         }
       } else if (CharFilter.class.isAssignableFrom(c)) {
@@ -167,7 +166,7 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
            assertSame(c, createdClazz);
          }
        }
-        } catch (InitializationException e) {
+        } catch (IllegalArgumentException e) {
          // TODO: For now pass because some factories have not yet a default config that always works, some require ResourceLoader
        }
      }
@@ -27,7 +27,6 @@ import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
 import org.apache.lucene.analysis.util.CharFilterFactory;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.StringMockResourceLoader;
@@ -121,7 +120,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
       factory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
       factory.init(Collections.<String,String>emptyMap());
       success = true;
-    } catch (InitializationException ignored) {
+    } catch (IllegalArgumentException ignored) {
       // its ok if we dont provide the right parameters to throw this
     }
 
@@ -130,8 +129,10 @@ public class TestFactories extends BaseTokenStreamTestCase {
       try {
        ((ResourceLoaderAware) factory).inform(new StringMockResourceLoader(""));
        success = true;
-      } catch (InitializationException ignored) {
+      } catch (IOException ignored) {
         // its ok if the right files arent available or whatever to throw this
+      } catch (IllegalArgumentException ignored) {
+        // is this ok? I guess so
       }
     }
     return success;
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.core;
 
 import org.apache.lucene.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.analysis.NumericTokenStream;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.ResourceAsStreamResourceLoader;
 import org.apache.lucene.analysis.util.ResourceLoader;
 import org.junit.Test;
@@ -96,8 +95,8 @@ public class TestTypeTokenFilterFactory extends BaseTokenStreamTestCase {
       typeTokenFilterFactory.setLuceneMatchVersion(TEST_VERSION_CURRENT);
       typeTokenFilterFactory.init(args);
       typeTokenFilterFactory.inform(new ResourceAsStreamResourceLoader(getClass()));
-      fail("not supplying 'types' parameter should cause an InitializationException");
-    } catch (InitializationException e) {
+      fail("not supplying 'types' parameter should cause an IllegalArgumentException");
+    } catch (IllegalArgumentException e) {
       // everything ok
     }
   }
@@ -22,7 +22,6 @@ import java.util.Map;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.icu.ICUNormalizer2Filter;
 import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
@@ -65,7 +64,7 @@ public class ICUNormalizer2FilterFactory extends TokenFilterFactory implements M
     else if (mode.equals("decompose"))
       normalizer = Normalizer2.getInstance(null, name, Normalizer2.Mode.DECOMPOSE);
     else
-      throw new InitializationException("Invalid mode: " + mode);
+      throw new IllegalArgumentException("Invalid mode: " + mode);
 
     String filter = args.get("filter");
     if (filter != null) {
@@ -22,7 +22,6 @@ import java.util.Map;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.icu.ICUTransformFilter;
 import org.apache.lucene.analysis.util.AbstractAnalysisFactory;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.MultiTermAwareComponent;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
@@ -47,7 +46,7 @@ public class ICUTransformFilterFactory extends TokenFilterFactory implements Mul
     super.init(args);
     String id = args.get("id");
     if (id == null) {
-      throw new InitializationException("id is required.");
+      throw new IllegalArgumentException("id is required.");
     }
 
     int dir;
@@ -57,7 +56,7 @@ public class ICUTransformFilterFactory extends TokenFilterFactory implements Mul
     else if (direction.equalsIgnoreCase("reverse"))
       dir = Transliterator.REVERSE;
     else
-      throw new InitializationException("invalid direction: " + direction);
+      throw new IllegalArgumentException("invalid direction: " + direction);
 
     transliterator = Transliterator.getInstance(id, dir);
   }
@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.ja;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter;
-import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 
 import java.util.Map;
@@ -45,7 +44,7 @@ public class JapaneseKatakanaStemFilterFactory extends TokenFilterFactory {
     super.init(args);
     minimumLength = getInt(MINIMUM_LENGTH_PARAM, JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH);
     if (minimumLength < 2) {
-      throw new InitializationException("Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
+      throw new IllegalArgumentException("Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
     }
   }
 
@@ -42,21 +42,17 @@ public class JapanesePartOfSpeechStopFilterFactory extends TokenFilterFactory im
  private boolean enablePositionIncrements;
  private Set<String> stopTags;

-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
    String stopTagFiles = args.get("tags");
    enablePositionIncrements = getBoolean("enablePositionIncrements", false);
    stopTags = null;
-    try {
-      CharArraySet cas = getWordSet(loader, stopTagFiles, false);
-      if (cas != null) {
-        stopTags = new HashSet<String>();
-        for (Object element : cas) {
-          char chars[] = (char[]) element;
-          stopTags.add(new String(chars));
-        }
-      }
-    } catch (IOException e) {
-      throw new InitializationException("IOException thrown while loading tags", e);
-    }
+    CharArraySet cas = getWordSet(loader, stopTagFiles, false);
+    if (cas != null) {
+      stopTags = new HashSet<String>();
+      for (Object element : cas) {
+        char chars[] = (char[]) element;
+        stopTags.add(new String(chars));
+      }
+    }
  }

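For factory authors, the new contract means `inform` declares `throws IOException` and lets resource-loading failures propagate instead of wrapping them. A hedged sketch of a custom ResourceLoaderAware factory under the new contract — the class name and the "tags" argument are illustrative, not from this commit:

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoaderAware;
import org.apache.lucene.analysis.util.TokenFilterFactory;

// Illustrative factory, not from this commit: loads a tag list and lets any
// IOException from the loader reach the caller as-is.
public abstract class MyTagFilterFactory extends TokenFilterFactory implements ResourceLoaderAware {
  private Set<String> tags;

  public void inform(ResourceLoader loader) throws IOException {
    CharArraySet cas = getWordSet(loader, args.get("tags"), false);
    tags = new HashSet<String>();
    if (cas != null) {
      for (Object element : cas) {
        tags.add(new String((char[]) element));
      }
    }
  }
}
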
@@ -17,6 +17,7 @@ package org.apache.lucene.analysis.ja;
 * limitations under the License.
 */

+import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;

@@ -30,7 +31,6 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
import org.apache.lucene.analysis.ja.dict.UserDictionary;
-import org.apache.lucene.analysis.util.InitializationException;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.analysis.util.ResourceLoader;

@@ -68,26 +68,22 @@ public class JapaneseTokenizerFactory extends TokenizerFactory implements Resour
  private boolean discardPunctuation;

  @Override
-  public void inform(ResourceLoader loader) {
+  public void inform(ResourceLoader loader) throws IOException {
    mode = getMode(args);
    String userDictionaryPath = args.get(USER_DICT_PATH);
-    try {
-      if (userDictionaryPath != null) {
-        InputStream stream = loader.openResource(userDictionaryPath);
-        String encoding = args.get(USER_DICT_ENCODING);
-        if (encoding == null) {
-          encoding = IOUtils.UTF_8;
-        }
-        CharsetDecoder decoder = Charset.forName(encoding).newDecoder()
-            .onMalformedInput(CodingErrorAction.REPORT)
-            .onUnmappableCharacter(CodingErrorAction.REPORT);
-        Reader reader = new InputStreamReader(stream, decoder);
-        userDictionary = new UserDictionary(reader);
-      } else {
-        userDictionary = null;
-      }
-    } catch (Exception e) {
-      throw new InitializationException("Exception thrown while loading dictionary", e);
-    }
+    if (userDictionaryPath != null) {
+      InputStream stream = loader.openResource(userDictionaryPath);
+      String encoding = args.get(USER_DICT_ENCODING);
+      if (encoding == null) {
+        encoding = IOUtils.UTF_8;
+      }
+      CharsetDecoder decoder = Charset.forName(encoding).newDecoder()
+          .onMalformedInput(CodingErrorAction.REPORT)
+          .onUnmappableCharacter(CodingErrorAction.REPORT);
+      Reader reader = new InputStreamReader(stream, decoder);
+      userDictionary = new UserDictionary(reader);
+    } else {
+      userDictionary = null;
+    }
    discardPunctuation = getBoolean(DISCARD_PUNCTUATION, true);
  }

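The decoder setup retained in the new body is a general JDK pattern worth noting: CodingErrorAction.REPORT makes malformed or unmappable bytes raise a CharacterCodingException (an IOException) instead of silently decoding to U+FFFD. A standalone sketch, with a made-up file name:

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;

public class StrictDecodeDemo {
  public static void main(String[] args) throws IOException {
    CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
        .onMalformedInput(CodingErrorAction.REPORT)
        .onUnmappableCharacter(CodingErrorAction.REPORT);
    // "userdict.txt" is illustrative; a broken byte sequence in it now fails
    // the read with an IOException rather than a silent replacement char.
    Reader reader = new InputStreamReader(new FileInputStream("userdict.txt"), decoder);
    try {
      char[] buf = new char[1024];
      while (reader.read(buf) != -1) {
        // consume; a decode error surfaces here as an IOException
      }
    } finally {
      reader.close();
    }
  }
}
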
@@ -27,7 +27,6 @@ import org.apache.commons.codec.Encoder;
import org.apache.commons.codec.language.*;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.phonetic.PhoneticFilter;
-import org.apache.lucene.analysis.util.InitializationException;
import org.apache.lucene.analysis.util.TokenFilterFactory;

/**

@@ -90,7 +89,7 @@ public class PhoneticFilterFactory extends TokenFilterFactory

    String name = args.get( ENCODER );
    if( name == null ) {
-      throw new InitializationException("Missing required parameter: " + ENCODER
+      throw new IllegalArgumentException("Missing required parameter: " + ENCODER
          + " [" + registry.keySet() + "]");
    }
    clazz = registry.get(name.toUpperCase(Locale.ROOT));

@@ -104,7 +103,7 @@ public class PhoneticFilterFactory extends TokenFilterFactory
      try {
        setMaxCodeLenMethod = clazz.getMethod("setMaxCodeLen", int.class);
      } catch (Exception e) {
-        throw new InitializationException("Encoder " + name + " / " + clazz + " does not support " + MAX_CODE_LENGTH, e);
+        throw new IllegalArgumentException("Encoder " + name + " / " + clazz + " does not support " + MAX_CODE_LENGTH, e);
      }
    }

@@ -119,9 +118,9 @@ public class PhoneticFilterFactory extends TokenFilterFactory
    try {
      return Class.forName(lookupName).asSubclass(Encoder.class);
    } catch (ClassNotFoundException cnfe) {
-      throw new InitializationException("Unknown encoder: " + name + " must be full class name or one of " + registry.keySet(), cnfe);
+      throw new IllegalArgumentException("Unknown encoder: " + name + " must be full class name or one of " + registry.keySet(), cnfe);
    } catch (ClassCastException e) {
-      throw new InitializationException("Not an encoder: " + name + " must be full class name or one of " + registry.keySet(), e);
+      throw new IllegalArgumentException("Not an encoder: " + name + " must be full class name or one of " + registry.keySet(), e);
    }
  }

@@ -138,7 +137,7 @@ public class PhoneticFilterFactory extends TokenFilterFactory
      return encoder;
    } catch (Exception e) {
      final Throwable t = (e instanceof InvocationTargetException) ? e.getCause() : e;
-      throw new InitializationException("Error initializing encoder: " + name + " / " + clazz, t);
+      throw new IllegalArgumentException("Error initializing encoder: " + name + " / " + clazz, t);
    }
  }

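This hunk keeps a useful reflection idiom: exceptions thrown by the reflected code itself arrive wrapped in InvocationTargetException, so the interesting failure is usually getCause(). A standalone sketch of the unwrap pattern (method and class names are illustrative, not the factory's API):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveConfigDemo {
  public static void configureMaxCodeLen(Object encoder, int len) {
    try {
      Method m = encoder.getClass().getMethod("setMaxCodeLen", int.class);
      m.invoke(encoder, len);
    } catch (Exception e) {
      // invoke() wraps exceptions thrown inside the target method; unwrap so
      // the reported cause is the real failure, not the reflection plumbing.
      Throwable t = (e instanceof InvocationTargetException) ? e.getCause() : e;
      throw new IllegalArgumentException("Error configuring encoder: " + encoder.getClass(), t);
    }
  }
}
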
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.stempel.StempelFilter;
import org.apache.lucene.analysis.stempel.StempelStemmer;
import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.lucene.analysis.util.InitializationException;
import org.apache.lucene.analysis.util.ResourceLoaderAware;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.egothor.stemmer.Trie;

@@ -39,11 +38,7 @@ public class StempelPolishStemFilterFactory extends TokenFilterFactory implement
    return new StempelFilter(input, new StempelStemmer(stemmer));
  }

-  public void inform(ResourceLoader loader) {
-    try {
-      stemmer = StempelStemmer.load(loader.openResource(STEMTABLE));
-    } catch (IOException e) {
-      throw new InitializationException("Could not load stem table: " + STEMTABLE, e);
-    }
+  public void inform(ResourceLoader loader) throws IOException {
+    stemmer = StempelStemmer.load(loader.openResource(STEMTABLE));
  }
}

@@ -18,7 +18,6 @@ package org.apache.lucene.analysis.uima;
 */

import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.util.InitializationException;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.analysis.uima.UIMAAnnotationsTokenizer;

@@ -39,7 +38,7 @@ public class UIMAAnnotationsTokenizerFactory extends TokenizerFactory {
    descriptorPath = args.get("descriptorPath");
    tokenType = args.get("tokenType");
    if (descriptorPath == null || tokenType == null) {
-      throw new InitializationException("Both descriptorPath and tokenType are mandatory");
+      throw new IllegalArgumentException("Both descriptorPath and tokenType are mandatory");
    }
  }

@@ -19,7 +19,6 @@ package org.apache.lucene.analysis.uima;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.uima.UIMATypeAwareAnnotationsTokenizer;
-import org.apache.lucene.analysis.util.InitializationException;
import org.apache.lucene.analysis.util.TokenizerFactory;

import java.io.Reader;

@@ -41,7 +40,7 @@ public class UIMATypeAwareAnnotationsTokenizerFactory extends TokenizerFactory {
    tokenType = args.get("tokenType");
    featurePath = args.get("featurePath");
    if (descriptorPath == null || tokenType == null || featurePath == null) {
-      throw new InitializationException("descriptorPath, tokenType, and featurePath are mandatory");
+      throw new IllegalArgumentException("descriptorPath, tokenType, and featurePath are mandatory");
    }
  }

@@ -604,7 +604,7 @@ public class SolrResourceLoader implements ResourceLoader
  /**
   * Tell all {@link ResourceLoaderAware} instances about the loader
   */
-  public void inform( ResourceLoader loader )
+  public void inform( ResourceLoader loader ) throws IOException
  {

    // make a copy to avoid potential deadlock of a callback adding to the list

@@ -40,6 +40,7 @@ import javax.xml.xpath.XPathConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

@@ -109,7 +110,11 @@ public final class IndexSchema {
      is.setSystemId(SystemIdResolver.createSystemIdFromResourceName(name));
    }
    readSchema(is);
-    loader.inform( loader );
+    try {
+      loader.inform( loader );
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
  }

  /**

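IndexSchema cannot widen its own signature, so it wraps the newly checked IOException in an unchecked RuntimeException at the boundary; the cause chain keeps the original I/O failure visible in stack traces. A minimal standalone sketch of that trade-off — the interface and names are illustrative, not the Solr API:

import java.io.IOException;

public class InformBoundaryDemo {
  interface Informable {
    void inform() throws IOException; // the newly checked contract
  }

  // A caller whose signature must stay exception-free wraps at the boundary.
  static void informAll(Iterable<? extends Informable> awares) {
    for (Informable aware : awares) {
      try {
        aware.inform();
      } catch (IOException e) {
        // The wrapped cause still surfaces in stack traces and logs.
        throw new RuntimeException(e);
      }
    }
  }
}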