SOLR-3363: Consolidated Analysis Factory Exceptions

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1329536 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Christopher John Male 2012-04-24 03:31:34 +00:00
parent d176899fea
commit 7f45484847
39 changed files with 113 additions and 107 deletions

View File

@ -271,6 +271,9 @@ New Features
field content that was already processed and split into tokens using some external processing
chain. Serialization format is pluggable, and defaults to JSON. (ab)
* SOLR-3363: Consolidated Exceptions in Analysis Factories so they only throw
InitializationExceptions (Chris Male)
Optimizations
----------------------

View File

@ -54,7 +54,7 @@ public abstract class BaseCharFilterFactory implements CharFilterFactory {
String s = args.get(name);
if (s==null) {
if (useDefault) return defaultVal;
throw new RuntimeException("Configuration Error: missing parameter '" + name + "'");
throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
}
return Integer.parseInt(s);
}

View File

@ -73,7 +73,7 @@ abstract class BaseTokenStreamFactory {
* to inform user, that for this factory a {@link #luceneMatchVersion} is required */
protected final void assureMatchVersion() {
if (luceneMatchVersion == null) {
throw new RuntimeException("Configuration Error: Factory '" + this.getClass().getName() +
throw new InitializationException("Configuration Error: Factory '" + this.getClass().getName() +
"' needs a 'luceneMatchVersion' parameter");
} else if (!luceneMatchVersion.onOrAfter(Version.LUCENE_40)) {
log.warn(getClass().getSimpleName() + " is using deprecated " + luceneMatchVersion +
@ -100,7 +100,7 @@ abstract class BaseTokenStreamFactory {
String s = args.get(name);
if (s==null) {
if (useDefault) return defaultVal;
throw new RuntimeException("Configuration Error: missing parameter '" + name + "'");
throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
}
return Integer.parseInt(s);
}
@ -113,7 +113,7 @@ abstract class BaseTokenStreamFactory {
String s = args.get(name);
if (s==null) {
if (useDefault) return defaultVal;
throw new RuntimeException("Configuration Error: missing parameter '" + name + "'");
throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
}
return Boolean.parseBoolean(s);
}

View File

@ -28,8 +28,6 @@ import org.apache.commons.io.IOUtils;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.collation.CollationKeyFilter;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.util.plugin.ResourceLoaderAware;
/**
@ -84,11 +82,11 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
String decomposition = args.get("decomposition");
if (custom == null && language == null)
throw new SolrException(ErrorCode.SERVER_ERROR, "Either custom or language is required.");
throw new InitializationException("Either custom or language is required.");
if (custom != null &&
(language != null || country != null || variant != null))
throw new SolrException(ErrorCode.SERVER_ERROR, "Cannot specify both language and custom. "
throw new InitializationException("Cannot specify both language and custom. "
+ "To tailor rules for a built-in language, see the javadocs for RuleBasedCollator. "
+ "Then save the entire customized ruleset to a file, and use with the custom parameter");
@ -111,7 +109,7 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
else if (strength.equalsIgnoreCase("identical"))
collator.setStrength(Collator.IDENTICAL);
else
throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid strength: " + strength);
throw new InitializationException("Invalid strength: " + strength);
}
// set the decomposition flag, otherwise it will be the default.
@ -123,7 +121,7 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
else if (decomposition.equalsIgnoreCase("full"))
collator.setDecomposition(Collator.FULL_DECOMPOSITION);
else
throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid decomposition: " + decomposition);
throw new InitializationException("Invalid decomposition: " + decomposition);
}
}
@ -139,8 +137,7 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
Locale locale;
if (language != null && country == null && variant != null)
throw new SolrException(ErrorCode.SERVER_ERROR,
"To specify variant, country is required");
throw new InitializationException("To specify variant, country is required");
else if (language != null && country != null && variant != null)
locale = new Locale(language, country, variant);
else if (language != null && country != null)
@ -163,10 +160,10 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
return new RuleBasedCollator(rules);
} catch (IOException e) {
// io error
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading rules", e);
} catch (ParseException e) {
// invalid rules
throw new RuntimeException(e);
throw new InitializationException("ParseException thrown while parsing rules", e);
} finally {
IOUtils.closeQuietly(input);
}

View File

@ -55,7 +55,7 @@ public class CommonGramsFilterFactory extends BaseTokenFilterFactory implements
commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading common word file", e);
}
} else {
commonWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;

View File

@ -62,7 +62,7 @@ public class CommonGramsQueryFilterFactory extends BaseTokenFilterFactory
commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading common word file", e);
}
} else {
commonWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;

View File

@ -23,7 +23,6 @@ import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.payloads.IntegerEncoder;
import org.apache.lucene.analysis.payloads.IdentityEncoder;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.util.plugin.ResourceLoaderAware;
import java.util.Map;
@ -75,7 +74,7 @@ public class DelimitedPayloadTokenFilterFactory extends BaseTokenFilterFactory i
if (delim.length() == 1) {
delimiter = delim.charAt(0);
} else{
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Delimiter must be one character only");
throw new InitializationException("Delimiter must be one character only");
}
}
}

View File

@ -22,7 +22,6 @@ import org.apache.lucene.analysis.compound.*;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.solr.util.plugin.ResourceLoaderAware;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.lucene.analysis.TokenStream;
import java.util.Map;
@ -53,8 +52,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends BaseTokenFilterFac
assureMatchVersion();
dictFile = args.get("dictionary");
if (null == dictFile) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
"Missing required parameter: dictionary");
throw new InitializationException("Missing required parameter: dictionary");
}
minWordSize= getInt("minWordSize",CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
@ -66,7 +64,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends BaseTokenFilterFac
try {
dictionary = super.getWordSet(loader, dictFile, false);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading dictionary", e);
}
}
public DictionaryCompoundWordTokenFilter create(TokenStream input) {

View File

@ -52,7 +52,7 @@ public class ElisionFilterFactory extends BaseTokenFilterFactory implements Reso
try {
articles = getWordSet(loader, articlesFile, ignoreCase);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading articles", e);
}
}
}

View File

@ -39,7 +39,6 @@ import org.apache.lucene.analysis.synonym.SolrSynonymParser;
import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
import org.apache.lucene.util.Version;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.util.plugin.ResourceLoaderAware;
@ -87,10 +86,10 @@ final class FSTSynonymFilterFactory extends BaseTokenFilterFactory implements Re
map = loadWordnetSynonyms(loader, true, analyzer);
} else {
// TODO: somehow make this more pluggable
throw new RuntimeException("Unrecognized synonyms format: " + format);
throw new InitializationException("Unrecognized synonyms format: " + format);
}
} catch (Exception e) {
throw new RuntimeException(e);
throw new InitializationException("Exception thrown while loading synonyms", e);
}
if (map.fst == null) {
@ -105,7 +104,7 @@ final class FSTSynonymFilterFactory extends BaseTokenFilterFactory implements Re
final boolean expand = getBoolean("expand", true);
String synonyms = args.get("synonyms");
if (synonyms == null)
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required argument 'synonyms'.");
throw new InitializationException("Missing required argument 'synonyms'.");
CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
.onMalformedInput(CodingErrorAction.REPORT)
@ -133,7 +132,7 @@ final class FSTSynonymFilterFactory extends BaseTokenFilterFactory implements Re
final boolean expand = getBoolean("expand", true);
String synonyms = args.get("synonyms");
if (synonyms == null)
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required argument 'synonyms'.");
throw new InitializationException("Missing required argument 'synonyms'.");
CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
.onMalformedInput(CodingErrorAction.REPORT)

View File

@ -22,8 +22,6 @@ import java.util.Map;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
/**
* Factory for {@link GreekLowerCaseFilter}.
@ -44,7 +42,7 @@ public class GreekLowerCaseFilterFactory extends BaseTokenFilterFactory implemen
super.init(args);
assureMatchVersion();
if (args.containsKey("charset"))
throw new SolrException(ErrorCode.SERVER_ERROR,
throw new InitializationException(
"The charset parameter is no longer supported. "
+ "Please process your documents as Unicode instead.");
}

View File

@ -25,8 +25,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.hunspell.HunspellDictionary;
import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.util.plugin.ResourceLoaderAware;
/**
@ -69,7 +67,7 @@ public class HunspellStemFilterFactory extends BaseTokenFilterFactory implements
if(pic != null) {
if(pic.equalsIgnoreCase(TRUE)) ignoreCase = true;
else if(pic.equalsIgnoreCase(FALSE)) ignoreCase = false;
else throw new SolrException(ErrorCode.UNKNOWN, "Unknown value for "+PARAM_IGNORE_CASE+": "+pic+". Must be true or false");
else throw new InitializationException("Unknown value for " + PARAM_IGNORE_CASE + ": " + pic + ". Must be true or false");
}
try {
@ -79,7 +77,7 @@ public class HunspellStemFilterFactory extends BaseTokenFilterFactory implements
}
this.dictionary = new HunspellDictionary(loader.openResource(affixFile), dictionaries, luceneMatchVersion, ignoreCase);
} catch (Exception e) {
throw new RuntimeException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
throw new InitializationException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
}
}

View File

@ -25,7 +25,6 @@ import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.solr.analysis.BaseTokenFilterFactory;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.util.plugin.ResourceLoaderAware;
import java.util.Map;
@ -79,8 +78,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends BaseTokenFilterFa
encoding = args.get("encoding");
hypFile = args.get("hyphenator");
if (null == hypFile) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Missing required parameter: hyphenator");
throw new InitializationException("Missing required parameter: hyphenator");
}
minWordSize = getInt("minWordSize", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
@ -102,7 +100,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends BaseTokenFilterFa
is.setSystemId(hypFile);
hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
} catch (Exception e) { // TODO: getHyphenationTree really shouldn't throw "Exception"
throw new RuntimeException(e);
throw new InitializationException("Exception thrown while loading dictionary and hyphenation file", e);
} finally {
IOUtils.closeQuietly(stream);
}

View File

@ -0,0 +1,32 @@
package org.apache.solr.analysis;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Exception representing an error occurring during the initialization of an
 * analysis Factory (token filter, tokenizer, or char filter factory).
 *
 * <p>Thrown when a factory receives missing, malformed, or unsupported
 * configuration parameters, or when a required resource (dictionary, stopword
 * file, synonym rules, etc.) cannot be loaded.</p>
 */
public class InitializationException extends RuntimeException {

  // RuntimeException is Serializable; declare an explicit serialVersionUID
  // so serialized form is stable across recompiles.
  private static final long serialVersionUID = 1L;

  /**
   * Creates an exception describing an initialization failure.
   *
   * @param message human-readable description of the configuration error
   */
  public InitializationException(String message) {
    super(message);
  }

  /**
   * Creates an exception wrapping the underlying cause of an initialization
   * failure (e.g. an IOException while loading a resource file).
   *
   * @param message human-readable description of the configuration error
   * @param cause the underlying exception that triggered this failure
   */
  public InitializationException(String message, Throwable cause) {
    super(message, cause);
  }
}

View File

@ -19,7 +19,6 @@ package org.apache.solr.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter;
import org.apache.solr.common.SolrException;
import java.util.Map;
@ -44,8 +43,7 @@ public class JapaneseKatakanaStemFilterFactory extends BaseTokenFilterFactory {
super.init(args);
minimumLength = getInt(MINIMUM_LENGTH_PARAM, JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH);
if (minimumLength < 2) {
throw new SolrException(SolrException.ErrorCode.UNKNOWN,
"Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
throw new InitializationException("Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
}
}

View File

@ -55,7 +55,7 @@ public class JapanesePartOfSpeechStopFilterFactory extends BaseTokenFilterFactor
stopTags.add(new String(chars));
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading tags", e);
}
}

View File

@ -32,7 +32,6 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
import org.apache.lucene.analysis.ja.dict.UserDictionary;
import org.apache.lucene.util.IOUtils;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.util.plugin.ResourceLoaderAware;
/**
@ -80,7 +79,7 @@ public class JapaneseTokenizerFactory extends BaseTokenizerFactory implements Re
userDictionary = null;
}
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
throw new InitializationException("Exception thrown while loading dictionary", e);
}
}

View File

@ -55,7 +55,7 @@ public class KeepWordFilterFactory extends BaseTokenFilterFactory implements Res
try {
words = getWordSet(loader, wordFiles, ignoreCase);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading words", e);
}
}
}

View File

@ -48,7 +48,7 @@ public class KeywordMarkerFilterFactory extends BaseTokenFilterFactory implement
try {
protectedWords = getWordSet(loader, wordFiles, ignoreCase);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading protected words", e);
}
}
}

View File

@ -71,7 +71,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
}
}
catch( IOException e ){
throw new RuntimeException( e );
throw new InitializationException("IOException thrown while loading mappings", e);
}
normMap = new NormalizeCharMap();
parseRules( wlist, normMap );
@ -89,7 +89,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
for( String rule : rules ){
Matcher m = p.matcher( rule );
if( !m.find() )
throw new RuntimeException( "Invalid Mapping Rule : [" + rule + "], file = " + mapping );
throw new InitializationException("Invalid Mapping Rule : [" + rule + "], file = " + mapping);
normMap.add( parseString( m.group( 1 ) ), parseString( m.group( 2 ) ) );
}
}
@ -104,7 +104,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
char c = s.charAt( readPos++ );
if( c == '\\' ){
if( readPos >= len )
throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
throw new InitializationException("Invalid escaped char in [" + s + "]");
c = s.charAt( readPos++ );
switch( c ) {
case '\\' : c = '\\'; break;
@ -116,7 +116,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
case 'f' : c = '\f'; break;
case 'u' :
if( readPos + 3 >= len )
throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
throw new InitializationException("Invalid escaped char in [" + s + "]");
c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
readPos += 4;
break;

View File

@ -51,7 +51,7 @@ public class PathHierarchyTokenizerFactory extends BaseTokenizerFactory {
String v = args.get( "delimiter" );
if( v != null ){
if( v.length() != 1 ){
throw new IllegalArgumentException( "delimiter should be a char. \"" + v + "\" is invalid" );
throw new InitializationException("delimiter should be a char. \"" + v + "\" is invalid");
}
else{
delimiter = v.charAt(0);
@ -64,7 +64,7 @@ public class PathHierarchyTokenizerFactory extends BaseTokenizerFactory {
v = args.get( "replace" );
if( v != null ){
if( v.length() != 1 ){
throw new IllegalArgumentException( "replace should be a char. \"" + v + "\" is invalid" );
throw new InitializationException("replace should be a char. \"" + v + "\" is invalid");
}
else{
replacement = v.charAt(0);

View File

@ -51,7 +51,7 @@ public class PatternReplaceCharFilterFactory extends BaseCharFilterFactory {
try {
p = Pattern.compile(args.get("pattern"));
} catch (PatternSyntaxException e) {
throw new RuntimeException
throw new InitializationException
("Configuration Error: 'pattern' can not be parsed in " +
this.getClass().getName(), e);
}

View File

@ -47,7 +47,7 @@ public class PatternReplaceFilterFactory extends BaseTokenFilterFactory {
try {
p = Pattern.compile(args.get("pattern"));
} catch (PatternSyntaxException e) {
throw new RuntimeException
throw new InitializationException
("Configuration Error: 'pattern' can not be parsed in " +
this.getClass().getName(), e);
}
@ -62,7 +62,7 @@ public class PatternReplaceFilterFactory extends BaseTokenFilterFactory {
if (r.equals("first")) {
all = false;
} else {
throw new RuntimeException
throw new InitializationException
("Configuration Error: 'replace' must be 'first' or 'all' in "
+ this.getClass().getName());
}

View File

@ -24,7 +24,6 @@ import java.util.regex.Pattern;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.pattern.PatternTokenizer;
import org.apache.solr.common.SolrException;
/**
@ -81,7 +80,7 @@ public class PatternTokenizerFactory extends BaseTokenizerFactory
this.args = args;
String regex = args.get( PATTERN );
if( regex == null ) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "missing required argument: "+PATTERN );
throw new InitializationException("missing required argument: " + PATTERN);
}
int flags = 0; // TODO? -- read flags from config CASE_INSENSITIVE, etc
pattern = Pattern.compile( regex, flags );
@ -93,7 +92,7 @@ public class PatternTokenizerFactory extends BaseTokenizerFactory
group = Integer.parseInt( g );
}
catch( Exception ex ) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "invalid group argument: "+g );
throw new InitializationException("invalid group argument: " + g);
}
}
}
@ -105,7 +104,7 @@ public class PatternTokenizerFactory extends BaseTokenizerFactory
try {
return new PatternTokenizer(in, pattern, group);
} catch( IOException ex ) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, ex );
throw new InitializationException("IOException thrown creating PatternTokenizer instance", ex);
}
}
}

View File

@ -33,7 +33,6 @@ import org.apache.commons.codec.language.RefinedSoundex;
import org.apache.commons.codec.language.Soundex;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.phonetic.PhoneticFilter;
import org.apache.solr.common.SolrException;
/**
* Factory for {@link PhoneticFilter}.
@ -87,8 +86,8 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
String name = args.get( ENCODER );
if( name == null ) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Missing required parameter: "+ENCODER
+" ["+registry.keySet()+"]" );
throw new InitializationException("Missing required parameter: " + ENCODER
+ " [" + registry.keySet() + "]");
}
Class<? extends Encoder> clazz = registry.get(name.toUpperCase(Locale.ENGLISH));
if( clazz == null ) {
@ -111,7 +110,7 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
}
}
catch (Exception e) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Error initializing: "+name + "/"+clazz, e);
throw new InitializationException("Error initializing: " + name + "/" + clazz, e);
}
}
@ -123,11 +122,11 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
try {
clazz = lookupEncoder(name);
} catch (ClassNotFoundException cnfe) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Unknown encoder: "+name +" ["+registry.keySet()+"]" );
throw new InitializationException("Unknown encoder: " + name + " [" + registry.keySet() + "]");
}
}
catch (ClassCastException e) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Not an encoder: "+name +" ["+registry.keySet()+"]" );
throw new InitializationException("Not an encoder: " + name + " [" + registry.keySet() + "]");
}
return clazz;
}

View File

@ -21,8 +21,6 @@ import java.io.Reader;
import java.util.Map;
import org.apache.lucene.analysis.ru.RussianLetterTokenizer;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
/** @deprecated Use {@link StandardTokenizerFactory} instead.
* This tokenizer has no Russian-specific functionality.
@ -34,7 +32,7 @@ public class RussianLetterTokenizerFactory extends BaseTokenizerFactory {
public void init(Map<String, String> args) {
super.init(args);
if (args.containsKey("charset"))
throw new SolrException(ErrorCode.SERVER_ERROR,
throw new InitializationException(
"The charset parameter is no longer supported. "
+ "Please process your documents as Unicode instead.");
assureMatchVersion();

View File

@ -21,8 +21,6 @@ package org.apache.solr.analysis;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import java.util.Map;
@ -51,20 +49,17 @@ public class ShingleFilterFactory extends BaseTokenFilterFactory {
maxShingleSize = getInt("maxShingleSize",
ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
if (maxShingleSize < 2) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Invalid maxShingleSize (" + maxShingleSize
throw new InitializationException("Invalid maxShingleSize (" + maxShingleSize
+ ") - must be at least 2");
}
minShingleSize = getInt("minShingleSize",
ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
if (minShingleSize < 2) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Invalid minShingleSize (" + minShingleSize
throw new InitializationException("Invalid minShingleSize (" + minShingleSize
+ ") - must be at least 2");
}
if (minShingleSize > maxShingleSize) {
throw new SolrException(ErrorCode.SERVER_ERROR,
"Invalid minShingleSize (" + minShingleSize
throw new InitializationException("Invalid minShingleSize (" + minShingleSize
+ ") - must be no greater than maxShingleSize ("
+ maxShingleSize + ")");
}

View File

@ -20,7 +20,6 @@ package org.apache.solr.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.util.plugin.ResourceLoaderAware;
@ -50,7 +49,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
public void inform(ResourceLoader loader) {
String synonyms = args.get("synonyms");
if (synonyms == null)
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required argument 'synonyms'.");
throw new InitializationException("Missing required argument 'synonyms'.");
boolean ignoreCase = getBoolean("ignoreCase", false);
boolean expand = getBoolean("expand", true);
@ -84,7 +83,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
}
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading synonym rules", e);
}
return wlist;
}
@ -106,7 +105,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
List<List<String>> target;
if (mapping.size() > 2) {
throw new RuntimeException("Invalid Synonym Rule:" + rule);
throw new InitializationException("Invalid Synonym Rule:" + rule);
} else if (mapping.size()==2) {
source = getSynList(mapping.get(0), synSep, tokFactory);
target = getSynList(mapping.get(1), synSep, tokFactory);
@ -160,7 +159,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
tokList.add( termAtt.toString() );
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while tokenizing source", e);
}
finally{
reader.close();

View File

@ -71,7 +71,7 @@ class SlowSynonymMap {
}
if (currMap.synonyms != null && !mergeExisting) {
throw new RuntimeException("SynonymFilter: there is already a mapping for " + singleMatch);
throw new InitializationException("SynonymFilter: there is already a mapping for " + singleMatch);
}
List<Token> superset = currMap.synonyms==null ? replacement :
mergeTokens(Arrays.asList(currMap.synonyms), replacement);

View File

@ -56,7 +56,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory implemen
try {
protectedWords = getWordSet(loader, wordFiles, false);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading protected words", e);
}
}
}
@ -72,7 +72,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory implemen
try {
stemClass = Class.forName("org.tartarus.snowball.ext." + language + "Stemmer");
} catch (ClassNotFoundException e) {
throw new RuntimeException("Can't find class for stemmer language " + language, e);
throw new InitializationException("Can't find class for stemmer language " + language, e);
}
}
@ -81,7 +81,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory implemen
try {
program = (SnowballProgram)stemClass.newInstance();
} catch (Exception e) {
throw new RuntimeException("Error instantiating stemmer for language " + language + "from class " +stemClass, e);
throw new InitializationException("Error instantiating stemmer for language " + language + "from class " + stemClass, e);
}
if (protectedWords != null)

View File

@ -61,7 +61,7 @@ public class StemmerOverrideFilterFactory extends BaseTokenFilterFactory impleme
}
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading dictionary", e);
}
}
}

View File

@ -61,7 +61,7 @@ public class StopFilterFactory extends BaseTokenFilterFactory implements Resourc
stopWords = getWordSet(loader, stopWordFiles, ignoreCase);
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading stopwords", e);
}
} else {
stopWords = new CharArraySet(luceneMatchVersion, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);

View File

@ -50,7 +50,7 @@ public class SynonymFilterFactory extends BaseTokenFilterFactory implements Reso
// check if you use the new optional arg "format". this makes no sense for the old one,
// as its wired to solr's synonyms format only.
if (args.containsKey("format") && !args.get("format").equals("solr")) {
throw new IllegalArgumentException("You must specify luceneMatchVersion >= 3.4 to use alternate synonyms formats");
throw new InitializationException("You must specify luceneMatchVersion >= 3.4 to use alternate synonyms formats");
}
delegator = new SlowSynonymFilterFactory();
}

View File

@ -21,7 +21,6 @@ import java.util.Map;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.miscellaneous.TrimFilter;
import org.apache.solr.common.SolrException;
/**
* Factory for {@link TrimFilter}.
@ -49,7 +48,7 @@ public class TrimFilterFactory extends BaseTokenFilterFactory {
updateOffsets = Boolean.valueOf( v );
}
catch( Exception ex ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Error reading updateOffsets value. Must be true or false.", ex );
throw new InitializationException("Error reading updateOffsets value. Must be true or false.", ex);
}
}
}

View File

@ -20,7 +20,6 @@ package org.apache.solr.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.TypeTokenFilter;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.util.plugin.ResourceLoaderAware;
@ -58,10 +57,10 @@ public class TypeTokenFilterFactory extends BaseTokenFilterFactory implements Re
}
}
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading types", e);
}
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required parameter: types.");
throw new InitializationException("Missing required parameter: types.");
}
}

View File

@ -63,7 +63,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
try {
protectedWords = getWordSet(loader, wordFiles, false);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException thrown while loading protected words", e);
}
}
String types = args.get(TYPES);
@ -77,7 +77,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
}
typeTable = parseTypes(wlist);
} catch (IOException e) {
throw new RuntimeException(e);
throw new InitializationException("IOException while loading types", e);
}
}
}
@ -132,13 +132,13 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
for( String rule : rules ){
Matcher m = typePattern.matcher(rule);
if( !m.find() )
throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]");
throw new InitializationException("Invalid Mapping Rule : [" + rule + "]");
String lhs = parseString(m.group(1).trim());
Byte rhs = parseType(m.group(2).trim());
if (lhs.length() != 1)
throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
throw new InitializationException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
if (rhs == null)
throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
throw new InitializationException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
typeMap.put(lhs.charAt(0), rhs);
}
@ -178,7 +178,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
char c = s.charAt( readPos++ );
if( c == '\\' ){
if( readPos >= len )
throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
throw new InitializationException("Invalid escaped char in [" + s + "]");
c = s.charAt( readPos++ );
switch( c ) {
case '\\' : c = '\\'; break;
@ -189,7 +189,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
case 'f' : c = '\f'; break;
case 'u' :
if( readPos + 3 >= len )
throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
throw new InitializationException("Invalid escaped char in [" + s + "]");
c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
readPos += 4;
break;

View File

@ -28,7 +28,7 @@ public class TestMappingCharFilterFactory extends LuceneTestCase {
f.parseString( "\\" );
fail( "escape character cannot be alone." );
}
catch( RuntimeException expected ){}
catch (InitializationException expected) {}
assertEquals( "unexpected escaped characters",
"\\\"\n\t\r\b\f", f.parseString( "\\\\\\\"\\n\\t\\r\\b\\f" ) );
@ -41,7 +41,7 @@ public class TestMappingCharFilterFactory extends LuceneTestCase {
f.parseString( "\\u000" );
fail( "invalid length check." );
}
catch( RuntimeException expected ){}
catch (InitializationException expected) {}
try {
f.parseString( "\\u123x" );

View File

@ -43,7 +43,7 @@ public class TestSynonymMap extends LuceneTestCase {
SlowSynonymFilterFactory.parseRules( rules, synMap, "=>", ",", true, null);
fail( "RuntimeException must be thrown." );
}
catch( RuntimeException expected ){}
catch(InitializationException expected) {}
}
public void testReadMappingRules() throws Exception {

View File

@ -19,7 +19,6 @@ package org.apache.solr.analysis;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.SolrException;
import org.apache.solr.core.SolrResourceLoader;
import org.junit.Test;
@ -91,8 +90,8 @@ public class TestTypeTokenFilterFactory extends BaseTokenTestCase {
args.put("enablePositionIncrements", "false");
typeTokenFilterFactory.init(args);
typeTokenFilterFactory.inform(new SolrResourceLoader(null, null));
fail("not supplying 'types' parameter should cause a SolrException");
} catch (SolrException e) {
fail("not supplying 'types' parameter should cause an InitializationException");
} catch (InitializationException e) {
// everything ok
}
}