mirror of https://github.com/apache/lucene.git
SOLR-3363: Consolidated Analysis Factory Exceptions
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1329536 13f79535-47bb-0310-9956-ffa450edef68
parent d176899fea
commit 7f45484847
@@ -271,6 +271,9 @@ New Features
   field content that was already processed and split into tokens using some external processing
   chain. Serialization format is pluggable, and defaults to JSON. (ab)
 
+* SOLR-3363: Consolidated Exceptions in Analysis Factories so they only throw
+  InitializationExceptions (Chris Male)
+
 Optimizations
 ----------------------
 
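The hunks that follow apply one mechanical pattern to every analysis factory: configuration errors detected in init()/inform(), previously reported through a mix of RuntimeException, SolrException, and IllegalArgumentException, now all throw the new InitializationException. A minimal sketch of the pattern, using a hypothetical ExampleFilterFactory (the class name and parameter are illustrative, not from this commit):

package org.apache.solr.analysis;

import java.util.Map;

import org.apache.lucene.analysis.TokenStream;

// Hypothetical factory illustrating the consolidated pattern: any
// configuration problem found during init() surfaces as the new
// InitializationException rather than a generic RuntimeException
// or a SolrException carrying an ErrorCode.
public class ExampleFilterFactory extends BaseTokenFilterFactory {
  private String someParam;

  @Override
  public void init(Map<String, String> args) {
    super.init(args);
    someParam = args.get("someParam");
    if (someParam == null) {
      // Before this commit, factories threw RuntimeException or
      // SolrException(ErrorCode.SERVER_ERROR, ...) here.
      throw new InitializationException("Configuration Error: missing parameter 'someParam'");
    }
  }

  @Override
  public TokenStream create(TokenStream input) {
    return input; // pass-through; only init() matters for this example
  }
}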
@@ -54,7 +54,7 @@ public abstract class BaseCharFilterFactory implements CharFilterFactory {
     String s = args.get(name);
     if (s==null) {
       if (useDefault) return defaultVal;
-      throw new RuntimeException("Configuration Error: missing parameter '" + name + "'");
+      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
     }
     return Integer.parseInt(s);
   }
@@ -73,7 +73,7 @@ abstract class BaseTokenStreamFactory {
    * to inform user, that for this factory a {@link #luceneMatchVersion} is required */
   protected final void assureMatchVersion() {
     if (luceneMatchVersion == null) {
-      throw new RuntimeException("Configuration Error: Factory '" + this.getClass().getName() +
+      throw new InitializationException("Configuration Error: Factory '" + this.getClass().getName() +
           "' needs a 'luceneMatchVersion' parameter");
     } else if (!luceneMatchVersion.onOrAfter(Version.LUCENE_40)) {
       log.warn(getClass().getSimpleName() + " is using deprecated " + luceneMatchVersion +

@@ -100,7 +100,7 @@ abstract class BaseTokenStreamFactory {
     String s = args.get(name);
     if (s==null) {
       if (useDefault) return defaultVal;
-      throw new RuntimeException("Configuration Error: missing parameter '" + name + "'");
+      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
     }
     return Integer.parseInt(s);
   }

@@ -113,7 +113,7 @@ abstract class BaseTokenStreamFactory {
     String s = args.get(name);
     if (s==null) {
       if (useDefault) return defaultVal;
-      throw new RuntimeException("Configuration Error: missing parameter '" + name + "'");
+      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
     }
     return Boolean.parseBoolean(s);
   }
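For reference, the getInt/getBoolean helpers touched above are where most "missing parameter" failures originate. A standalone sketch of their logic (simplified: the real helpers are instance methods reading the factory's stored args, not static utilities taking the map as a parameter):

import java.util.Map;

// Simplified, self-contained version of the parameter helpers shown in
// the hunks above. Assumes InitializationException is on the classpath.
final class ParamHelpers {
  private ParamHelpers() {}

  static int getInt(Map<String, String> args, String name, int defaultVal, boolean useDefault) {
    String s = args.get(name);
    if (s == null) {
      if (useDefault) return defaultVal;
      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
    }
    return Integer.parseInt(s);
  }

  static boolean getBoolean(Map<String, String> args, String name, boolean defaultVal, boolean useDefault) {
    String s = args.get(name);
    if (s == null) {
      if (useDefault) return defaultVal;
      throw new InitializationException("Configuration Error: missing parameter '" + name + "'");
    }
    return Boolean.parseBoolean(s);
  }
}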
@@ -28,8 +28,6 @@ import org.apache.commons.io.IOUtils;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.collation.CollationKeyFilter;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 
 /**

@@ -84,11 +82,11 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
     String decomposition = args.get("decomposition");
 
     if (custom == null && language == null)
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Either custom or language is required.");
+      throw new InitializationException("Either custom or language is required.");
 
     if (custom != null &&
         (language != null || country != null || variant != null))
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Cannot specify both language and custom. "
+      throw new InitializationException("Cannot specify both language and custom. "
           + "To tailor rules for a built-in language, see the javadocs for RuleBasedCollator. "
           + "Then save the entire customized ruleset to a file, and use with the custom parameter");
 

@@ -111,7 +109,7 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
       else if (strength.equalsIgnoreCase("identical"))
         collator.setStrength(Collator.IDENTICAL);
       else
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid strength: " + strength);
+        throw new InitializationException("Invalid strength: " + strength);
     }
 
     // set the decomposition flag, otherwise it will be the default.

@@ -123,7 +121,7 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
       else if (decomposition.equalsIgnoreCase("full"))
         collator.setDecomposition(Collator.FULL_DECOMPOSITION);
       else
-        throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid decomposition: " + decomposition);
+        throw new InitializationException("Invalid decomposition: " + decomposition);
     }
   }
 

@@ -139,8 +137,7 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
     Locale locale;
 
     if (language != null && country == null && variant != null)
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "To specify variant, country is required");
+      throw new InitializationException("To specify variant, country is required");
     else if (language != null && country != null && variant != null)
       locale = new Locale(language, country, variant);
     else if (language != null && country != null)

@@ -163,10 +160,10 @@ public class CollationKeyFilterFactory extends BaseTokenFilterFactory implements
       return new RuleBasedCollator(rules);
     } catch (IOException e) {
       // io error
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading rules", e);
     } catch (ParseException e) {
       // invalid rules
-      throw new RuntimeException(e);
+      throw new InitializationException("ParseException thrown while parsing rules", e);
     } finally {
       IOUtils.closeQuietly(input);
     }
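The collator hunks above also show the second half of the consolidation: checked exceptions caught while loading resources are wrapped with a descriptive message instead of being rethrown as bare RuntimeExceptions. A sketch of that wrap-and-rethrow shape (the rules-loading call is a stand-in for the factory's actual code):

import java.io.IOException;
import java.io.InputStream;
import java.text.ParseException;
import java.text.RuleBasedCollator;

import org.apache.commons.io.IOUtils;

final class CollatorRulesSketch {
  private CollatorRulesSketch() {}

  // Checked exceptions no longer escape as new RuntimeException(e);
  // each is wrapped with a message naming the failing step, and the
  // original exception is preserved as the cause.
  static RuleBasedCollator load(InputStream input) {
    try {
      String rules = IOUtils.toString(input, "UTF-8"); // stand-in for the real loading code
      return new RuleBasedCollator(rules);
    } catch (IOException e) {
      throw new InitializationException("IOException thrown while loading rules", e);
    } catch (ParseException e) {
      throw new InitializationException("ParseException thrown while parsing rules", e);
    } finally {
      IOUtils.closeQuietly(input);
    }
  }
}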
@@ -55,7 +55,7 @@ public class CommonGramsFilterFactory extends BaseTokenFilterFactory implements
         commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading common word file", e);
     }
   } else {
     commonWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;

@@ -62,7 +62,7 @@ public class CommonGramsQueryFilterFactory extends BaseTokenFilterFactory
         commonWords = getWordSet(loader, commonWordFiles, ignoreCase);
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading common word file", e);
     }
   } else {
     commonWords = StopAnalyzer.ENGLISH_STOP_WORDS_SET;
@@ -23,7 +23,6 @@ import org.apache.lucene.analysis.payloads.FloatEncoder;
 import org.apache.lucene.analysis.payloads.IntegerEncoder;
 import org.apache.lucene.analysis.payloads.IdentityEncoder;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 
 import java.util.Map;

@@ -75,7 +74,7 @@ public class DelimitedPayloadTokenFilterFactory extends BaseTokenFilterFactory i
       if (delim.length() == 1) {
         delimiter = delim.charAt(0);
       } else{
-        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Delimiter must be one character only");
+        throw new InitializationException("Delimiter must be one character only");
       }
     }
   }
@@ -22,7 +22,6 @@ import org.apache.lucene.analysis.compound.*;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.lucene.analysis.TokenStream;
 
 import java.util.Map;

@@ -53,8 +52,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends BaseTokenFilterFac
     assureMatchVersion();
     dictFile = args.get("dictionary");
     if (null == dictFile) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
-          "Missing required parameter: dictionary");
+      throw new InitializationException("Missing required parameter: dictionary");
     }
 
     minWordSize= getInt("minWordSize",CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);

@@ -66,7 +64,7 @@ public class DictionaryCompoundWordTokenFilterFactory extends BaseTokenFilterFac
     try {
       dictionary = super.getWordSet(loader, dictFile, false);
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading dictionary", e);
     }
   }
   public DictionaryCompoundWordTokenFilter create(TokenStream input) {
@@ -52,7 +52,7 @@ public class ElisionFilterFactory extends BaseTokenFilterFactory implements Reso
     try {
       articles = getWordSet(loader, articlesFile, ignoreCase);
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading articles", e);
     }
   }
 }
@@ -39,7 +39,6 @@ import org.apache.lucene.analysis.synonym.SolrSynonymParser;
 import org.apache.lucene.analysis.synonym.WordnetSynonymParser;
 import org.apache.lucene.util.Version;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 

@@ -87,10 +86,10 @@ final class FSTSynonymFilterFactory extends BaseTokenFilterFactory implements Re
         map = loadWordnetSynonyms(loader, true, analyzer);
       } else {
         // TODO: somehow make this more pluggable
-        throw new RuntimeException("Unrecognized synonyms format: " + format);
+        throw new InitializationException("Unrecognized synonyms format: " + format);
       }
     } catch (Exception e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("Exception thrown while loading synonyms", e);
     }
 
     if (map.fst == null) {

@@ -105,7 +104,7 @@ final class FSTSynonymFilterFactory extends BaseTokenFilterFactory implements Re
     final boolean expand = getBoolean("expand", true);
     String synonyms = args.get("synonyms");
     if (synonyms == null)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required argument 'synonyms'.");
+      throw new InitializationException("Missing required argument 'synonyms'.");
 
     CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
         .onMalformedInput(CodingErrorAction.REPORT)

@@ -133,7 +132,7 @@ final class FSTSynonymFilterFactory extends BaseTokenFilterFactory implements Re
     final boolean expand = getBoolean("expand", true);
     String synonyms = args.get("synonyms");
     if (synonyms == null)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required argument 'synonyms'.");
+      throw new InitializationException("Missing required argument 'synonyms'.");
 
     CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
         .onMalformedInput(CodingErrorAction.REPORT)
@@ -22,8 +22,6 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.el.GreekLowerCaseFilter;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 
 /**
  * Factory for {@link GreekLowerCaseFilter}.

@@ -44,7 +42,7 @@ public class GreekLowerCaseFilterFactory extends BaseTokenFilterFactory implemen
     super.init(args);
     assureMatchVersion();
     if (args.containsKey("charset"))
-      throw new SolrException(ErrorCode.SERVER_ERROR,
+      throw new InitializationException(
           "The charset parameter is no longer supported. "
           + "Please process your documents as Unicode instead.");
   }
@@ -25,8 +25,6 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.hunspell.HunspellDictionary;
 import org.apache.lucene.analysis.hunspell.HunspellStemFilter;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 
 /**

@@ -69,7 +67,7 @@ public class HunspellStemFilterFactory extends BaseTokenFilterFactory implements
     if(pic != null) {
       if(pic.equalsIgnoreCase(TRUE)) ignoreCase = true;
       else if(pic.equalsIgnoreCase(FALSE)) ignoreCase = false;
-      else throw new SolrException(ErrorCode.UNKNOWN, "Unknown value for "+PARAM_IGNORE_CASE+": "+pic+". Must be true or false");
+      else throw new InitializationException("Unknown value for " + PARAM_IGNORE_CASE + ": " + pic + ". Must be true or false");
     }
 
     try {

@@ -79,7 +77,7 @@ public class HunspellStemFilterFactory extends BaseTokenFilterFactory implements
       }
       this.dictionary = new HunspellDictionary(loader.openResource(affixFile), dictionaries, luceneMatchVersion, ignoreCase);
     } catch (Exception e) {
-      throw new RuntimeException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
+      throw new InitializationException("Unable to load hunspell data! [dictionary=" + args.get("dictionary") + ",affix=" + affixFile + "]", e);
     }
   }
 
@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.solr.analysis.BaseTokenFilterFactory;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 
 import java.util.Map;

@@ -79,8 +78,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends BaseTokenFilterFa
     encoding = args.get("encoding");
     hypFile = args.get("hyphenator");
     if (null == hypFile) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
-          "Missing required parameter: hyphenator");
+      throw new InitializationException("Missing required parameter: hyphenator");
     }
 
     minWordSize = getInt("minWordSize", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);

@@ -102,7 +100,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends BaseTokenFilterFa
       is.setSystemId(hypFile);
       hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
     } catch (Exception e) { // TODO: getHyphenationTree really shouldn't throw "Exception"
-      throw new RuntimeException(e);
+      throw new InitializationException("Exception thrown while loading dictionary and hyphenation file", e);
     } finally {
       IOUtils.closeQuietly(stream);
     }
@@ -0,0 +1,32 @@
+package org.apache.solr.analysis;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Exception representing an error occurring during the initialization of a Factory.
+ */
+public class InitializationException extends RuntimeException {
+
+  public InitializationException(String message) {
+    super(message);
+  }
+
+  public InitializationException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}
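Because InitializationException extends RuntimeException, call sites that previously caught RuntimeException continue to work, while code that cares can now catch configuration failures specifically. A minimal self-contained demonstration (assumes only the class added above):

public class InitializationExceptionDemo {
  public static void main(String[] args) {
    // Unchecked: existing catch (RuntimeException e) blocks still match.
    try {
      throw new InitializationException("Missing required argument 'synonyms'.");
    } catch (RuntimeException e) {
      System.err.println("caught as RuntimeException: " + e.getMessage());
    }

    // The two-argument constructor preserves the original checked
    // exception as the cause, so no diagnostic information is lost.
    try {
      throw new InitializationException("IOException thrown while loading rules",
          new java.io.IOException("disk error"));
    } catch (InitializationException e) {
      System.err.println(e.getMessage() + " (cause: " + e.getCause() + ")");
    }
  }
}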
@@ -19,7 +19,6 @@ package org.apache.solr.analysis;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.ja.JapaneseKatakanaStemFilter;
-import org.apache.solr.common.SolrException;
 
 import java.util.Map;
 

@@ -44,8 +43,7 @@ public class JapaneseKatakanaStemFilterFactory extends BaseTokenFilterFactory {
     super.init(args);
     minimumLength = getInt(MINIMUM_LENGTH_PARAM, JapaneseKatakanaStemFilter.DEFAULT_MINIMUM_LENGTH);
     if (minimumLength < 2) {
-      throw new SolrException(SolrException.ErrorCode.UNKNOWN,
-          "Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
+      throw new InitializationException("Illegal " + MINIMUM_LENGTH_PARAM + " " + minimumLength + " (must be 2 or greater)");
     }
   }
 
@@ -55,7 +55,7 @@ public class JapanesePartOfSpeechStopFilterFactory extends BaseTokenFilterFactor
         stopTags.add(new String(chars));
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading tags", e);
     }
   }
 
@@ -32,7 +32,6 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
 import org.apache.lucene.analysis.ja.dict.UserDictionary;
 import org.apache.lucene.util.IOUtils;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 
 /**

@@ -80,7 +79,7 @@ public class JapaneseTokenizerFactory extends BaseTokenizerFactory implements Re
         userDictionary = null;
       }
     } catch (Exception e) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+      throw new InitializationException("Exception thrown while loading dictionary", e);
     }
   }
 
@@ -55,7 +55,7 @@ public class KeepWordFilterFactory extends BaseTokenFilterFactory implements Res
       try {
         words = getWordSet(loader, wordFiles, ignoreCase);
       } catch (IOException e) {
-        throw new RuntimeException(e);
+        throw new InitializationException("IOException thrown while loading words", e);
       }
     }
   }
@@ -48,7 +48,7 @@ public class KeywordMarkerFilterFactory extends BaseTokenFilterFactory implement
       try {
         protectedWords = getWordSet(loader, wordFiles, ignoreCase);
       } catch (IOException e) {
-        throw new RuntimeException(e);
+        throw new InitializationException("IOException thrown while loading protected words", e);
       }
     }
   }
@@ -71,7 +71,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
       }
     }
     catch( IOException e ){
-      throw new RuntimeException( e );
+      throw new InitializationException("IOException thrown while loading mappings", e);
     }
     normMap = new NormalizeCharMap();
     parseRules( wlist, normMap );

@@ -89,7 +89,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
     for( String rule : rules ){
       Matcher m = p.matcher( rule );
       if( !m.find() )
-        throw new RuntimeException( "Invalid Mapping Rule : [" + rule + "], file = " + mapping );
+        throw new InitializationException("Invalid Mapping Rule : [" + rule + "], file = " + mapping);
       normMap.add( parseString( m.group( 1 ) ), parseString( m.group( 2 ) ) );
     }
   }

@@ -104,7 +104,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
       char c = s.charAt( readPos++ );
       if( c == '\\' ){
         if( readPos >= len )
-          throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
+          throw new InitializationException("Invalid escaped char in [" + s + "]");
         c = s.charAt( readPos++ );
         switch( c ) {
           case '\\' : c = '\\'; break;

@@ -116,7 +116,7 @@ public class MappingCharFilterFactory extends BaseCharFilterFactory implements
           case 'f' : c = '\f'; break;
           case 'u' :
             if( readPos + 3 >= len )
-              throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
+              throw new InitializationException("Invalid escaped char in [" + s + "]");
             c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
             readPos += 4;
             break;
@@ -51,7 +51,7 @@ public class PathHierarchyTokenizerFactory extends BaseTokenizerFactory {
     String v = args.get( "delimiter" );
     if( v != null ){
       if( v.length() != 1 ){
-        throw new IllegalArgumentException( "delimiter should be a char. \"" + v + "\" is invalid" );
+        throw new InitializationException("delimiter should be a char. \"" + v + "\" is invalid");
       }
       else{
         delimiter = v.charAt(0);

@@ -64,7 +64,7 @@ public class PathHierarchyTokenizerFactory extends BaseTokenizerFactory {
     v = args.get( "replace" );
     if( v != null ){
       if( v.length() != 1 ){
-        throw new IllegalArgumentException( "replace should be a char. \"" + v + "\" is invalid" );
+        throw new InitializationException("replace should be a char. \"" + v + "\" is invalid");
       }
       else{
         replacement = v.charAt(0);
@@ -51,7 +51,7 @@ public class PatternReplaceCharFilterFactory extends BaseCharFilterFactory {
     try {
       p = Pattern.compile(args.get("pattern"));
     } catch (PatternSyntaxException e) {
-      throw new RuntimeException
+      throw new InitializationException
         ("Configuration Error: 'pattern' can not be parsed in " +
          this.getClass().getName(), e);
     }
@@ -47,7 +47,7 @@ public class PatternReplaceFilterFactory extends BaseTokenFilterFactory {
     try {
       p = Pattern.compile(args.get("pattern"));
     } catch (PatternSyntaxException e) {
-      throw new RuntimeException
+      throw new InitializationException
        ("Configuration Error: 'pattern' can not be parsed in " +
         this.getClass().getName(), e);
     }

@@ -62,7 +62,7 @@ public class PatternReplaceFilterFactory extends BaseTokenFilterFactory {
     if (r.equals("first")) {
       all = false;
     } else {
-      throw new RuntimeException
+      throw new InitializationException
        ("Configuration Error: 'replace' must be 'first' or 'all' in "
         + this.getClass().getName());
     }
@@ -24,7 +24,6 @@ import java.util.regex.Pattern;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.pattern.PatternTokenizer;
-import org.apache.solr.common.SolrException;
 
 
 /**

@@ -81,7 +80,7 @@ public class PatternTokenizerFactory extends BaseTokenizerFactory
     this.args = args;
     String regex = args.get( PATTERN );
     if( regex == null ) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "missing required argument: "+PATTERN );
+      throw new InitializationException("missing required argument: " + PATTERN);
     }
     int flags = 0; // TODO? -- read flags from config CASE_INSENSITIVE, etc
     pattern = Pattern.compile( regex, flags );

@@ -93,7 +92,7 @@ public class PatternTokenizerFactory extends BaseTokenizerFactory
         group = Integer.parseInt( g );
       }
       catch( Exception ex ) {
-        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "invalid group argument: "+g );
+        throw new InitializationException("invalid group argument: " + g);
       }
     }
   }

@@ -105,7 +104,7 @@ public class PatternTokenizerFactory extends BaseTokenizerFactory
     try {
       return new PatternTokenizer(in, pattern, group);
     } catch( IOException ex ) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, ex );
+      throw new InitializationException("IOException thrown creating PatternTokenizer instance", ex);
     }
   }
 }
@@ -33,7 +33,6 @@ import org.apache.commons.codec.language.RefinedSoundex;
 import org.apache.commons.codec.language.Soundex;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.phonetic.PhoneticFilter;
-import org.apache.solr.common.SolrException;
 
 /**
  * Factory for {@link PhoneticFilter}.

@@ -87,8 +86,8 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
 
     String name = args.get( ENCODER );
     if( name == null ) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Missing required parameter: "+ENCODER
-          +" ["+registry.keySet()+"]" );
+      throw new InitializationException("Missing required parameter: " + ENCODER
+          + " [" + registry.keySet() + "]");
     }
     Class<? extends Encoder> clazz = registry.get(name.toUpperCase(Locale.ENGLISH));
     if( clazz == null ) {

@@ -111,7 +110,7 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
       }
     }
     catch (Exception e) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Error initializing: "+name + "/"+clazz, e);
+      throw new InitializationException("Error initializing: " + name + "/" + clazz, e);
     }
   }
 

@@ -123,11 +122,11 @@ public class PhoneticFilterFactory extends BaseTokenFilterFactory
       try {
         clazz = lookupEncoder(name);
       } catch (ClassNotFoundException cnfe) {
-        throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Unknown encoder: "+name +" ["+registry.keySet()+"]" );
+        throw new InitializationException("Unknown encoder: " + name + " [" + registry.keySet() + "]");
       }
     }
     catch (ClassCastException e) {
-      throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "Not an encoder: "+name +" ["+registry.keySet()+"]" );
+      throw new InitializationException("Not an encoder: " + name + " [" + registry.keySet() + "]");
     }
     return clazz;
   }
@@ -21,8 +21,6 @@ import java.io.Reader;
 import java.util.Map;
 
 import org.apache.lucene.analysis.ru.RussianLetterTokenizer;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 
 /** @deprecated Use {@link StandardTokenizerFactory} instead.
  *  This tokenizer has no Russian-specific functionality.

@@ -34,7 +32,7 @@ public class RussianLetterTokenizerFactory extends BaseTokenizerFactory {
   public void init(Map<String, String> args) {
     super.init(args);
     if (args.containsKey("charset"))
-      throw new SolrException(ErrorCode.SERVER_ERROR,
+      throw new InitializationException(
           "The charset parameter is no longer supported. "
           + "Please process your documents as Unicode instead.");
     assureMatchVersion();
@@ -21,8 +21,6 @@ package org.apache.solr.analysis;
 
 import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.TokenStream;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.SolrException.ErrorCode;
 
 import java.util.Map;
 

@@ -51,20 +49,17 @@ public class ShingleFilterFactory extends BaseTokenFilterFactory {
     maxShingleSize = getInt("maxShingleSize",
                             ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE);
     if (maxShingleSize < 2) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Invalid maxShingleSize (" + maxShingleSize
+      throw new InitializationException("Invalid maxShingleSize (" + maxShingleSize
           + ") - must be at least 2");
     }
     minShingleSize = getInt("minShingleSize",
                             ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE);
     if (minShingleSize < 2) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Invalid minShingleSize (" + minShingleSize
+      throw new InitializationException("Invalid minShingleSize (" + minShingleSize
          + ") - must be at least 2");
     }
     if (minShingleSize > maxShingleSize) {
-      throw new SolrException(ErrorCode.SERVER_ERROR,
-          "Invalid minShingleSize (" + minShingleSize
+      throw new InitializationException("Invalid minShingleSize (" + minShingleSize
          + ") - must be no greater than maxShingleSize ("
          + maxShingleSize + ")");
     }
@@ -20,7 +20,6 @@ package org.apache.solr.analysis;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 

@@ -50,7 +49,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
   public void inform(ResourceLoader loader) {
     String synonyms = args.get("synonyms");
     if (synonyms == null)
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required argument 'synonyms'.");
+      throw new InitializationException("Missing required argument 'synonyms'.");
     boolean ignoreCase = getBoolean("ignoreCase", false);
     boolean expand = getBoolean("expand", true);
 

@@ -84,7 +83,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
         }
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading synonym rules", e);
     }
     return wlist;
   }

@@ -106,7 +105,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
     List<List<String>> target;
 
     if (mapping.size() > 2) {
-      throw new RuntimeException("Invalid Synonym Rule:" + rule);
+      throw new InitializationException("Invalid Synonym Rule:" + rule);
     } else if (mapping.size()==2) {
       source = getSynList(mapping.get(0), synSep, tokFactory);
       target = getSynList(mapping.get(1), synSep, tokFactory);

@@ -160,7 +159,7 @@ final class SlowSynonymFilterFactory extends BaseTokenFilterFactory implements R
         tokList.add( termAtt.toString() );
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while tokenizing source", e);
     }
     finally{
       reader.close();
@@ -71,7 +71,7 @@ class SlowSynonymMap {
       }
 
       if (currMap.synonyms != null && !mergeExisting) {
-        throw new RuntimeException("SynonymFilter: there is already a mapping for " + singleMatch);
+        throw new InitializationException("SynonymFilter: there is already a mapping for " + singleMatch);
       }
       List<Token> superset = currMap.synonyms==null ? replacement :
                              mergeTokens(Arrays.asList(currMap.synonyms), replacement);
@@ -56,7 +56,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory implemen
       try {
        protectedWords = getWordSet(loader, wordFiles, false);
       } catch (IOException e) {
-        throw new RuntimeException(e);
+        throw new InitializationException("IOException thrown while loading protected words", e);
       }
     }
   }

@@ -72,7 +72,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory implemen
     try {
       stemClass = Class.forName("org.tartarus.snowball.ext." + language + "Stemmer");
     } catch (ClassNotFoundException e) {
-      throw new RuntimeException("Can't find class for stemmer language " + language, e);
+      throw new InitializationException("Can't find class for stemmer language " + language, e);
     }
   }
 

@@ -81,7 +81,7 @@ public class SnowballPorterFilterFactory extends BaseTokenFilterFactory implemen
     try {
      program = (SnowballProgram)stemClass.newInstance();
     } catch (Exception e) {
-      throw new RuntimeException("Error instantiating stemmer for language " + language + "from class " +stemClass, e);
+      throw new InitializationException("Error instantiating stemmer for language " + language + "from class " + stemClass, e);
     }
 
     if (protectedWords != null)
@@ -61,7 +61,7 @@ public class StemmerOverrideFilterFactory extends BaseTokenFilterFactory impleme
         }
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading dictionary", e);
     }
   }
 }
@@ -61,7 +61,7 @@ public class StopFilterFactory extends BaseTokenFilterFactory implements Resourc
         stopWords = getWordSet(loader, stopWordFiles, ignoreCase);
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading stopwords", e);
     }
   } else {
     stopWords = new CharArraySet(luceneMatchVersion, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase);
@@ -50,7 +50,7 @@ public class SynonymFilterFactory extends BaseTokenFilterFactory implements Reso
       // check if you use the new optional arg "format". this makes no sense for the old one,
       // as its wired to solr's synonyms format only.
       if (args.containsKey("format") && !args.get("format").equals("solr")) {
-        throw new IllegalArgumentException("You must specify luceneMatchVersion >= 3.4 to use alternate synonyms formats");
+        throw new InitializationException("You must specify luceneMatchVersion >= 3.4 to use alternate synonyms formats");
       }
       delegator = new SlowSynonymFilterFactory();
     }
@@ -21,7 +21,6 @@ import java.util.Map;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.TrimFilter;
-import org.apache.solr.common.SolrException;
 
 /**
  * Factory for {@link TrimFilter}.

@@ -49,7 +48,7 @@ public class TrimFilterFactory extends BaseTokenFilterFactory {
       updateOffsets = Boolean.valueOf( v );
     }
     catch( Exception ex ) {
-      throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Error reading updateOffsets value. Must be true or false.", ex );
+      throw new InitializationException("Error reading updateOffsets value. Must be true or false.", ex);
     }
   }
 }
@@ -20,7 +20,6 @@ package org.apache.solr.analysis;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.TypeTokenFilter;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.common.util.StrUtils;
 import org.apache.solr.util.plugin.ResourceLoaderAware;
 

@@ -58,10 +57,10 @@ public class TypeTokenFilterFactory extends BaseTokenFilterFactory implements Re
         }
       }
     } catch (IOException e) {
-      throw new RuntimeException(e);
+      throw new InitializationException("IOException thrown while loading types", e);
     }
   } else {
-    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Missing required parameter: types.");
+    throw new InitializationException("Missing required parameter: types.");
   }
 }
 
@@ -63,7 +63,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
       try {
         protectedWords = getWordSet(loader, wordFiles, false);
       } catch (IOException e) {
-        throw new RuntimeException(e);
+        throw new InitializationException("IOException thrown while loading protected words", e);
       }
     }
     String types = args.get(TYPES);

@@ -77,7 +77,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
         }
         typeTable = parseTypes(wlist);
       } catch (IOException e) {
-        throw new RuntimeException(e);
+        throw new InitializationException("IOException while loading types", e);
       }
     }
   }

@@ -132,13 +132,13 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
     for( String rule : rules ){
       Matcher m = typePattern.matcher(rule);
       if( !m.find() )
-        throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]");
+        throw new InitializationException("Invalid Mapping Rule : [" + rule + "]");
       String lhs = parseString(m.group(1).trim());
       Byte rhs = parseType(m.group(2).trim());
       if (lhs.length() != 1)
-        throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
+        throw new InitializationException("Invalid Mapping Rule : [" + rule + "]. Only a single character is allowed.");
       if (rhs == null)
-        throw new RuntimeException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
+        throw new InitializationException("Invalid Mapping Rule : [" + rule + "]. Illegal type.");
       typeMap.put(lhs.charAt(0), rhs);
     }
 

@@ -178,7 +178,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
       char c = s.charAt( readPos++ );
       if( c == '\\' ){
         if( readPos >= len )
-          throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
+          throw new InitializationException("Invalid escaped char in [" + s + "]");
         c = s.charAt( readPos++ );
         switch( c ) {
           case '\\' : c = '\\'; break;

@@ -189,7 +189,7 @@ public class WordDelimiterFilterFactory extends BaseTokenFilterFactory implement
           case 'f' : c = '\f'; break;
           case 'u' :
             if( readPos + 3 >= len )
-              throw new RuntimeException( "Invalid escaped char in [" + s + "]" );
+              throw new InitializationException("Invalid escaped char in [" + s + "]");
             c = (char)Integer.parseInt( s.substring( readPos, readPos + 4 ), 16 );
             readPos += 4;
             break;
@@ -28,7 +28,7 @@ public class TestMappingCharFilterFactory extends LuceneTestCase {
       f.parseString( "\\" );
       fail( "escape character cannot be alone." );
     }
-    catch( RuntimeException expected ){}
+    catch (InitializationException expected) {}
 
     assertEquals( "unexpected escaped characters",
         "\\\"\n\t\r\b\f", f.parseString( "\\\\\\\"\\n\\t\\r\\b\\f" ) );

@@ -41,7 +41,7 @@ public class TestMappingCharFilterFactory extends LuceneTestCase {
       f.parseString( "\\u000" );
       fail( "invalid length check." );
     }
-    catch( RuntimeException expected ){}
+    catch (InitializationException expected) {}
 
     try {
       f.parseString( "\\u123x" );
@@ -43,7 +43,7 @@ public class TestSynonymMap extends LuceneTestCase {
       SlowSynonymFilterFactory.parseRules( rules, synMap, "=>", ",", true, null);
       fail( "RuntimeException must be thrown." );
     }
-    catch( RuntimeException expected ){}
+    catch(InitializationException expected) {}
   }
 
   public void testReadMappingRules() throws Exception {
@@ -19,7 +19,6 @@ package org.apache.solr.analysis;
 
 import org.apache.lucene.analysis.NumericTokenStream;
 import org.apache.solr.common.ResourceLoader;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.core.SolrResourceLoader;
 import org.junit.Test;
 

@@ -91,8 +90,8 @@ public class TestTypeTokenFilterFactory extends BaseTokenTestCase {
       args.put("enablePositionIncrements", "false");
       typeTokenFilterFactory.init(args);
       typeTokenFilterFactory.inform(new SolrResourceLoader(null, null));
-      fail("not supplying 'types' parameter should cause a SolrException");
-    } catch (SolrException e) {
+      fail("not supplying 'types' parameter should cause an InitializationException");
+    } catch (InitializationException e) {
       // everything ok
     }
   }
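The test changes mirror the substitution: expected-exception blocks now catch InitializationException. A condensed sketch of the updated TestTypeTokenFilterFactory case above (a standalone class assuming JUnit 4 and the classes named in the diff; setup details beyond the hunk are elided):

import java.util.HashMap;
import java.util.Map;

import org.apache.solr.analysis.InitializationException;
import org.apache.solr.analysis.TypeTokenFilterFactory;
import org.apache.solr.core.SolrResourceLoader;
import org.junit.Test;

import static org.junit.Assert.fail;

// Condensed from the hunk above: a factory configured without its
// required "types" parameter must fail with InitializationException,
// not the SolrException it threw before this commit.
public class MissingTypesParameterSketch {
  @Test
  public void testMissingTypesParameter() {
    TypeTokenFilterFactory typeTokenFilterFactory = new TypeTokenFilterFactory();
    Map<String, String> args = new HashMap<String, String>();
    args.put("enablePositionIncrements", "false");
    try {
      typeTokenFilterFactory.init(args);
      typeTokenFilterFactory.inform(new SolrResourceLoader(null, null));
      fail("not supplying 'types' parameter should cause an InitializationException");
    } catch (InitializationException e) {
      // everything ok
    }
  }
}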