add "throws IOException" so that these analyzers compile again against Lucene 1.4

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@150991 13f79535-47bb-0310-9956-ffa450edef68
Daniel Naber 2004-08-04 17:57:14 +00:00
parent 14f0da2aa2
commit 4006836a60
2 changed files with 9 additions and 8 deletions
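With this change the File-based constructors propagate the checked IOException thrown by WordlistLoader.getWordSet(File), so calling code has to catch it or declare it. A minimal caller-side sketch, assuming the org.apache.lucene.analysis.br package and a hypothetical stopwords.txt file:

import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;

public class StopwordFileExample {
    public static void main(String[] args) {
        try {
            // The File constructor now declares IOException because
            // WordlistLoader.getWordSet(File) reads the file eagerly.
            BrazilianAnalyzer analyzer = new BrazilianAnalyzer(new File("stopwords.txt"));
            System.out.println("analyzer ready: " + analyzer);
        } catch (IOException e) {
            // Reached when the stop-word file is missing or unreadable.
            e.printStackTrace();
        }
    }
}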

BrazilianAnalyzer.java

@@ -62,6 +62,7 @@ import org.apache.lucene.analysis.de.WordlistLoader;
import org.apache.lucene.analysis.standard.StandardFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.util.Hashtable;
import java.util.HashSet;
@@ -72,7 +73,7 @@ import java.util.Set;
* will not be indexed at all) and an external list of exclusions (words that will
* not be stemmed, but indexed).
*
* @author João Kramer
* @author João Kramer
*/
public final class BrazilianAnalyzer extends Analyzer {
@@ -134,8 +135,8 @@ public final class BrazilianAnalyzer extends Analyzer {
/**
* Builds an analyzer with the given stop words.
*/
public BrazilianAnalyzer( File stopwords ) {
stoptable = new HashSet(WordlistLoader.getWordtable( stopwords ).keySet());
public BrazilianAnalyzer( File stopwords ) throws IOException {
stoptable = WordlistLoader.getWordSet( stopwords );
}
/**
@@ -153,8 +154,8 @@ public final class BrazilianAnalyzer extends Analyzer {
/**
* Builds an exclusionlist from the words contained in the given file.
*/
public void setStemExclusionTable( File exclusionlist ) {
excltable = new HashSet(WordlistLoader.getWordtable( exclusionlist ).keySet());
public void setStemExclusionTable( File exclusionlist ) throws IOException {
excltable = WordlistLoader.getWordSet( exclusionlist );
}
/**

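setStemExclusionTable(File) picks up the same signature change, so configuring an exclusion list (words that are indexed but not stemmed) now also surfaces IOException. A sketch of wiring both files together, assuming hypothetical stopwords.txt and exclusions.txt files:

import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.br.BrazilianAnalyzer;

public class StemExclusionExample {
    public static void main(String[] args) throws IOException {
        // Both calls below can now fail with IOException, so this method
        // simply declares it instead of catching it.
        BrazilianAnalyzer analyzer = new BrazilianAnalyzer(new File("stopwords.txt"));
        // Words listed in exclusions.txt are kept in the index verbatim
        // rather than being reduced to their stems.
        analyzer.setStemExclusionTable(new File("exclusions.txt"));
    }
}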
CzechAnalyzer.java

@@ -136,8 +136,8 @@ public final class CzechAnalyzer extends Analyzer {
/**
* Builds an analyzer with the given stop words.
*/
public CzechAnalyzer( File stopwords ) {
stoptable = new HashSet(WordlistLoader.getWordtable( stopwords ).keySet());
public CzechAnalyzer( File stopwords ) throws IOException {
stoptable = WordlistLoader.getWordSet( stopwords );
}
/**
@@ -182,7 +182,7 @@ public final class CzechAnalyzer extends Analyzer {
TokenStream result = new StandardTokenizer( reader );
result = new StandardFilter( result );
result = new LowerCaseFilter( result );
result = new StopFilter( result, stoptable );
result = new StopFilter( result, stoptable );
return result;
}
}
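The tokenStream chain above (StandardTokenizer, StandardFilter, LowerCaseFilter, StopFilter) is what an IndexWriter runs text through at indexing time. A minimal indexing sketch, assuming the Lucene 1.4-era IndexWriter(String, Analyzer, boolean) constructor and the Field.Text(String, String) factory; the index path and sample sentence are placeholders:

import java.io.IOException;
import org.apache.lucene.analysis.cz.CzechAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;

public class CzechIndexingExample {
    public static void main(String[] args) throws IOException {
        // Lucene 1.4 style: IndexWriter(String path, Analyzer analyzer, boolean create).
        IndexWriter writer = new IndexWriter("czech-index", new CzechAnalyzer(), true);

        Document doc = new Document();
        // Field.Text("contents", ...) is tokenized, i.e. run through the
        // analyzer chain shown above before the terms reach the index.
        doc.add(Field.Text("contents", "Příliš žluťoučký kůň úpěl ďábelské ódy"));

        writer.addDocument(doc);
        writer.close();
    }
}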