mirror of https://github.com/apache/lucene.git
add "throws IOException" so that these analyzers compile again against Lucene 1.4
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@150991 13f79535-47bb-0310-9956-ffa450edef68
parent 14f0da2aa2
commit 4006836a60
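Why the throws clauses are needed: the Lucene 1.4 WordlistLoader.getWordSet(File) that the new code calls declares a checked IOException, so any constructor or setter invoking it must catch the exception or declare it. Below is a minimal sketch of such a loader, a hypothetical stand-in for the real org.apache.lucene.analysis.de.WordlistLoader rather than its actual source:

    import java.io.BufferedReader;
    import java.io.File;
    import java.io.FileReader;
    import java.io.IOException;
    import java.util.HashSet;

    // Hypothetical stand-in for WordlistLoader. Because getWordSet(File)
    // declares IOException, a caller like the old
    //   public BrazilianAnalyzer( File stopwords ) { ... }
    // fails to compile ("unreported exception java.io.IOException") until
    // it also declares the exception, which is what this commit adds.
    class WordlistLoaderSketch {
        static HashSet getWordSet(File wordfile) throws IOException {
            HashSet words = new HashSet();
            BufferedReader reader = new BufferedReader(new FileReader(wordfile));
            try {
                String word;
                while ((word = reader.readLine()) != null) {
                    words.add(word.trim()); // one word per line
                }
            } finally {
                reader.close();
            }
            return words;
        }
    }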
BrazilianAnalyzer.java
@@ -62,6 +62,7 @@ import org.apache.lucene.analysis.de.WordlistLoader;
 import org.apache.lucene.analysis.standard.StandardFilter;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import java.io.File;
+import java.io.IOException;
 import java.io.Reader;
 import java.util.Hashtable;
 import java.util.HashSet;
@@ -72,7 +73,7 @@ import java.util.Set;
  * will not be indexed at all) and an external list of exclusions (word that will
  * not be stemmed, but indexed).
  *
  * @author João Kramer
  */
 public final class BrazilianAnalyzer extends Analyzer {
 
@@ -134,8 +135,8 @@ public final class BrazilianAnalyzer extends Analyzer {
 	/**
 	 * Builds an analyzer with the given stop words.
 	 */
-	public BrazilianAnalyzer( File stopwords ) {
-		stoptable = new HashSet(WordlistLoader.getWordtable( stopwords ).keySet());
+	public BrazilianAnalyzer( File stopwords ) throws IOException {
+		stoptable = WordlistLoader.getWordSet( stopwords );
 	}
 
 	/**
@@ -153,8 +154,8 @@ public final class BrazilianAnalyzer extends Analyzer {
 	/**
 	 * Builds an exclusionlist from the words contained in the given file.
 	 */
-	public void setStemExclusionTable( File exclusionlist ) {
-		excltable = new HashSet(WordlistLoader.getWordtable( exclusionlist ).keySet());
+	public void setStemExclusionTable( File exclusionlist ) throws IOException {
+		excltable = WordlistLoader.getWordSet( exclusionlist );
 	}
 
 	/**
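The effect on client code: building a BrazilianAnalyzer from a stop word file can now fail with a checked exception. A hypothetical caller (file names illustrative, package path assumed from the class names in the hunks):

    import java.io.File;
    import java.io.IOException;
    import org.apache.lucene.analysis.br.BrazilianAnalyzer;

    class AnalyzerSetup {
        public static void main(String[] args) {
            try {
                // Both calls now declare throws IOException, so callers
                // must catch it or declare it themselves.
                BrazilianAnalyzer analyzer =
                        new BrazilianAnalyzer(new File("stopwords_br.txt"));
                analyzer.setStemExclusionTable(new File("exclusions_br.txt"));
            } catch (IOException e) {
                System.err.println("could not read word list: " + e.getMessage());
            }
        }
    }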
CzechAnalyzer.java
@@ -136,8 +136,8 @@ public final class CzechAnalyzer extends Analyzer {
 	/**
 	 * Builds an analyzer with the given stop words.
 	 */
-	public CzechAnalyzer( File stopwords ) {
-		stoptable = new HashSet(WordlistLoader.getWordtable( stopwords ).keySet());
+	public CzechAnalyzer( File stopwords ) throws IOException {
+		stoptable = WordlistLoader.getWordSet( stopwords );
 	}
 
 	/**
@@ -182,7 +182,7 @@ public final class CzechAnalyzer extends Analyzer {
 		TokenStream result = new StandardTokenizer( reader );
 		result = new StandardFilter( result );
 		result = new LowerCaseFilter( result );
 		result = new StopFilter( result, stoptable );
 		return result;
 	}
 }
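The last hunk shows CzechAnalyzer's unchanged analysis chain, where each filter wraps the stream produced by the previous stage. For reference, a sketch of the enclosing method (class skeleton, field, and signature assumed; only the chain itself appears in the diff):

    import java.io.Reader;
    import java.util.Set;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.LowerCaseFilter;
    import org.apache.lucene.analysis.StopFilter;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.standard.StandardFilter;
    import org.apache.lucene.analysis.standard.StandardTokenizer;

    final class CzechAnalyzerSketch extends Analyzer {
        private Set stoptable; // stop words, e.g. from WordlistLoader.getWordSet

        public TokenStream tokenStream(String fieldName, Reader reader) {
            TokenStream result = new StandardTokenizer(reader); // raw tokens
            result = new StandardFilter(result);                // normalize tokens
            result = new LowerCaseFilter(result);               // lowercase everything
            result = new StopFilter(result, stoptable);         // drop stop words
            return result;
        }
    }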
|