small javadoc fixes

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@178832 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Daniel Naber 2005-05-27 23:00:49 +00:00
parent f4e819c006
commit 816f370c0e
4 changed files with 14 additions and 13 deletions

View File

@@ -19,7 +19,8 @@ package org.apache.lucene.analysis;
import java.io.Reader;
/**
* "Tokenizes" the entire stream as a single token.
* "Tokenizes" the entire stream as a single token. This is useful
* for data like zip codes, ids, and some product names.
*/
public class KeywordAnalyzer extends Analyzer {
public TokenStream tokenStream(String fieldName,

View File

@@ -31,16 +31,16 @@ import java.util.HashSet;
import java.util.Set;
/**
* Analyzer for brazilian language. Supports an external list of stopwords (words that
* Analyzer for Brazilian language. Supports an external list of stopwords (words that
* will not be indexed at all) and an external list of exclusions (word that will
* not be stemmed, but indexed).
*
* @author João Kramer
* @author Jo&atilde;o Kramer
*/
public final class BrazilianAnalyzer extends Analyzer {
/**
* List of typical german stopwords.
* List of typical Brazilian stopwords.
*/
public final static String[] BRAZILIAN_STOP_WORDS = {
"a","ainda","alem","ambas","ambos","antes",
@@ -68,6 +68,7 @@ public final class BrazilianAnalyzer extends Analyzer {
* Contains the stopwords used with the StopFilter.
*/
private Set stoptable = new HashSet();
/**
* Contains words that should be indexed but not stemmed.
*/

View File

@@ -105,7 +105,7 @@ public final class CzechAnalyzer extends Analyzer {
/**
* Loads stopwords hash from resource stream (file, database...).
* @param wordfile File containing the wordlist
* @param encoding Encoding used (win-1250, iso-8859-2, ...}, null for default system encoding
* @param encoding Encoding used (win-1250, iso-8859-2, ...), null for default system encoding
*/
public void loadStopWords( InputStream wordfile, String encoding ) {
if ( wordfile == null ) {
@@ -122,7 +122,6 @@ public final class CzechAnalyzer extends Analyzer {
else
isr = new InputStreamReader(wordfile, encoding);
LineNumberReader lnr = new LineNumberReader(isr);
String word;
while ( ( word = lnr.readLine() ) != null ) {
@@ -138,7 +137,7 @@ public final class CzechAnalyzer extends Analyzer {
* Creates a TokenStream which tokenizes all the text in the provided Reader.
*
* @return A TokenStream build from a StandardTokenizer filtered with
* StandardFilter, StopFilter, GermanStemFilter and LowerCaseFilter
* StandardFilter, LowerCaseFilter, and StopFilter
*/
public final TokenStream tokenStream( String fieldName, Reader reader ) {
TokenStream result = new StandardTokenizer( reader );

View File

@@ -32,19 +32,19 @@ import java.util.Hashtable;
import java.util.Set;
/**
* Analyzer for french language. Supports an external list of stopwords (words that
* Analyzer for French language. Supports an external list of stopwords (words that
* will not be indexed at all) and an external list of exclusions (word that will
* not be stemmed, but indexed).
* A default set of stopwords is used unless an other list is specified, the
* exclusionlist is empty by default.
* A default set of stopwords is used unless an alternative list is specified, the
* exclusion list is empty by default.
*
* @author Patrick Talbot (based on Gerhard Schwarz work for German)
* @author Patrick Talbot (based on Gerhard Schwarz's work for German)
* @version $Id$
*/
public final class FrenchAnalyzer extends Analyzer {
/**
* Extended list of typical french stopwords.
* Extended list of typical French stopwords.
*/
public final static String[] FRENCH_STOP_WORDS = {
"a", "afin", "ai", "ainsi", "après", "attendu", "au", "aujourd", "auquel", "aussi",
@@ -142,7 +142,7 @@ public final class FrenchAnalyzer extends Analyzer {
public final TokenStream tokenStream(String fieldName, Reader reader) {
if (fieldName == null) throw new IllegalArgumentException("fieldName must not be null");
if (reader == null) throw new IllegalArgumentException("readermust not be null");
if (reader == null) throw new IllegalArgumentException("reader must not be null");
TokenStream result = new StandardTokenizer(reader);
result = new StandardFilter(result);