tiny whitespace and javadoc fixes

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@347992 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Daniel Naber 2005-11-21 21:41:43 +00:00
parent bfde3257dc
commit dea69e3360
3 changed files with 8 additions and 7 deletions

View File

@@ -17,8 +17,8 @@ package org.apache.lucene.analysis;
*/ */
/** /**
* A filter that replaces accented characters in the ISO Latin 1 character set by * A filter that replaces accented characters in the ISO Latin 1 character set
* their unaccented equivalent. The case will not be altered. * (ISO-8859-1) by their unaccented equivalent. The case will not be altered.
* <p> * <p>
* For instance, '&agrave;' will be replaced by 'a'. * For instance, '&agrave;' will be replaced by 'a'.
* <p> * <p>

View File

@@ -23,7 +23,8 @@ import java.io.Reader;
* Emits the entire input as a single token. * Emits the entire input as a single token.
*/ */
public class KeywordTokenizer extends Tokenizer { public class KeywordTokenizer extends Tokenizer {
private static final int DEFAULT_BUFFER_SIZE=256;
private static final int DEFAULT_BUFFER_SIZE = 256;
private boolean done; private boolean done;
private final char[] buffer; private final char[] buffer;
@@ -34,8 +35,8 @@ public class KeywordTokenizer extends Tokenizer {
public KeywordTokenizer(Reader input, int bufferSize) { public KeywordTokenizer(Reader input, int bufferSize) {
super(input); super(input);
this.buffer=new char[bufferSize]; this.buffer = new char[bufferSize];
this.done=false; this.done = false;
} }
public Token next() throws IOException { public Token next() throws IOException {

View File

@ -37,7 +37,7 @@ public final class LengthFilter extends TokenFilter {
{ {
super(in); super(in);
this.min = min; this.min = min;
this.max =max; this.max = max;
} }
/** /**
@@ -49,7 +49,7 @@ public final class LengthFilter extends TokenFilter {
for (Token token = input.next(); token != null; token = input.next()) for (Token token = input.next(); token != null; token = input.next())
{ {
int len = token.termText().length(); int len = token.termText().length();
if ( len >= min && len <= max) { if (len >= min && len <= max) {
return token; return token;
} }
// note: else we ignore it but should we index each part of it? // note: else we ignore it but should we index each part of it?