fix last attempted AttributeFactory link fix and update some other simple javadoc

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@807841 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Mark Robert Miller 2009-08-25 22:27:31 +00:00
parent 235791086f
commit a1bd9277ca
8 changed files with 8 additions and 16 deletions

View File

@@ -157,7 +157,7 @@ public class WikipediaTokenizer extends Tokenizer {
/**
* Creates a new instance of the {@link org.apache.lucene.wikipedia.analysis.WikipediaTokenizer}. Attaches the
* <conde>input</code> to a the newly created JFlex scanner. Uses the given {@link AttributeSource.AttributeFactory}.
* <code>input</code> to the newly created JFlex scanner. Uses the given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}.
*
* @param input The input
* @param tokenOutput One of {@link #TOKENS_ONLY}, {@link #UNTOKENIZED_ONLY}, {@link #BOTH}

View File

@@ -20,7 +20,6 @@ package org.apache.lucene.analysis;
import java.io.Reader;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.AttributeSource.AttributeFactory;
/** A LetterTokenizer is a tokenizer that divides text at non-letters. That's
to say, it defines tokens as maximal strings of adjacent letters, as defined
@@ -40,7 +39,7 @@ public class LetterTokenizer extends CharTokenizer {
super(source, in);
}
/** Construct a new LetterTokenizer using a given {@link AttributeSource.AttributeFactory}. */
/** Construct a new LetterTokenizer using a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}. */
public LetterTokenizer(AttributeFactory factory, Reader in) {
super(factory, in);
}

View File

@@ -20,7 +20,6 @@ package org.apache.lucene.analysis;
import java.io.Reader;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.AttributeSource.AttributeFactory;
/**
* LowerCaseTokenizer performs the function of LetterTokenizer
@@ -43,7 +42,7 @@ public final class LowerCaseTokenizer extends LetterTokenizer {
super(source, in);
}
/** Construct a new LowerCaseTokenizer using a given {@link AttributeSource.AttributeFactory}. */
/** Construct a new LowerCaseTokenizer using a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}. */
public LowerCaseTokenizer(AttributeFactory factory, Reader in) {
super(factory, in);
}

View File

@@ -20,7 +20,6 @@ package org.apache.lucene.analysis;
import java.io.Reader;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.AttributeSource.AttributeFactory;
/** A WhitespaceTokenizer is a tokenizer that divides text at whitespace.
* Adjacent sequences of non-Whitespace characters form tokens. */
@@ -36,7 +35,7 @@ public class WhitespaceTokenizer extends CharTokenizer {
super(source, in);
}
/** Construct a new WhitespaceTokenizer using a given {@link AttributeSource.AttributeFactory}. */
/** Construct a new WhitespaceTokenizer using a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}. */
public WhitespaceTokenizer(AttributeFactory factory, Reader in) {
super(factory, in);
}

View File

@@ -142,7 +142,7 @@ public class StandardTokenizer extends Tokenizer {
}
/**
* Creates a new StandardTokenizer with a given {@link AttributeSource.AttributeFactory}
* Creates a new StandardTokenizer with a given {@link org.apache.lucene.util.AttributeSource.AttributeFactory}
*/
public StandardTokenizer(AttributeFactory factory, Reader input, boolean replaceInvalidAcronym) {
super(factory);

View File

@@ -619,10 +619,10 @@ public interface FieldCache {
/**
* If non-null, FieldCacheImpl will warn whenever
* entries are created that are not sane according to
* {@link FieldCacheSanityChecker}.
* {@link org.apache.lucene.util.FieldCacheSanityChecker}.
*/
public void setInfoStream(PrintStream stream);
/** @see setInfoStream */
/** counterpart of {@link #setInfoStream(PrintStream)} */
public PrintStream getInfoStream();
}

View File

@@ -543,7 +543,7 @@ public abstract class Similarity implements Serializable {
*
* @param terms the terms in the phrase
* @param searcher the document collection being searched
* @return
* @return idf score factor
* @deprecated see {@link #idfExplain(Collection, Searcher)}
*/
public float idf(Collection terms, Searcher searcher) throws IOException {

View File

@@ -51,11 +51,6 @@ public abstract class Weight implements Serializable {
/**
* An explanation of the score computation for the named document.
*
* Until 3.0, null may be passed in situations where the Searcher is not
* available, so impls must only use Searcher to generate optional
* explain info.
*
* @param searcher over the index or null
* @param reader sub-reader containing the give doc
* @param doc
* @return an Explanation for the score