clear up javadocs warnings/errors

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1003962 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2010-10-03 13:22:51 +00:00
parent 9de7083de0
commit e05117884a
29 changed files with 65 additions and 57 deletions

View File

@@ -27,10 +27,10 @@ import java.lang.reflect.Method;
 /**
  * Implementation tying <a href="http://jakarta.apache.org/regexp">Jakarta
- * Regexp</a> to RegexQuery. Jakarta Regepx internally supports a
- * {@link #prefix} implementation which can offer performance gains under
- * certain circumstances. Yet, the implementation appears to be rather shaky as
- * it doesn't always provide a prefix even if one would exist.
+ * Regexp</a> to RegexQuery. Jakarta Regexp internally supports a
+ * {@link RegexCapabilities.RegexMatcher#prefix()} implementation which can offer
+ * performance gains under certain circumstances. Yet, the implementation appears
+ * to be rather shaky as it doesn't always provide a prefix even if one would exist.
  */
 public class JakartaRegexpCapabilities implements RegexCapabilities {
   private static Field prefixField;
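
Editor's note, for context only (not part of this commit): a minimal usage sketch showing how the Jakarta implementation is plugged into a RegexQuery so its prefix() optimization can apply. The field name and pattern are hypothetical, and setRegexImplementation() is assumed from the RegexQueryCapable interface.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.regex.JakartaRegexpCapabilities;
import org.apache.lucene.search.regex.RegexQuery;

// Hypothetical field/pattern; the default implementation is
// JavaUtilRegexCapabilities, so the Jakarta one must be set explicitly.
RegexQuery query = new RegexQuery(new Term("body", "http.*"));
query.setRegexImplementation(new JakartaRegexpCapabilities());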

View File

@@ -27,8 +27,9 @@ import org.apache.lucene.util.UnicodeUtil;
  * An implementation tying Java's built-in java.util.regex to RegexQuery.
  *
  * Note that because this implementation currently only returns null from
- * {@link #prefix} that queries using this implementation will enumerate and
- * attempt to {@link #match} each term for the specified field in the index.
+ * {@link RegexCapabilities.RegexMatcher#prefix()} that queries using this implementation
+ * will enumerate and attempt to {@link RegexCapabilities.RegexMatcher#match(BytesRef)} each
+ * term for the specified field in the index.
  */
 public class JavaUtilRegexCapabilities implements RegexCapabilities {
   private int flags = 0;
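
Editor's note (not from this commit): a sketch of the behavior described above. Since every term of the field is enumerated and matched, regex flags are the main knob; the flags constructor is assumed from the flags field shown here.

import java.util.regex.Pattern;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
import org.apache.lucene.search.regex.RegexQuery;

RegexQuery query = new RegexQuery(new Term("body", "http.*"));  // hypothetical field/pattern
query.setRegexImplementation(
    new JavaUtilRegexCapabilities(Pattern.CASE_INSENSITIVE));   // assumed flags constructor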

View File

@@ -27,7 +27,7 @@ import org.apache.lucene.util.BytesRef;
  */
 public interface RegexCapabilities extends Serializable {
   /**
-   * Called by the constructor of {@link RegexTermEnum} allowing
+   * Called by the constructor of {@link RegexTermsEnum} allowing
    * implementations to cache a compiled version of the regular
    * expression pattern.
    *
@@ -38,7 +38,7 @@ public interface RegexCapabilities extends Serializable {
   public interface RegexMatcher {
     /**
      *
-     * @param string
+     * @param term The term in bytes.
      * @return true if string matches the pattern last passed to {@link #compile}.
      */
     public boolean match(BytesRef term);
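
Editor's note: for illustration, a sketch of a custom implementation against this interface. The compile(String) return type and a String-returning prefix() are inferred from the javadocs above, not verified against the full source; the package import is assumed.

import java.util.regex.Pattern;
import org.apache.lucene.search.regex.RegexCapabilities;
import org.apache.lucene.util.BytesRef;

public class SketchRegexCapabilities implements RegexCapabilities {
  public RegexMatcher compile(String pattern) {
    final Pattern p = Pattern.compile(pattern); // cached once per query, as the javadoc intends
    return new RegexMatcher() {
      public boolean match(BytesRef term) {
        return p.matcher(term.utf8ToString()).matches();
      }
      public String prefix() {
        return null; // no prefix optimization: forces full term enumeration
      }
    };
  }
}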

View File

@@ -29,7 +29,7 @@ import java.io.IOException;
  * The expressions supported depend on the regular expression implementation
  * used by way of the {@link RegexCapabilities} interface.
  *
- * @see RegexTermEnum
+ * @see RegexTermsEnum
  */
 public class RegexQuery extends MultiTermQuery implements RegexQueryCapable {
   private RegexCapabilities regexImpl = new JavaUtilRegexCapabilities();

View File

@@ -520,7 +520,6 @@ public final class MoreLikeThis {
    *
    * @param stopWords set of stopwords, if null it means to allow stop words
    *
-   * @see org.apache.lucene.analysis.StopFilter#makeStopSet StopFilter.makeStopSet()
    * @see #getStopWords
    */
   public void setStopWords(Set<?> stopWords) {
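
Editor's note (not from this commit): usage is a plain Set; a small sketch with a hypothetical reader and word list.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.search.similar.MoreLikeThis;

MoreLikeThis mlt = new MoreLikeThis(reader); // reader: an open IndexReader
Set<String> stopWords = new HashSet<String>(Arrays.asList("the", "a", "of"));
mlt.setStopWords(stopWords); // passing null instead would allow stop words through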

View File

@@ -56,7 +56,7 @@ public class DirectSpellChecker {
  * Note: this is the fastest distance metric, because Levenshtein is used
  * to draw candidates from the term dictionary: this just re-uses the scoring.
  * <p>
- * Note also that this metric differs in subtle ways from {@link LevenshteinDistance}:
+ * Note also that this metric differs in subtle ways from {@link LevensteinDistance}:
  * <ul>
  * <li> This metric treats full unicode codepoints as characters, but
  * LevenshteinDistance calculates based on UTF-16 code units.
@@ -286,7 +286,7 @@ public class DirectSpellChecker {
   /**
    * Calls {@link #suggestSimilar(Term, int, IndexReader, boolean)
-   * suggestSimilar(term, numSug, ir, false)
+   * suggestSimilar(term, numSug, ir, false)}
    */
   public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir)
       throws IOException {
@@ -295,7 +295,7 @@ public class DirectSpellChecker {
   /**
    * Calls {@link #suggestSimilar(Term, int, IndexReader, boolean, float)
-   * suggestSimilar(term, numSug, ir, morePopular, this.accuracy)
+   * suggestSimilar(term, numSug, ir, morePopular, this.accuracy)}
   */
   public SuggestWord[] suggestSimilar(Term term, int numSug, IndexReader ir,
       boolean morePopular) throws IOException {
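
Editor's note: the convenience overloads above reduce to the full-argument form, so a minimal call looks like this (hypothetical field and misspelling; signature taken from the diff).

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.SuggestWord;

DirectSpellChecker spell = new DirectSpellChecker();
// reader: an open IndexReader; equivalent to suggestSimilar(term, 5, reader, false)
SuggestWord[] candidates = spell.suggestSimilar(new Term("body", "lucenne"), 5, reader);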

View File

@@ -26,6 +26,8 @@ import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Bits;
+import org.apache.lucene.index.codecs.standard.StandardPostingsWriter; // javadocs
 /** PrefixCodedTermsReader interacts with a single instance
  * of this to manage creation of {@link DocsEnum} and
  * {@link DocsAndPositionsEnum} instances. It provides an

View File

@@ -40,6 +40,8 @@ import org.apache.lucene.util.DoubleBarrelLRUCache;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CodecUtil;
+import org.apache.lucene.index.codecs.standard.StandardPostingsReader; // javadocs
 /** Handles a terms dict, but decouples all details of
  * doc/freqs/positions reading to an instance of {@link
  * StandardPostingsReader}. This class is reusable for

View File

@@ -19,6 +19,8 @@ package org.apache.lucene.index.codecs;
 import org.apache.lucene.index.DocsEnum; // for javadocs
+import org.apache.lucene.index.codecs.standard.StandardPostingsReader; // javadocs
 /**
  * Holds all state required for {@link StandardPostingsReader}
  * to produce a {@link DocsEnum} without re-seeking the

View File

@@ -867,7 +867,7 @@ public class QueryParser implements QueryParserConstants {
   /**
    * Builds a new RegexpQuery instance
-   * @param prefix Regexp term
+   * @param regexp Regexp term
    * @return new RegexpQuery instance
    */
   protected Query newRegexpQuery(Term regexp) {
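
Editor's note: subclasses can hook this factory method to customize the produced query. A sketch, assuming RegexpQuery has a Term constructor, the usual MultiTermQuery rewrite setters, and the pre-rename queryParser package of this era.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.util.Version;

public class FilterRewriteQueryParser extends QueryParser {
  public FilterRewriteQueryParser(Version v, String field, Analyzer a) {
    super(v, field, a);
  }

  @Override
  protected Query newRegexpQuery(Term regexp) {
    RegexpQuery q = new RegexpQuery(regexp);
    q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_FILTER_REWRITE);
    return q;
  }
}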

View File

@@ -891,7 +891,7 @@ public class QueryParser {
   /**
    * Builds a new RegexpQuery instance
-   * @param prefix Regexp term
+   * @param regexp Regexp term
    * @return new RegexpQuery instance
    */
   protected Query newRegexpQuery(Term regexp) {

View File

@@ -775,7 +775,7 @@ public abstract class Similarity implements Serializable {
   /**
    * This method forwards to {@link
-   * idfExplain(Term,Searcher,int)} by passing
+   * #idfExplain(Term,Searcher,int)} by passing
    * <code>searcher.docFreq(term)</code> as the docFreq.
    */
   public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException {
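
Editor's note: per the javadoc above, the two-argument overload amounts to this forwarding (a sketch of the described behavior, not the verbatim source):

public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException {
  return idfExplain(term, searcher, searcher.docFreq(term));
}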

View File

@@ -199,7 +199,6 @@ implements Serializable {
    * Sort by a cached entry value
    * @param creator
    * @param reverse
-   * @param sortMissingLast
    */
   public SortField( CachedArrayCreator<?> creator, boolean reverse )
   {

View File

@@ -35,9 +35,8 @@ import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SHIFT;
  *
  * <p>
  * Note: The maximum capacity {@link BytesRef} instance passed to
- * {@link #add(BytesRef)} must not be longer than {@link #BYTES_BLOCK_SIZE}-2 (
- * {@value #BYTES_BLOCK_SIZE}-2. The internal storage is limited to 2GB total
- * byte storage.
+ * {@link #add(BytesRef)} must not be longer than {@link ByteBlockPool#BYTE_BLOCK_SIZE}-2.
+ * The internal storage is limited to 2GB total byte storage.
  * </p>
  *
  * @lucene.internal
@@ -244,8 +243,7 @@ public final class BytesRefHash {
   }
   /**
-   * Clears the {@link BytesRef} and returns an {@link Entry} which maps to the
-   * given {@link BytesRef}
+   * Clears the {@link BytesRef} which maps to the given {@link BytesRef}
    */
   public void clear(boolean resetPool) {
     lastCount = count;
@@ -306,8 +304,8 @@ public final class BytesRefHash {
    * haven't been hashed before.
    *
    * @throws MaxBytesLengthExceededException
-   *           if the given bytes are > 2 +
-   *           {@link ByteBlockPool#BYTE_BLOCK_SIZE}
+   *           if the given bytes are >
+   *           {@link ByteBlockPool#BYTE_BLOCK_SIZE} - 2
    */
   public int add(BytesRef bytes, int code) {
     assert bytesStart != null : "Bytesstart is null - not initialized";
@@ -496,7 +494,7 @@ public final class BytesRefHash {
   /**
    * Thrown if a {@link BytesRef} exceeds the {@link BytesRefHash} limit of
-   * {@link #BYTES_BLOCK_SIZE}-2 ({@value #BYTES_BLOCK_SIZE}-2).
+   * {@link ByteBlockPool#BYTE_BLOCK_SIZE}-2.
    */
   @SuppressWarnings("serial")
   public static class MaxBytesLengthExceededException extends RuntimeException {
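
Editor's note: a usage sketch for the length limit described above. The no-arg constructor and the add() contract of returning -(id)-1 for already-seen values are assumptions, not confirmed by this diff.

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;

BytesRefHash hash = new BytesRefHash(); // assumed no-arg constructor
try {
  int id = hash.add(new BytesRef("hello"));
  if (id < 0) {
    id = -id - 1; // assumed contract: the value was already present
  }
} catch (BytesRefHash.MaxBytesLengthExceededException e) {
  // the BytesRef was longer than ByteBlockPool.BYTE_BLOCK_SIZE - 2
}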

View File

@@ -70,8 +70,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
   /**
    * Creates a new {@link RecyclingByteBlockAllocator} with a block size of
-   * {@link ByteBlockPool#BYTE_BLOCK_SIZE} (
-   * {@value ByteBlockPool#BYTE_BLOCK_SIZE}, upper buffered docs limit of
+   * {@link ByteBlockPool#BYTE_BLOCK_SIZE}, upper buffered docs limit of
    * {@link #DEFAULT_BUFFERED_BLOCKS} ({@value #DEFAULT_BUFFERED_BLOCKS}) and a
    * {@link DummyConcurrentLock} instance.
    *

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.util.AttributeSource;
 /**
- * Attempts to parse the {@link org.apache.lucene.analysis.Token#termBuffer()} as a Date using a {@link java.text.DateFormat}.
+ * Attempts to parse the {@link CharTermAttribute#buffer()} as a Date using a {@link java.text.DateFormat}.
  * If the value is a Date, it will add it to the sink.
  * <p/>
  *
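
Editor's note: this filter is meant for TeeSinkTokenFilter sinks. A sketch, assuming the newSinkTokenStream(SinkFilter) API and these package locations (neither is confirmed by this diff):

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.sinks.DateRecognizerSinkFilter;
import org.apache.lucene.analysis.sinks.TeeSinkTokenFilter;

TeeSinkTokenFilter tee = new TeeSinkTokenFilter(source); // source: any TokenStream
TokenStream dateTokens = tee.newSinkTokenStream(new DateRecognizerSinkFilter());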

View File

@@ -15,8 +15,8 @@
  */
 // Generated from IANA Root Zone Database <http://www.internic.net/zones/root.zone>
-// file version from Saturday, October 2, 2010 11:34:09 AM UTC
-// generated on Saturday, October 2, 2010 10:07:09 PM UTC
+// file version from Sunday, October 3, 2010 11:34:02 AM UTC
+// generated on Sunday, October 3, 2010 1:07:42 PM UTC
 // by org.apache.lucene.analysis.standard.GenerateJflexTLDMacros
 ASCIITLD = "." (

View File

@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
 package org.apache.lucene.analysis.standard;
@@ -33,7 +33,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 /**
  * This class is a scanner generated by
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 10/2/10 6:07 PM from the specification file
+ * on 10/3/10 9:07 AM from the specification file
  * <tt>C:/Users/rmuir/workspace/lucene-clean/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ClassicTokenizerImpl.jflex</tt>
  */
 class ClassicTokenizerImpl implements StandardTokenizerInterface {

View File

@@ -38,8 +38,8 @@ import java.io.Reader;
  * characters (characters above the Basic Multilingual Plane, which contains
  * those up to and including U+FFFF), this scanner will not recognize them
  * properly. If you need to be able to process text containing supplementary
- * characters, consider using the ICU4J-backed implementation in contrib/icu
- * ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+ * characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+ * (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 * <p>Many applications have specific tokenizer needs. If this tokenizer does
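
Editor's note: since the link above is now plain text, a sketch of the suggested alternative (assuming ICUTokenizer's single-Reader constructor, which this diff does not confirm):

import java.io.StringReader;
import org.apache.lucene.analysis.icu.segmentation.ICUTokenizer;

// Handles supplementary (above-BMP) characters that this scanner does not.
ICUTokenizer tokenizer = new ICUTokenizer(new StringReader("text with supplementary characters"));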

View File

@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
 package org.apache.lucene.analysis.standard;
@@ -42,8 +42,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
-* characters, consider using the ICU4J-backed implementation in contrib/icu
-* ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+* characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+* (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -2388,7 +2388,8 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface {
    * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
    * together as as a single token rather than broken up, because the logic
    * required to break them at word boundaries is too complex for UAX#29.
-   * {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+   * <p>
+   * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
    */
   public static final int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;

View File

@@ -40,8 +40,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
-* characters, consider using the ICU4J-backed implementation in contrib/icu
-* ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+* characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+* (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -162,7 +162,8 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNameStrict} | {EMAILbracketedHost})
 * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
 * together as as a single token rather than broken up, because the logic
 * required to break them at word boundaries is too complex for UAX#29.
-* {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+* <p>
+* See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
 */
 public static final int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;

View File

@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
 package org.apache.lucene.analysis.standard;
@@ -48,8 +48,8 @@ import org.apache.lucene.util.AttributeSource;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
-* characters, consider using the ICU4J-backed implementation in contrib/icu
-* ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+* characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+* (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -389,7 +389,8 @@ public final class UAX29Tokenizer extends Tokenizer {
   * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
   * together as as a single token rather than broken up, because the logic
   * required to break them at word boundaries is too complex for UAX#29.
-  * {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+  * <p>
+  * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
   */
  public static final String SOUTH_EAST_ASIAN_TYPE = "<SOUTHEAST_ASIAN>";

View File

@@ -46,8 +46,8 @@ import org.apache.lucene.util.AttributeSource;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
-* characters, consider using the ICU4J-backed implementation in contrib/icu
-* ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+* characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+* (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -89,7 +89,8 @@ ExtendNumLetEx = \p{WB:ExtendNumLet} [\p{WB:Format}\p{WB:Extend}]
 * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
 * together as as a single token rather than broken up, because the logic
 * required to break them at word boundaries is too complex for UAX#29.
-* {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+* <p>
+* See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
 */
 public static final String SOUTH_EAST_ASIAN_TYPE = "<SOUTHEAST_ASIAN>";

View File

@@ -25,6 +25,8 @@ import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
+import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; // javadocs
 /**
  * An convenience subclass of Analyzer that makes it easy to implement
  * {@link TokenStream} reuse.
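
Editor's note: a sketch of the reuse pattern. The ReusableAnalyzerBase package, the TokenStreamComponents shape, and the Version constant are assumptions; the tokenizer and filter locations follow other hunks in this commit.

import java.io.Reader;
import org.apache.lucene.analysis.ReusableAnalyzerBase;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.util.Version;

public class SketchAnalyzer extends ReusableAnalyzerBase {
  @Override
  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
    Tokenizer source = new WhitespaceTokenizer(Version.LUCENE_40, reader);
    return new TokenStreamComponents(source,
        new LowerCaseFilter(Version.LUCENE_40, source)); // components are reused per thread
  }
}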

View File

@@ -24,8 +24,8 @@ public class StemmerUtil {
    *
    * @param s Input Buffer
    * @param len length of input buffer
-   * @param suffix Suffix string to test
-   * @return true if <code>s</code> starts with <code>suffix</code>
+   * @param prefix Prefix string to test
+   * @return true if <code>s</code> starts with <code>prefix</code>
    */
   public static boolean startsWith(char s[], int len, String prefix) {
     final int prefixLen = prefix.length();
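
Editor's note: called from a stemmer, the shape is typically the following (buffer and length come from a CharTermAttribute inside incrementToken(); the "ge" prefix is just an example):

char[] buffer = termAtt.buffer(); // termAtt: a CharTermAttribute
int length = termAtt.length();
if (StemmerUtil.startsWith(buffer, length, "ge")) {
  // e.g. strip a "ge-" prefix before further stemming
}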

View File

@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
 package org.apache.lucene.analysis.wikipedia;
@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 /**
  * This class is a scanner generated by
  * <a href="http://www.jflex.de/">JFlex</a> 1.5.0-SNAPSHOT
- * on 10/2/10 6:07 PM from the specification file
+ * on 10/3/10 9:07 AM from the specification file
  * <tt>C:/Users/rmuir/workspace/lucene-clean/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex</tt>
  */
 class WikipediaTokenizerImpl {

View File

@@ -43,8 +43,8 @@
   </li>
   <li>
     Effective Locale-specific normalization (case differences, diacritics, etc.).
-    ({@link org.apache.lucene.analysis.LowerCaseFilter} and
-    {@link org.apache.lucene.analysis.ASCIIFoldingFilter} provide these services
+    ({@link org.apache.lucene.analysis.core.LowerCaseFilter} and
+    {@link org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter} provide these services
     in a generic way that doesn't take into account locale-specific needs.)
   </li>
 </ul>
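
Editor's note on the normalization item in the list above: a generic (not locale-aware) chain using the two filters at their new package locations might look like this; the Version constant and the tokenizer choice are illustrative only.

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
import org.apache.lucene.util.Version;

TokenStream ts = new WhitespaceTokenizer(Version.LUCENE_40, new StringReader("Résumé"));
ts = new LowerCaseFilter(Version.LUCENE_40, ts); // case normalization, not locale-aware
ts = new ASCIIFoldingFilter(ts);                 // folds diacritics: "résumé" -> "resume"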

View File

@@ -102,8 +102,8 @@ algorithm.
   </li>
   <li>
     Effective Locale-specific normalization (case differences, diacritics, etc.).
-    ({@link org.apache.lucene.analysis.LowerCaseFilter} and
-    {@link org.apache.lucene.analysis.ASCIIFoldingFilter} provide these services
+    ({@link org.apache.lucene.analysis.core.LowerCaseFilter} and
+    {@link org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter} provide these services
     in a generic way that doesn't take into account locale-specific needs.)
   </li>
 </ul>

View File

@@ -113,7 +113,7 @@ public final class PolishAnalyzer extends StopwordAnalyzerBase {
   /**
    * Builds an analyzer with the given stop words. If a non-empty stem exclusion set is
-   * provided this analyzer will add a {@link KeywordMarkerTokenFilter} before
+   * provided this analyzer will add a {@link KeywordMarkerFilter} before
    * stemming.
    *
    * @param matchVersion lucene compatibility version
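
Editor's note: for context, a hedged construction sketch. The three-argument constructor shape and getDefaultStopSet() are assumptions, not confirmed by this diff.

import java.util.Collections;
import org.apache.lucene.analysis.pl.PolishAnalyzer;
import org.apache.lucene.util.Version;

// Terms in the stem exclusion set get marked by KeywordMarkerFilter and skip stemming.
PolishAnalyzer analyzer = new PolishAnalyzer(Version.LUCENE_40,
    PolishAnalyzer.getDefaultStopSet(),
    Collections.singleton("lucene"));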