diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java
index d7d577eb8a8..96fc2dff18a 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JakartaRegexpCapabilities.java
@@ -27,10 +27,10 @@ import java.lang.reflect.Method;
 /**
  * Implementation tying Jakarta
- * Regexp to RegexQuery. Jakarta Regepx internally supports a
- * {@link #prefix} implementation which can offer performance gains under
- * certain circumstances. Yet, the implementation appears to be rather shaky as
- * it doesn't always provide a prefix even if one would exist.
+ * Regexp to RegexQuery. Jakarta Regexp internally supports a
+ * {@link RegexCapabilities.RegexMatcher#prefix()} implementation which can offer
+ * performance gains under certain circumstances. Yet, the implementation appears
+ * to be rather shaky as it doesn't always provide a prefix even if one would exist.
  */
 public class JakartaRegexpCapabilities implements RegexCapabilities {
   private static Field prefixField;
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java
index b125208d6b4..f1a238d74cc 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/JavaUtilRegexCapabilities.java
@@ -27,8 +27,9 @@ import org.apache.lucene.util.UnicodeUtil;
  * An implementation tying Java's built-in java.util.regex to RegexQuery.
  *
  * Note that because this implementation currently only returns null from
- * {@link #prefix} that queries using this implementation will enumerate and
- * attempt to {@link #match} each term for the specified field in the index.
+ * {@link RegexCapabilities.RegexMatcher#prefix()} that queries using this implementation
+ * will enumerate and attempt to {@link RegexCapabilities.RegexMatcher#match(BytesRef)} each
+ * term for the specified field in the index.
  */
 public class JavaUtilRegexCapabilities implements RegexCapabilities {
   private int flags = 0;
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java
index 679fde5cd60..e8c6daed713 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexCapabilities.java
@@ -27,7 +27,7 @@ import org.apache.lucene.util.BytesRef;
  */
 public interface RegexCapabilities extends Serializable {
   /**
-   * Called by the constructor of {@link RegexTermEnum} allowing
+   * Called by the constructor of {@link RegexTermsEnum} allowing
    * implementations to cache a compiled version of the regular
    * expression pattern.
    *
@@ -38,7 +38,7 @@ public interface RegexCapabilities extends Serializable {
   public interface RegexMatcher {
     /**
      *
-     * @param string
+     * @param term The term in bytes.
      * @return true if string matches the pattern last passed to {@link #compile}.
      */
     public boolean match(BytesRef term);
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexQuery.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexQuery.java
index 733d81af488..61e26c73ada 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexQuery.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/regex/RegexQuery.java
@@ -29,7 +29,7 @@ import java.io.IOException;
  * The expressions supported depend on the regular expression implementation
  * used by way of the {@link RegexCapabilities} interface.
  *
- * @see RegexTermEnum
+ * @see RegexTermsEnum
  */
 public class RegexQuery extends MultiTermQuery implements RegexQueryCapable {
   private RegexCapabilities regexImpl = new JavaUtilRegexCapabilities();
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
index 53df3c30bc6..d8d4af6e044 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/similar/MoreLikeThis.java
@@ -520,7 +520,6 @@ public final class MoreLikeThis {
    *
    * @param stopWords set of stopwords, if null it means to allow stop words
    *
-   * @see org.apache.lucene.analysis.StopFilter#makeStopSet StopFilter.makeStopSet()
    * @see #getStopWords
    */
   public void setStopWords(Set<?> stopWords) {
diff --git a/lucene/contrib/spellchecker/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java b/lucene/contrib/spellchecker/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
index e0d829e78a7..3e4f8ee0306 100644
--- a/lucene/contrib/spellchecker/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
+++ b/lucene/contrib/spellchecker/src/java/org/apache/lucene/search/spell/DirectSpellChecker.java
@@ -56,7 +56,7 @@ public class DirectSpellChecker {
    * Note: this is the fastest distance metric, because Levenshtein is used
    * to draw candidates from the term dictionary: this just re-uses the scoring.
    *
-   * Note also that this metric differs in subtle ways from {@link LevenshteinDistance}:
+   * Note also that this metric differs in subtle ways from {@link LevensteinDistance}:
    *
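To illustrate the prefix()/match() contract documented in the regex hunks above: a minimal sketch, assuming the contrib/queries API of this branch (a RegexQuery built over a Term, plus setRegexImplementation). Because JavaUtilRegexCapabilities returns null from prefix(), such a query enumerates every term of the field and match()es each one; JakartaRegexpCapabilities may instead supply a constant prefix that narrows the enumeration.

import org.apache.lucene.index.Term;
import org.apache.lucene.search.regex.JavaUtilRegexCapabilities;
import org.apache.lucene.search.regex.RegexQuery;

public class RegexQueryExample {
  public static RegexQuery colourQuery() {
    // Matches the terms "color" and "colour" in the "title" field.
    RegexQuery query = new RegexQuery(new Term("title", "colou?r"));
    // No prefix() optimization here: every term in "title" is match()ed.
    query.setRegexImplementation(new JavaUtilRegexCapabilities());
    return query;
  }
}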
searcher.docFreq(term)
as the docFreq.
*/
public IDFExplanation idfExplain(final Term term, final Searcher searcher) throws IOException {
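The idfExplain fragment above returns searcher.docFreq(term) as the docFreq. A hedged sketch of what such a default implementation looks like, patterned on the 3.x-era Similarity code; the surrounding abstract class here is illustrative, not the patched file itself.

import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.search.Searcher;

public abstract class IdfExplainSketch {
  protected abstract float idf(int docFreq, int numDocs);

  public IDFExplanation idfExplain(final Term term, final Searcher searcher)
      throws IOException {
    final int df = searcher.docFreq(term);  // used as the docFreq
    final int max = searcher.maxDoc();
    final float idf = idf(df, max);
    return new IDFExplanation() {
      @Override
      public String explain() {
        return "idf(docFreq=" + df + ", maxDocs=" + max + ")";
      }
      @Override
      public float getIdf() {
        return idf;
      }
    };
  }
}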
diff --git a/lucene/src/java/org/apache/lucene/search/SortField.java b/lucene/src/java/org/apache/lucene/search/SortField.java
index b61a1d8cd9a..58c4582c0e3 100644
--- a/lucene/src/java/org/apache/lucene/search/SortField.java
+++ b/lucene/src/java/org/apache/lucene/search/SortField.java
@@ -199,7 +199,6 @@ implements Serializable {
* Sort by a cached entry value
* @param creator
* @param reverse
- * @param sortMissingLast
*/
public SortField( CachedArrayCreator<?> creator, boolean reverse )
{
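For readers unfamiliar with SortField: the creator-based constructor above is an in-flux dev-branch API, so this hedged sketch shows reverse sorting via the long-standing field/type constructor instead; the creator variant behaves analogously, with reverse=true flipping the order. The "price" field name is a made-up example.

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;

public class SortFieldExample {
  // Highest price first: reverse=true inverts the natural int ordering.
  public static Sort byPriceDescending() {
    return new Sort(new SortField("price", SortField.INT, true));
  }
}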
diff --git a/lucene/src/java/org/apache/lucene/util/BytesRefHash.java b/lucene/src/java/org/apache/lucene/util/BytesRefHash.java
index df2aa6b57d3..a34ad775178 100644
--- a/lucene/src/java/org/apache/lucene/util/BytesRefHash.java
+++ b/lucene/src/java/org/apache/lucene/util/BytesRefHash.java
@@ -35,9 +35,8 @@ import static org.apache.lucene.util.ByteBlockPool.BYTE_BLOCK_SHIFT;
*
 *
 * Note: The maximum capacity {@link BytesRef} instance passed to
- * {@link #add(BytesRef)} must not be longer than {@link #BYTES_BLOCK_SIZE}-2 (
- * {@value #BYTES_BLOCK_SIZE}-2. The internal storage is limited to 2GB total
- * byte storage.
+ * {@link #add(BytesRef)} must not be longer than {@link ByteBlockPool#BYTE_BLOCK_SIZE}-2.
+ * The internal storage is limited to 2GB total byte storage.
 *
 *
 * @lucene.internal
@@ -244,8 +243,7 @@ public final class BytesRefHash {
   }
 
   /**
-   * Clears the {@link BytesRef} and returns an {@link Entry} which maps to the
-   * given {@link BytesRef}
+   * Clears the {@link BytesRef} which maps to the given {@link BytesRef}
    */
   public void clear(boolean resetPool) {
     lastCount = count;
@@ -306,8 +304,8 @@ public final class BytesRefHash {
    * haven't been hashed before.
    *
    * @throws MaxBytesLengthExceededException
-   *           if the given bytes are > 2 +
-   *           {@link ByteBlockPool#BYTE_BLOCK_SIZE}
+   *           if the given bytes are >
+   *           {@link ByteBlockPool#BYTE_BLOCK_SIZE} - 2
    */
   public int add(BytesRef bytes, int code) {
     assert bytesStart != null : "Bytesstart is null - not initialized";
@@ -496,7 +494,7 @@ public final class BytesRefHash {
 
   /**
    * Thrown if a {@link BytesRef} exceeds the {@link BytesRefHash} limit of
-   * {@link #BYTES_BLOCK_SIZE}-2 ({@value #BYTES_BLOCK_SIZE}-2).
+   * {@link ByteBlockPool#BYTE_BLOCK_SIZE}-2.
    */
   @SuppressWarnings("serial")
   public static class MaxBytesLengthExceededException extends RuntimeException {
diff --git a/lucene/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java b/lucene/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java
index 2d33fea2003..5346f9fcc3a 100644
--- a/lucene/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java
+++ b/lucene/src/java/org/apache/lucene/util/RecyclingByteBlockAllocator.java
@@ -70,8 +70,7 @@ public final class RecyclingByteBlockAllocator extends ByteBlockPool.Allocator {
 
   /**
    * Creates a new {@link RecyclingByteBlockAllocator} with a block size of
-   * {@link ByteBlockPool#BYTE_BLOCK_SIZE} (
-   * {@value ByteBlockPool#BYTE_BLOCK_SIZE}, upper buffered docs limit of
+   * {@link ByteBlockPool#BYTE_BLOCK_SIZE}, upper buffered docs limit of
    * {@link #DEFAULT_BUFFERED_BLOCKS} ({@value #DEFAULT_BUFFERED_BLOCKS}) and a
    * {@link DummyConcurrentLock} instance.
    *
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
index 63142468cec..948f9b9cc45 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/sinks/DateRecognizerSinkFilter.java
@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.util.AttributeSource;
 
 /**
- * Attempts to parse the {@link org.apache.lucene.analysis.Token#termBuffer()} as a Date using a {@link java.text.DateFormat}.
+ * Attempts to parse the {@link CharTermAttribute#buffer()} as a Date using a {@link java.text.DateFormat}.
  * If the value is a Date, it will add it to the sink.
 *
 *
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro
index b30dc206383..32b23906aa9 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/ASCIITLD.jflex-macro
@@ -15,8 +15,8 @@
  */
 
 // Generated from IANA Root Zone Database
Many applications have specific tokenizer needs. If this tokenizer does
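To make the BytesRefHash limit in the hunks above concrete: a hedged sketch, assuming the no-arg constructor and the convention that add() returns a negative id when the value is already present.

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.BytesRefHash.MaxBytesLengthExceededException;

public class BytesRefHashExample {
  public static void main(String[] args) {
    BytesRefHash hash = new BytesRefHash();
    int id = hash.add(new BytesRef("lucene"));   // fresh value: id >= 0
    int dup = hash.add(new BytesRef("lucene"));  // duplicate: negative id
    System.out.println("id=" + id + " dup=" + dup);
    try {
      // Anything longer than ByteBlockPool.BYTE_BLOCK_SIZE - 2 is rejected.
      hash.add(new BytesRef(new byte[1 << 16]));
    } catch (MaxBytesLengthExceededException expected) {
      System.out.println("too long: " + expected.getMessage());
    }
  }
}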
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
index f9744361f20..8449e1267e1 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.java
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
 
 package org.apache.lucene.analysis.standard;
 
@@ -42,8 +42,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
- * characters, consider using the ICU4J-backed implementation in contrib/icu
- * ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+ * characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+ * (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -2388,7 +2388,8 @@ public final class StandardTokenizerImpl implements StandardTokenizerInterface {
 * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
 * together as as a single token rather than broken up, because the logic
 * required to break them at word boundaries is too complex for UAX#29.
- * {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+ *
+ * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
 */
 public static final int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
index 181cafcff61..49eed5ac78b 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/StandardTokenizerImpl.jflex
@@ -40,8 +40,8 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
- * characters, consider using the ICU4J-backed implementation in contrib/icu
- * ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+ * characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+ * (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -162,7 +162,8 @@ EMAIL = {EMAILlocalPart} "@" ({DomainNameStrict} | {EMAILbracketedHost})
 * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
 * together as as a single token rather than broken up, because the logic
 * required to break them at word boundaries is too complex for UAX#29.
- * {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+ *
+ * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
 */
 public static final int SOUTH_EAST_ASIAN_TYPE = StandardTokenizer.SOUTHEAST_ASIAN;
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29Tokenizer.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29Tokenizer.java
index b5a8575b180..18d047f5cbd 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29Tokenizer.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/standard/UAX29Tokenizer.java
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
 
 package org.apache.lucene.analysis.standard;
 
@@ -48,8 +48,8 @@ import org.apache.lucene.util.AttributeSource;
 * characters (characters above the Basic Multilingual Plane, which contains
 * those up to and including U+FFFF), this scanner will not recognize them
 * properly. If you need to be able to process text containing supplementary
- * characters, consider using the ICU4J-backed implementation in contrib/icu
- * ({@link org.apache.lucene.analysis.icu.segmentation.ICUTokenizer})
+ * characters, consider using the ICU4J-backed implementation in modules/analysis/icu
+ * (org.apache.lucene.analysis.icu.segmentation.ICUTokenizer)
 * instead of this class, since the ICU4J-backed implementation does not have
 * this limitation.
 */
@@ -389,7 +389,8 @@ public final class UAX29Tokenizer extends Tokenizer {
 * scripts (Thai, Lao, Myanmar, Khmer, etc.). Sequences of these are kept
 * together as as a single token rather than broken up, because the logic
 * required to break them at word boundaries is too complex for UAX#29.
- * {@see Unicode Line Breaking Algorithm http://www.unicode.org/reports/tr14/#SA}
+ *
+ * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
*/
public static final String SOUTH_EAST_ASIAN_TYPE = "<SOUTHEAST_ASIAN>";
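The supplementary-character hunks above repeatedly recommend ICUTokenizer over the JFlex scanners. Whichever tokenizer is chosen, consumption is the same attribute loop; here is a minimal sketch using only the stable TokenStream API.

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TokenLoopExample {
  // Works for StandardTokenizer, UAX29Tokenizer, or the recommended
  // ICUTokenizer; only how the stream is constructed differs.
  public static void printTokens(TokenStream stream) throws IOException {
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      System.out.println(term.toString());
    }
    stream.end();
    stream.close();
  }
}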
+ * See Unicode Line Breaking Algorithm: http://www.unicode.org/reports/tr14/#SA
 */
 public static final String SOUTH_EAST_ASIAN_TYPE = "<SOUTHEAST_ASIAN>";
- * @return true if s starts with suffix
+ * @param prefix Prefix string to test
+ * @return true if s starts with prefix
*/
public static boolean startsWith(char s[], int len, String prefix) {
final int prefixLen = prefix.length();
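The startsWith hunk above is truncated after its first statement; the following is a hedged completion, the obvious implementation of the documented contract rather than necessarily the file's exact body.

public static boolean startsWith(char s[], int len, String prefix) {
  final int prefixLen = prefix.length();
  if (prefixLen > len) {
    return false;  // prefix cannot fit in the valid region of s
  }
  for (int i = 0; i < prefixLen; i++) {
    if (s[i] != prefix.charAt(i)) {
      return false;
    }
  }
  return true;
}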
diff --git a/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java b/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
index 88cc43ed415..7927d3d59bb 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
+++ b/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.java
@@ -1,4 +1,4 @@
-/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/2/10 6:07 PM */
+/* The following code was generated by JFlex 1.5.0-SNAPSHOT on 10/3/10 9:07 AM */
package org.apache.lucene.analysis.wikipedia;
@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* This class is a scanner generated by
* JFlex 1.5.0-SNAPSHOT
- * on 10/2/10 6:07 PM from the specification file
+ * on 10/3/10 9:07 AM from the specification file
* C:/Users/rmuir/workspace/lucene-clean/modules/analysis/common/src/java/org/apache/lucene/analysis/wikipedia/WikipediaTokenizerImpl.jflex
*/
class WikipediaTokenizerImpl {
diff --git a/modules/analysis/common/src/java/org/apache/lucene/collation/package.html b/modules/analysis/common/src/java/org/apache/lucene/collation/package.html
index b0c6f8016a8..03a92b211f5 100644
--- a/modules/analysis/common/src/java/org/apache/lucene/collation/package.html
+++ b/modules/analysis/common/src/java/org/apache/lucene/collation/package.html
@@ -43,8 +43,8 @@