javadocs fixes

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1182505 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2011-10-12 18:20:41 +00:00
parent 26bba41a3b
commit 9ba4ce2ed5
24 changed files with 48 additions and 48 deletions

@@ -55,7 +55,7 @@ public class FieldType implements IndexableFieldType {
   /**
    * Prevents future changes. Note, it is recommended that this is called once
-   * the FieldTypes's properties have been set, to prevent unintential state
+   * the FieldTypes's properties have been set, to prevent unintentional state
    * changes.
    */
   public void freeze() {
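
In practice the freeze contract described above looks like this (a minimal sketch; the setter names are assumed from the surrounding FieldType class):

    FieldType ft = new FieldType();
    ft.setIndexed(true);    // configure while still mutable
    ft.setStored(true);
    ft.setTokenized(true);
    ft.freeze();            // from here on, setters throw IllegalStateException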

@@ -101,7 +101,7 @@ import java.util.Comparator;
 public class DocTermOrds {

-  // Term ords are shifted by this, internally, to reseve
+  // Term ords are shifted by this, internally, to reserve
   // values 0 (end term) and 1 (index is a pointer into byte array)
   private final static int TNUM_OFFSET = 2;

@@ -411,7 +411,7 @@ final class DocumentsWriter {
      */
     try {
       synchronized (ticketQueue) {
-        // Each flush is assigned a ticket in the order they accquire the ticketQueue lock
+        // Each flush is assigned a ticket in the order they acquire the ticketQueue lock
         ticket = new FlushTicket(flushingDWPT.prepareFlush(), true);
         ticketQueue.add(ticket);
       }

@@ -870,7 +870,7 @@ public abstract class IndexReader implements Cloneable,Closeable {
    * (ie, obtained by a call to {@link
    * IndexWriter#getReader}, or by calling {@link #openIfChanged}
    * on a near real-time reader), then this method checks if
-   * either a new commmit has occurred, or any new
+   * either a new commit has occurred, or any new
    * uncommitted changes have taken place via the writer.
    * Note that even if the writer has only performed
    * merging, this method will still return false.</p>
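
A typical caller of this check is a near-real-time refresh loop; a minimal sketch of the openIfChanged idiom the javadoc refers to (writer is a placeholder):

    IndexReader reader = IndexReader.open(writer, true); // NRT reader
    // ... later, to refresh; returns null when nothing changed
    // (in particular, merge-only activity does not count):
    IndexReader newReader = IndexReader.openIfChanged(reader);
    if (newReader != null) {
      reader.close();
      reader = newReader;
    }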

@@ -764,7 +764,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
     if (reader != null) {
       // the pulled reader could be from an in-flight merge
       // while the info we see has already new applied deletes after a commit
-      // we max out the delets since deletes never shrink
+      // we max out the deletes since deletes never shrink
       return Math.max(info.getDelCount(), reader.numDeletedDocs());
     } else {
       return info.getDelCount();

@@ -40,7 +40,7 @@ public interface IndexableField {
   // TODO: add attrs to this API?

-  /* Field name */
+  /** Field name */
   public String name();

   // NOTE: if doc/field impl has the notion of "doc level boost"
@@ -49,24 +49,24 @@ public interface IndexableField {
   /** Field boost (you must pre-multiply in any doc boost). */
   public float boost();

-  /* Non-null if this field has a binary value */
+  /** Non-null if this field has a binary value */
   public BytesRef binaryValue();

-  /* Non-null if this field has a string value */
+  /** Non-null if this field has a string value */
   public String stringValue();

-  /* Non-null if this field has a Reader value */
+  /** Non-null if this field has a Reader value */
   public Reader readerValue();

   // Numeric field:

-  /* True if this field is numeric */
+  /** True if this field is numeric */
   public boolean numeric();

-  /* Numeric {@link NumericField.DataType}; only used if
+  /** Numeric {@link org.apache.lucene.document.NumericField.DataType}; only used if
    * the field is numeric */
   public NumericField.DataType numericDataType();

-  /* Numeric value; only used if the field is numeric */
+  /** Numeric value; only used if the field is numeric */
   public Number numericValue();

   /**
@@ -76,10 +76,10 @@ public interface IndexableField {
    */
   public IndexableFieldType fieldType();

-  /* Non-null if doc values should be indexed */
+  /** Non-null if doc values should be indexed */
   public PerDocFieldValues docValues();

-  /* DocValues type; only used if docValues is non-null */
+  /** DocValues type; only used if docValues is non-null */
   public ValueType docValuesType();

   /**

@@ -21,28 +21,28 @@ import org.apache.lucene.index.FieldInfo.IndexOptions;
 public interface IndexableFieldType {

-  /* True if this field should be indexed (inverted) */
+  /** True if this field should be indexed (inverted) */
   public boolean indexed();

-  /* True if the field's value should be stored */
+  /** True if the field's value should be stored */
   public boolean stored();

-  /* True if this field's value should be analyzed */
+  /** True if this field's value should be analyzed */
   public boolean tokenized();

-  /* True if term vectors should be indexed */
+  /** True if term vectors should be indexed */
   public boolean storeTermVectors();

-  /* True if term vector offsets should be indexed */
+  /** True if term vector offsets should be indexed */
   public boolean storeTermVectorOffsets();

-  /* True if term vector positions should be indexed */
+  /** True if term vector positions should be indexed */
   public boolean storeTermVectorPositions();

-  /* True if norms should not be indexed */
+  /** True if norms should not be indexed */
   public boolean omitNorms();

-  /* {@link IndexOptions}, describing what should be
+  /** {@link IndexOptions}, describing what should be
    * recorded into the inverted index */
   public IndexOptions indexOptions();
 }

@@ -23,7 +23,7 @@ import java.io.IOException;
  * A {@link MergeScheduler} which never executes any merges. It is also a
  * singleton and can be accessed through {@link NoMergeScheduler#INSTANCE}. Use
  * it if you want to prevent an {@link IndexWriter} from ever executing merges,
- * irregardless of the {@link MergePolicy} used. Note that you can achieve the
+ * regardless of the {@link MergePolicy} used. Note that you can achieve the
  * same thing by using {@link NoMergePolicy}, however with
  * {@link NoMergeScheduler} you also ensure that no unnecessary code of any
  * {@link MergeScheduler} implementation is ever executed. Hence it is
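
For context, both no-op classes mentioned above are wired in through IndexWriterConfig; a minimal sketch (analyzer and dir are placeholders; the Version constant is assumed for this trunk):

    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    conf.setMergePolicy(NoMergePolicy.COMPOUND_FILES);  // never selects merges
    conf.setMergeScheduler(NoMergeScheduler.INSTANCE);  // never executes them
    IndexWriter writer = new IndexWriter(dir, conf);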

@@ -41,10 +41,10 @@ import org.apache.lucene.util.Version;
 * a Lucene index. It is highly recommended to use a dedicated directory (and on
 * stable storage as well) for persisting the snapshots' information, and not
 * reuse the content index directory, or otherwise conflicts and index
- * corruptions will occur.
+ * corruption will occur.
 * <p>
 * <b>NOTE:</b> you should call {@link #close()} when you're done using this
- * class for safetyness (it will close the {@link IndexWriter} instance used).
+ * class for safety (it will close the {@link IndexWriter} instance used).
 */
 public class PersistentSnapshotDeletionPolicy extends SnapshotDeletionPolicy {
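
The NOTE matters because the policy keeps its own IndexWriter open on the snapshots directory. A usage sketch, assuming the 3.x/4.0-era constructor signature (snapshotDir, indexDir, and analyzer are placeholders):

    PersistentSnapshotDeletionPolicy psdp = new PersistentSnapshotDeletionPolicy(
        new KeepOnlyLastCommitDeletionPolicy(), snapshotDir,
        IndexWriterConfig.OpenMode.CREATE_OR_APPEND, Version.LUCENE_40);
    IndexWriter writer = new IndexWriter(indexDir,
        new IndexWriterConfig(Version.LUCENE_40, analyzer).setIndexDeletionPolicy(psdp));
    IndexCommit commit = psdp.snapshot("backup-1"); // persisted; survives restarts
    // ... copy the commit's files somewhere safe, then:
    psdp.release("backup-1");
    psdp.close(); // per the NOTE above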

@@ -695,7 +695,7 @@ public final class SegmentInfo implements Cloneable {
   * <p>Current format looks like
   * <code>_a(3.1):c45/4->_1</code>, which means the segment's
   * name is <code>_a</code>; it was created with Lucene 3.1 (or
-  * '?' if it's unkown); it's using compound file
+  * '?' if it's unknown); it's using compound file
   * format (would be <code>C</code> if not compound); it
   * has 45 documents; it has 4 deletions (this part is
   * left off when there are no deletions); it's using the
@@ -718,7 +718,7 @@ public final class SegmentInfo implements Cloneable {
       }
     } catch (Throwable e) {
       // Messy: because getHasVectors may be used in an
-      // un-thread-safe way, and may attempt to open an fnm
+      // thread-unsafe way, and may attempt to open an fnm
       // file that has since (legitimately) been deleted by
       // IndexWriter, instead of throwing these exceptions
       // up, just add v? to indicate we don't know if this

@@ -39,7 +39,7 @@ import java.util.ArrayList;
  * <p>For normal merging, this policy first computes a
  * "budget" of how many segments are allowed by be in the
  * index. If the index is over-budget, then the policy
- * sorts segments by decresing size (pro-rating by percent
+ * sorts segments by decreasing size (pro-rating by percent
  * deletes), and then finds the least-cost merge. Merge
  * cost is measured by a combination of the "skew" of the
  * merge (size of largest seg divided by smallest seg),

@@ -766,7 +766,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
         arcs[arcIdx] = new FST.Arc<BytesRef>();
       }

-      // TODO: if the automaon is "smallish" we really
+      // TODO: if the automaton is "smallish" we really
       // should use the terms index to seek at least to
       // the initial term and likely to subsequent terms
       // (or, maybe just fallback to ATE for such cases).

@@ -75,7 +75,7 @@ import org.apache.lucene.util.fst.FST;
  * queries that rely on advance will (AND BooleanQuery,
  * PhraseQuery) will be relatively slow!
  *
- * <p><b>NOTE</b>: this codec cannot adress more than ~2.1 GB
+ * <p><b>NOTE</b>: this codec cannot address more than ~2.1 GB
  * of postings, because the underlying FST uses an int
  * to address the underlying byte[].
  *

@@ -107,7 +107,7 @@ public final class Bytes {
    * @param bytesUsed
    *          an {@link AtomicLong} instance to track the used bytes within the
    *          {@link Writer}. A call to {@link Writer#finish(int)} will release
-   *          all internally used resources and frees the memeory tracking
+   *          all internally used resources and frees the memory tracking
    *          reference.
    * @param context
    * @return a new {@link Writer} instance

@@ -63,7 +63,7 @@ public class RateLimiter {
    *  with a biggish count, not one byte at a time. */
   public void pause(long bytes) {

-    // TODO: this is purely instantenous rate; maybe we
+    // TODO: this is purely instantaneous rate; maybe we
     // should also offer decayed recent history one?
     final long targetNS = lastNS = lastNS + ((long) (bytes * nsPerByte));
     long curNS = System.nanoTime();
@@ -71,7 +71,7 @@ public class RateLimiter {
       lastNS = curNS;
     }

-    // While loop because Thread.sleep doesn't alway sleep
+    // While loop because Thread.sleep doesn't always sleep
     // enough:
     while(true) {
       final long pauseNS = targetNS - curNS;
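
A sketch of the intended call pattern, assuming a RateLimiter constructed from a target MB/sec rate (buffer and out are placeholders):

    RateLimiter limiter = new RateLimiter(10.0); // ~10 MB/sec
    // Pause with a biggish count, per the javadoc above, not byte at a time:
    limiter.pause(buffer.length);
    out.writeBytes(buffer, buffer.length);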

@@ -79,7 +79,7 @@ public final class BytesRef implements Comparable<BytesRef> {
   /**
    * @param text Initialize the byte[] from the UTF8 bytes
-   * for the provided Sring. This must be well-formed
+   * for the provided String. This must be well-formed
    * unicode text, with no unpaired surrogates or U+FFFF.
    */
   public BytesRef(CharSequence text) {

@@ -322,7 +322,7 @@ final public class BasicOperations {
     return c;
   }

-  /** Returns true if these two auotomata accept exactly the
+  /** Returns true if these two automata accept exactly the
    *  same language. This is a costly computation! Note
    *  also that a1 and a2 will be determinized as a side
    *  effect. */
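
A small illustration of the documented contract (automaton factory names assumed from this package):

    // Both automata accept exactly {"foo"}, so sameLanguage returns true;
    // note both arguments may be determinized as a side effect.
    Automaton a1 = BasicAutomata.makeString("foo");
    Automaton a2 = BasicOperations.concatenate(
        BasicAutomata.makeString("fo"), BasicAutomata.makeString("o"));
    System.out.println(BasicOperations.sameLanguage(a1, a2)); // true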

@@ -25,7 +25,7 @@ import org.apache.lucene.store.DataOutput;
 /**
  * Holds one or two longs for each input term. If it's a
  * single output, Long is returned; else, TwoLongs. Order
- * is preseved in the TwoLongs case, ie .first is the first
+ * is preserved in the TwoLongs case, ie .first is the first
  * input/output added to Builder, and .second is the
  * second. You cannot store 0 output with this (that's
  * reserved to mean "no output")!

@@ -37,7 +37,7 @@ public final class CJKAnalyzer extends StopwordAnalyzerBase {
   /**
    * File containing default CJK stopwords.
    * <p/>
-   * Currently it concains some common English words that are not usually
+   * Currently it contains some common English words that are not usually
    * useful for searching and some double-byte interpunctions.
    */
   public final static String DEFAULT_STOPWORD_FILE = "stopwords.txt";

@@ -225,7 +225,7 @@ public class HunspellDictionary {
   }

   /**
-   * Parses the encoding specificed in the affix file readable through the provided InputStream
+   * Parses the encoding specified in the affix file readable through the provided InputStream
    *
    * @param affix InputStream for reading the affix file
    * @return Encoding specified in the affix file
@@ -277,10 +277,10 @@ public class HunspellDictionary {
   }

   /**
-   * Determines the appropriate {@link FlagParsingStrategy} based on the FLAG definiton line taken from the affix file
+   * Determines the appropriate {@link FlagParsingStrategy} based on the FLAG definition line taken from the affix file
    *
    * @param flagLine Line containing the flag information
-   * @return FlagParsingStrategy that handles parsing flags in the way specified in the FLAG definiton
+   * @return FlagParsingStrategy that handles parsing flags in the way specified in the FLAG definition
    */
   private FlagParsingStrategy getFlagParsingStrategy(String flagLine) {
     String flagType = flagLine.substring(5);

@@ -52,7 +52,7 @@ public class HunspellWord {
   /**
    * Returns the flags associated with the word
    *
-   * @return Flags asssociated with the word
+   * @return Flags associated with the word
    */
   public char[] getFlags() {
     return flags;

@@ -65,13 +65,13 @@ import org.apache.lucene.util.fst.FST;
 * <p><b>NOTE</b>: when a match occurs, the output tokens
 * associated with the matching rule are "stacked" on top of
 * the input stream (if the rule had
- * <code>keepOrig=true</code>) and also on top of aother
+ * <code>keepOrig=true</code>) and also on top of another
 * matched rule's output tokens. This is not a correct
- * solution, as really the output should be an abitrary
+ * solution, as really the output should be an arbitrary
 * graph/lattice. For example, with the above match, you
 * would expect an exact <code>PhraseQuery</code> <code>"y b
 * c"</code> to match the parsed tokens, but it will fail to
- * do so. This limitations is necessary because Lucene's
+ * do so. This limitation is necessary because Lucene's
 * TokenStream (and index) cannot yet represent an arbitrary
 * graph.</p>
 *
@@ -90,7 +90,7 @@ import org.apache.lucene.util.fst.FST;
 // http://en.wikipedia.org/wiki/Aho%E2%80%93Corasick_string_matching_algorithm
 // It improves over the current approach here
 // because it does not fully re-start matching at every
-// token. For exampl,e if one pattern is "a b c x"
+// token. For example if one pattern is "a b c x"
 // and another is "b c d" and the input is "a b c d", on
 // trying to parse "a b c x" but failing when you got to x,
 // rather than starting over again your really should

@@ -44,9 +44,9 @@ import org.apache.lucene.util.fst.FST;
 public class SynonymMap {
   /** for multiword support, you must separate words with this separator */
   public static final char WORD_SEPARATOR = 0;
-  /** map<input word, list<ord>> */
+  /** map&lt;input word, list&lt;ord&gt;&gt; */
   public final FST<BytesRef> fst;
-  /** map<ord, outputword> */
+  /** map&lt;ord, outputword&gt; */
   public final BytesRefHash words;
   /** maxHorizontalContext: maximum context we need on the tokenstream */
   public final int maxHorizontalContext;
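
To make the two maps above concrete: a multiword input is stored as one FST entry, with WORD_SEPARATOR joining its words. A sketch, assuming this module's Builder API (add(CharsRef, CharsRef, boolean) and build()):

    SynonymMap.Builder builder = new SynonymMap.Builder(true); // true = dedup
    // "wi fi" is a single input with WORD_SEPARATOR (char 0) between words:
    builder.add(new CharsRef("wi" + SynonymMap.WORD_SEPARATOR + "fi"),
                new CharsRef("wifi"), true); // true = keepOrig
    SynonymMap map = builder.build(); // fills fst, words, maxHorizontalContext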

@@ -34,7 +34,7 @@ public class CollatedTermAttributeImpl extends CharTermAttributeImpl {
    * @param collator Collation key generator
    */
   public CollatedTermAttributeImpl(Collator collator) {
-    // clone in case JRE doesnt properly sync,
+    // clone in case JRE doesn't properly sync,
     // or to reduce contention in case they do
     this.collator = (Collator) collator.clone();
   }