LUCENE-2858: Fix Javadoc warnings, still missing some text for new classes

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2858@1238026 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Uwe Schindler 2012-01-30 21:45:08 +00:00
parent 3ea54b5401
commit 18533af348
18 changed files with 78 additions and 66 deletions

View File

@ -26,6 +26,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.lucene.search.SearcherManager; // javadocs
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.IOUtils;
@ -88,7 +89,7 @@ public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
* @throws CorruptIndexException
* @throws IOException if there is a low-level IO error
*
* @see #openIfChanged(IndexReader,IndexWriter,boolean)
* @see #openIfChanged(DirectoryReader,IndexWriter,boolean)
*
* @lucene.experimental
*/
@ -298,9 +299,9 @@ public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
* new MultiReader, etc.
*
* <p>This method is typically far less costly than opening a
* fully new <code>IndexReader</code> as it shares
* fully new <code>DirectoryReader</code> as it shares
* resources (for example sub-readers) with the provided
* <code>IndexReader</code>, when possible.
* <code>DirectoryReader</code>, when possible.
*
* <p>The provided reader is not closed (you are responsible
* for doing so); if a new reader is returned you also
@ -311,7 +312,7 @@ public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
* @throws CorruptIndexException if the index is corrupt
* @throws IOException if there is a low-level IO error
* @return null if there are no changes; else, a new
* IndexReader instance which you must eventually close
* DirectoryReader instance which you must eventually close
*/
public static DirectoryReader openIfChanged(DirectoryReader oldReader) throws IOException {
final DirectoryReader newReader = oldReader.doOpenIfChanged();
@ -324,7 +325,7 @@ public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
* provided reader is searching, open and return a new
* reader; else, return null.
*
* @see #openIfChanged(IndexReader)
* @see #openIfChanged(DirectoryReader)
*/
public static DirectoryReader openIfChanged(DirectoryReader oldReader, IndexCommit commit) throws IOException {
final DirectoryReader newReader = oldReader.doOpenIfChanged(commit);
@ -373,9 +374,9 @@ public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
* <p><b>NOTE</b>: Once the writer is closed, any
* outstanding readers may continue to be used. However,
* if you attempt to reopen any of those readers, you'll
* hit an {@link AlreadyClosedException}.</p>
* hit an {@link org.apache.lucene.store.AlreadyClosedException}.</p>
*
* @return IndexReader that covers entire index plus all
* @return DirectoryReader that covers entire index plus all
* changes made so far by this IndexWriter instance, or
* null if there are no new changes
*
@ -568,7 +569,21 @@ public final class DirectoryReader extends BaseMultiReader<SegmentReader> {
return new ReaderCommit(segmentInfos, directory);
}
/** @see org.apache.lucene.index.IndexReader#listCommits */
/** Returns all commit points that exist in the Directory.
* Normally, because the default is {@link
* KeepOnlyLastCommitDeletionPolicy}, there would be only
* one commit point. But if you're using a custom {@link
* IndexDeletionPolicy} then there could be many commits.
* Once you have a given commit, you can open a reader on
 * it by calling {@link IndexReader#open(IndexCommit)}.
* There must be at least one commit in
* the Directory, else this method throws {@link
* IndexNotFoundException}. Note that if a commit is in
* progress while this method is running, that commit
* may or may not be returned.
*
* @return a sorted list of {@link IndexCommit}s, from oldest
* to latest. */
public static List<IndexCommit> listCommits(Directory dir) throws IOException {
final String[] files = dir.listAll();

View File

@ -33,7 +33,7 @@ import org.apache.lucene.util.packed.PackedInts;
* <li>via {@link #getSource()} providing RAM resident random access</li>
* <li>via {@link #getDirectSource()} providing on disk random access</li>
* </ul> {@link DocValues} are exposed via
* {@link IndexReader#docValues(String)} on a per-segment basis. For best
* {@link AtomicReader#docValues(String)} on a per-segment basis. For best
* performance {@link DocValues} should be consumed per-segment just like
* IndexReader.
* <p>

View File

@ -262,7 +262,7 @@ public abstract class IndexReader implements Closeable {
* @throws CorruptIndexException
* @throws IOException if there is a low-level IO error
*
* @see #openIfChanged(IndexReader,IndexWriter,boolean)
* @see DirectoryReader#openIfChanged(DirectoryReader,IndexWriter,boolean)
*
* @lucene.experimental
* @deprecated Use {@link DirectoryReader#open(IndexWriter,boolean)}

View File

@ -2631,7 +2631,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
* @param commitUserData Opaque Map (String->String)
* that's recorded into the segments file in the index,
* and retrievable by {@link
* IndexReader#getCommitUserData}. Note that when
* DirectoryReader#getCommitUserData}. Note that when
* IndexWriter commits itself during {@link #close}, the
* commitUserData is unchanged (just carried over from
* the prior commit). If this is null then the previous

View File

@ -31,7 +31,7 @@ import org.apache.lucene.index.MultiReader; // javadoc
* This class forces a composite reader (eg a {@link
* MultiReader} or {@link DirectoryReader} or any other
* IndexReader subclass that returns non-null from {@link
* IndexReader#getSequentialSubReaders}) to emulate an
* CompositeReader#getSequentialSubReaders}) to emulate an
* atomic reader. This requires implementing the postings
* APIs on-the-fly, using the static methods in {@link
* MultiFields}, {@link MultiDocValues},

View File

@ -24,6 +24,7 @@ import java.util.WeakHashMap;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader; // javadocs
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.Bits;
@ -53,13 +54,13 @@ public class CachingWrapperFilter extends Filter {
/** Wraps another filter's result and caches it. If
* {@code recacheDeletes} is {@code true}, then new deletes (for example
* after {@link IndexReader#openIfChanged}) will cause the filter
* after {@link DirectoryReader#openIfChanged}) will cause the filter
* {@link DocIdSet} to be recached.
*
* <p>If your index changes seldom, it is recommended to use {@code recacheDeletes=true},
* as recaching will only occur when the index is reopened.
* For near-real-time indexes or indexes that are often
* reopened with (e.g., {@link IndexReader#openIfChanged} is used), you should
 * reopened (e.g., when {@link DirectoryReader#openIfChanged} is used), you should
* pass {@code recacheDeletes=false}. This will cache the filter results omitting
* deletions and will AND them in while scoring.
* @param filter Filter to cache results of

View File

@ -18,8 +18,8 @@ package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.index.AtomicReader; // for javadocs
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
@ -134,7 +134,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getBytes(AtomicReader,String,boolean)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -143,7 +143,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getBytes(IndexReader,String,FieldCache.ByteParser,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getBytes(AtomicReader,String,FieldCache.ByteParser,boolean)}. This works with all
* byte fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -184,7 +184,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getShorts(AtomicReader,String,boolean)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -193,7 +193,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getShorts(IndexReader,String,FieldCache.ShortParser,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getShorts(AtomicReader,String,FieldCache.ShortParser,boolean)}. This works with all
* short fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -234,7 +234,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,boolean)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -243,7 +243,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getInts(IndexReader,String,FieldCache.IntParser,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,FieldCache.IntParser,boolean)}. This works with all
* int fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -284,7 +284,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,boolean)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -293,7 +293,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getLongs(IndexReader,String,FieldCache.LongParser,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,FieldCache.LongParser,boolean)}. This works with all
* long fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -334,7 +334,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,boolean)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -343,7 +343,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getFloats(IndexReader,String,FieldCache.FloatParser,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,FieldCache.FloatParser,boolean)}. This works with all
* float fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -388,7 +388,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,boolean)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/
@ -397,7 +397,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
}
/**
* Creates a numeric range filter using {@link FieldCache#getDoubles(IndexReader,String,FieldCache.DoubleParser,boolean)}. This works with all
* Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,FieldCache.DoubleParser,boolean)}. This works with all
* double fields containing exactly one numeric term in the field. The range can be half-open by setting one
* of the values to <code>null</code>.
*/

View File

@ -20,9 +20,9 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Comparator;
import org.apache.lucene.index.AtomicReader; // javadocs
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.FieldCache.ByteParser;
import org.apache.lucene.search.FieldCache.DocTerms;
import org.apache.lucene.search.FieldCache.DocTermsIndex;
@ -72,7 +72,7 @@ import org.apache.lucene.util.packed.PackedInts;
* priority queue. The {@link FieldValueHitQueue}
* calls this method when a new hit is competitive.
*
* <li> {@link #setNextReader(IndexReader.AtomicReaderContext)} Invoked
* <li> {@link #setNextReader(AtomicReaderContext)} Invoked
* when the search is switching to the next segment.
* You may need to update internal state of the
* comparator, for example retrieving new values from
@ -1885,7 +1885,7 @@ public abstract class FieldComparator<T> {
* comparisons are done using BytesRef.compareTo, which is
* slow for medium to large result sets but possibly
* very fast for very small results sets. The BytesRef
* values are obtained using {@link IndexReader#docValues}. */
* values are obtained using {@link AtomicReader#docValues}. */
public static final class TermValDocValuesComparator extends FieldComparator<BytesRef> {
private BytesRef[] values;

View File

@ -19,6 +19,7 @@ package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.index.AtomicReader; // javadocs
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader; // javadocs
import org.apache.lucene.util.Bits;
@ -44,7 +45,7 @@ public abstract class Filter {
* represent the whole underlying index i.e. if the index has more than
* one segment the given reader only represents a single segment.
* The provided context is always an atomic context, so you can call
* {@link IndexReader#fields()}
* {@link AtomicReader#fields()}
* on the context's reader, for example.
*
* @param acceptDocs

View File

@ -33,6 +33,7 @@ import java.util.concurrent.locks.ReentrantLock;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader; // javadocs
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.IndexReaderContext;
@ -56,10 +57,11 @@ import org.apache.lucene.util.ThreadInterruptedException;
* multiple searches instead of creating a new one
* per-search. If your index has changed and you wish to
* see the changes reflected in searching, you should
* use {@link IndexReader#openIfChanged} to obtain a new reader and
* use {@link DirectoryReader#openIfChanged(DirectoryReader)}
* to obtain a new reader and
* then create a new IndexSearcher from that. Also, for
* low-latency turnaround it's best to use a near-real-time
* reader ({@link IndexReader#open(IndexWriter,boolean)}).
* reader ({@link DirectoryReader#open(IndexWriter,boolean)}).
* Once you have a new {@link IndexReader}, it's relatively
* cheap to create a new IndexSearcher from it.
*
@ -402,7 +404,7 @@ public class IndexSearcher {
* <p>NOTE: this does not compute scores by default. If you
* need scores, create a {@link TopFieldCollector}
* instance by calling {@link TopFieldCollector#create} and
* then pass that to {@link #search(IndexReader.AtomicReaderContext[], Weight,
* then pass that to {@link #search(AtomicReaderContext[], Weight,
* Collector)}.</p>
*/
protected TopFieldDocs search(Weight weight, int nDocs,
@ -451,7 +453,7 @@ public class IndexSearcher {
* <p>NOTE: this does not compute scores by default. If you
* need scores, create a {@link TopFieldCollector}
* instance by calling {@link TopFieldCollector#create} and
* then pass that to {@link #search(IndexReader.AtomicReaderContext[], Weight,
* then pass that to {@link #search(AtomicReaderContext[], Weight,
* Collector)}.</p>
*/
protected TopFieldDocs search(AtomicReaderContext[] leaves, Weight weight, int nDocs,

View File

@ -26,7 +26,6 @@ import java.util.concurrent.ConcurrentHashMap;
import org.apache.lucene.search.NRTManager; // javadocs
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader; // javadocs
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.IOUtils;
@ -86,9 +85,9 @@ import org.apache.lucene.util.IOUtils;
* <p><b>NOTE</b>: keeping many searchers around means
* you'll use more resources (open files, RAM) than a single
* searcher. However, as long as you are using {@link
* IndexReader#openIfChanged}, the searchers will usually
* share almost all segments and the added resource usage is
* contained. When a large merge has completed, and
* DirectoryReader#openIfChanged(DirectoryReader)}, the searchers
* will usually share almost all segments and the added resource usage
* is contained. When a large merge has completed, and
* you reopen, because that is a large change, the new
* searcher will use higher additional RAM than other
* searchers; but large merges don't complete very often and

View File

@ -77,12 +77,12 @@ public final class SearcherManager implements Closeable {
* Creates and returns a new SearcherManager from the given {@link IndexWriter}.
* @param writer the IndexWriter to open the IndexReader from.
* @param applyAllDeletes If <code>true</code>, all buffered deletes will
* be applied (made visible) in the {@link IndexSearcher} / {@link IndexReader}.
* be applied (made visible) in the {@link IndexSearcher} / {@link DirectoryReader}.
* If <code>false</code>, the deletes may or may not be applied, but remain buffered
* (in IndexWriter) so that they will be applied in the future.
* Applying deletes can be costly, so if your app can tolerate deleted documents
* being returned you might gain some performance by passing <code>false</code>.
* See {@link IndexReader#openIfChanged(IndexReader, IndexWriter, boolean)}.
* See {@link DirectoryReader#openIfChanged(DirectoryReader, IndexWriter, boolean)}.
* @param searcherFactory An optional {@link SearcherFactory}. Pass
* <code>null</code> if you don't require the searcher to be warmed
* before going live or other custom behavior.
@ -99,7 +99,7 @@ public final class SearcherManager implements Closeable {
/**
* Creates and returns a new SearcherManager from the given {@link Directory}.
* @param dir the directory to open the IndexReader on.
* @param dir the directory to open the DirectoryReader on.
* @param searcherFactory An optional {@link SearcherFactory}. Pass
* <code>null</code> if you don't require the searcher to be warmed
* before going live or other custom behavior.
@ -116,7 +116,7 @@ public final class SearcherManager implements Closeable {
/**
* You must call this, periodically, to perform a reopen. This calls
* {@link IndexReader#openIfChanged(IndexReader)} with the underlying reader, and if that returns a
* {@link DirectoryReader#openIfChanged(DirectoryReader)} with the underlying reader, and if that returns a
 * new reader, it's warmed (if you provided a {@link SearcherFactory}) and then
* swapped into production.
*

View File

@ -17,7 +17,7 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader; // javadocs
import org.apache.lucene.index.AtomicReader; // javadocs
import org.apache.lucene.util.BytesRef;
/**
* Contains statistics for a specific term
@ -42,13 +42,13 @@ public class TermStatistics {
}
/** returns the number of documents this term occurs in
* @see IndexReader#docFreq(String, BytesRef) */
* @see AtomicReader#docFreq(String, BytesRef) */
public final long docFreq() {
return docFreq;
}
/** returns the total number of occurrences of this term
* @see IndexReader#totalTermFreq(String, BytesRef) */
* @see AtomicReader#totalTermFreq(String, BytesRef) */
public final long totalTermFreq() {
return totalTermFreq;
}

View File

@ -19,9 +19,9 @@ package org.apache.lucene.search;
import java.io.IOException;
import org.apache.lucene.index.AtomicReader; // javadocs
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.IndexReaderContext; // javadocs
import org.apache.lucene.search.similarities.SimilarityProvider;
import org.apache.lucene.util.Bits;
@ -32,10 +32,10 @@ import org.apache.lucene.util.Bits;
* {@link Query}, so that a {@link Query} instance can be reused. <br>
* {@link IndexSearcher} dependent state of the query should reside in the
* {@link Weight}. <br>
* {@link IndexReader} dependent state should reside in the {@link Scorer}.
* {@link AtomicReader} dependent state should reside in the {@link Scorer}.
* <p>
* Since {@link Weight} creates {@link Scorer} instances for a given
* {@link AtomicReaderContext} ({@link #scorer(IndexReader.AtomicReaderContext,
* {@link AtomicReaderContext} ({@link #scorer(AtomicReaderContext,
* boolean, boolean, Bits)})
* callers must maintain the relationship between the searcher's top-level
* {@link IndexReaderContext} and the context used to create a {@link Scorer}.
@ -51,7 +51,7 @@ import org.apache.lucene.util.Bits;
* <li>The query normalization factor is passed to {@link #normalize(float, float)}. At
* this point the weighting is complete.
* <li>A <code>Scorer</code> is constructed by
* {@link #scorer(IndexReader.AtomicReaderContext, boolean, boolean, Bits)}.
* {@link #scorer(AtomicReaderContext, boolean, boolean, Bits)}.
* </ol>
*
* @since 2.9
@ -117,7 +117,7 @@ public abstract class Weight {
* Returns true iff this implementation scores docs only out of order. This
* method is used in conjunction with {@link Collector}'s
* {@link Collector#acceptsDocsOutOfOrder() acceptsDocsOutOfOrder} and
* {@link #scorer(IndexReader.AtomicReaderContext, boolean, boolean, Bits)} to
* {@link #scorer(AtomicReaderContext, boolean, boolean, Bits)} to
* create a matching {@link Scorer} instance for a given {@link Collector}, or
* vice versa.
* <p>

View File

@ -21,6 +21,7 @@ package org.apache.lucene.search.similarities;
import java.io.IOException;
import org.apache.lucene.document.DocValuesField; // javadoc
import org.apache.lucene.index.AtomicReader; // javadoc
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexReader; // javadoc
@ -57,7 +58,7 @@ import org.apache.lucene.util.SmallFloat; // javadoc
* <a name="indextime"/>
* At indexing time, the indexer calls {@link #computeNorm(FieldInvertState, Norm)}, allowing
* the Similarity implementation to set a per-document value for the field that will
* be later accessible via {@link IndexReader#normValues(String)}. Lucene makes no assumption
* be later accessible via {@link AtomicReader#normValues(String)}. Lucene makes no assumption
* about what is in this byte, but it is most useful for encoding length normalization
* information.
* <p>
@ -72,7 +73,7 @@ import org.apache.lucene.util.SmallFloat; // javadoc
* Because index-time boost is handled entirely at the application level anyway,
* an application can alternatively store the index-time boost separately using an
* {@link DocValuesField}, and access this at query-time with
* {@link IndexReader#docValues(String)}.
* {@link AtomicReader#docValues(String)}.
* <p>
* Finally, using index-time boosts (either via folding into the normalization byte or
* via DocValues), is an inefficient way to boost the scores of different fields if the
@ -93,9 +94,9 @@ import org.apache.lucene.util.SmallFloat; // javadoc
* is called for each query leaf node, {@link SimilarityProvider#queryNorm(float)} is called for the top-level
* query, and finally {@link Similarity.Stats#normalize(float, float)} passes down the normalization value
* and any top-level boosts (e.g. from enclosing {@link BooleanQuery}s).
* <li>For each segment in the index, the Query creates a {@link #exactDocScorer(Stats, String, IndexReader.AtomicReaderContext)}
* <li>For each segment in the index, the Query creates a {@link #exactDocScorer(Stats, String, AtomicReaderContext)}
* (for queries with exact frequencies such as TermQuerys and exact PhraseQueries) or a
* {@link #sloppyDocScorer(Stats, String, IndexReader.AtomicReaderContext)} (for queries with sloppy frequencies such as
* {@link #sloppyDocScorer(Stats, String, AtomicReaderContext)} (for queries with sloppy frequencies such as
* SpanQuerys and sloppy PhraseQueries). The score() method is called for each matching document.
* </ol>
* <p>

View File

@ -98,12 +98,6 @@ public abstract class FacetsAccumulator {
* is considered, and used to decrement from the overall counts, thereby
* walking through less documents, which is faster.
* <p>
* Note that this optimization is only available when searching an index
* whose {@link IndexReader} implements both
* {@link IndexReader#directory()} and {@link IndexReader#getVersion()}
* otherwise the optimization is silently disabled regardless of
* the complement threshold settings.
* <p>
* For the default settings see {@link #DEFAULT_COMPLEMENT_THRESHOLD}.
* <p>
 * To force complements in all cases, pass {@link #FORCE_COMPLEMENT}.

View File

@ -25,7 +25,6 @@ import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.Bits;
import org.apache.lucene.index.MultiFields;
import java.io.IOException;
import java.io.Serializable;
@ -89,7 +88,7 @@ public abstract class ValueSource implements Serializable {
/**
* EXPERIMENTAL: This method is subject to change.
* <p>
* Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, IndexReader.AtomicReaderContext)}
* Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, AtomicReaderContext)}
* to populate the SortField.
*
* @param reverse true if this is a reverse sort.

View File

@ -23,9 +23,9 @@ import org.apache.lucene.store.Directory;
/**
* Default IndexReaderFactory implementation. Returns a standard Lucene
* IndexReader.
* {@link DirectoryReader}.
*
* @see IndexReader#open(Directory)
* @see DirectoryReader#open(Directory)
*/
public class StandardIndexReaderFactory extends IndexReaderFactory {