LUCENE-3902: add javadocs

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1332791 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2012-05-01 19:25:55 +00:00
parent 3d224abfe8
commit 9554a045e9
19 changed files with 151 additions and 28 deletions

View File

@@ -209,18 +209,18 @@
<check-missing-javadocs dir="build/docs/analyzers-uima" level="class"/> <check-missing-javadocs dir="build/docs/analyzers-uima" level="class"/>
<!-- benchmark: problems --> <!-- benchmark: problems -->
<!-- core: problems --> <!-- core: problems -->
<!-- demo: problems --> <check-missing-javadocs dir="build/docs/demo" level="class"/>
<!-- facet: problems --> <!-- facet: problems -->
<!-- grouping: problems --> <!-- grouping: problems -->
<!-- highlighter: problems --> <!-- highlighter: problems -->
<check-missing-javadocs dir="build/docs/join" level="class"/> <check-missing-javadocs dir="build/docs/join" level="class"/>
<check-missing-javadocs dir="build/docs/memory" level="class"/> <check-missing-javadocs dir="build/docs/memory" level="class"/>
<!-- misc: problems --> <check-missing-javadocs dir="build/docs/misc" level="class"/>
<!-- queries: problems --> <!-- queries: problems -->
<!-- queryparser: problems --> <!-- queryparser: problems -->
<!-- sandbox: problems --> <!-- sandbox: problems -->
<!-- spatial: problems --> <!-- spatial: problems -->
<!-- suggest: problems --> <check-missing-javadocs dir="build/docs/suggest" level="class"/>
<!-- test-framework: problems --> <!-- test-framework: problems -->
</sequential> </sequential>
</target> </target>

View File

@@ -24,12 +24,23 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields; import org.apache.lucene.index.Fields;
import org.apache.lucene.index.FieldsEnum; import org.apache.lucene.index.FieldsEnum;
import org.apache.lucene.index.MergeState; import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.SegmentWriteState; // javadocs
import org.apache.lucene.index.Terms; import org.apache.lucene.index.Terms;
/** Abstract API that consumes terms, doc, freq, prox, offset and /**
* Abstract API that consumes terms, doc, freq, prox, offset and
* payloads postings. Concrete implementations of this * payloads postings. Concrete implementations of this
* actually do "something" with the postings (write it into * actually do "something" with the postings (write it into
* the index in a specific format). * the index in a specific format).
* <p>
* The lifecycle is:
* <ol>
* <li>FieldsConsumer is created by
* {@link PostingsFormat#fieldsConsumer(SegmentWriteState)}.
* <li>For each field, {@link #addField(FieldInfo)} is called,
* returning a {@link TermsConsumer} for the field.
* <li>After all fields are added, the consumer is {@link #close}d.
* </ol>
* *
* @lucene.experimental * @lucene.experimental
*/ */

View File

@@ -28,19 +28,29 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.FixedBitSet;
/** /**
* Abstract API that consumes postings for an individual term.
* <p>
* The lifecycle is:
* <ol>
* <li>PostingsConsumer is returned for each term by
* {@link TermsConsumer#startTerm(BytesRef)}.
* <li>{@link #startDoc(int, int)} is called for each
* document where the term occurs, specifying id
* and term frequency for that document.
* <li>If positions are enabled for the field, then
* {@link #addPosition(int, BytesRef, int, int)}
* will be called for each occurrence in the
* document.
* <li>{@link #finishDoc()} is called when the producer
* is done adding positions to the document.
* </ol>
*
* @lucene.experimental * @lucene.experimental
*/ */
public abstract class PostingsConsumer { public abstract class PostingsConsumer {
/** Adds a new doc in this term. */ /** Adds a new doc in this term. */
public abstract void startDoc(int docID, int termDocFreq) throws IOException; public abstract void startDoc(int docID, int freq) throws IOException;
public static class PostingsMergeState {
DocsEnum docsEnum;
int[] docMap;
int docBase;
}
/** Add a new position & payload, and start/end offset. A /** Add a new position & payload, and start/end offset. A
* null payload means no payload; a non-null payload with * null payload means no payload; a non-null payload with

View File

@@ -24,9 +24,17 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.FieldInfo;
/** /**
* Extension of {@link PostingsConsumer} to support pluggable term dictionaries.
* <p>
* This class contains additional hooks to interact with the provided
* term dictionaries such as {@link BlockTreeTermsWriter} and
* {@link BlockTermsWriter}. If you want to re-use one of these existing
* implementations and are only interested in customizing the format of
* the postings list, extend this class instead.
*
* @see PostingsReaderBase
* @lucene.experimental * @lucene.experimental
*/ */
// TODO: find a better name; this defines the API that the // TODO: find a better name; this defines the API that the
// terms dict impls use to talk to a postings impl. // terms dict impls use to talk to a postings impl.
// TermsDict + PostingsReader/WriterBase == PostingsConsumer/Producer // TermsDict + PostingsReader/WriterBase == PostingsConsumer/Producer

View File

@@ -20,6 +20,7 @@ package org.apache.lucene.codecs;
import java.io.IOException; import java.io.IOException;
import java.util.Comparator; import java.util.Comparator;
import org.apache.lucene.index.FieldInfo; // javadocs
import org.apache.lucene.index.FieldInfo.IndexOptions; import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.MergeState; import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.TermsEnum; import org.apache.lucene.index.TermsEnum;
@@ -30,9 +31,25 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.FixedBitSet;
/** /**
* Abstract API that consumes terms for an individual field.
* <p>
* The lifecycle is:
* <ol>
* <li>TermsConsumer is returned for each field
* by {@link FieldsConsumer#addField(FieldInfo)}.
* <li>TermsConsumer returns a {@link PostingsConsumer} for
* each term in {@link #startTerm(BytesRef)}.
* <li>When the producer (e.g. IndexWriter)
* is done adding documents for the term, it calls
* {@link #finishTerm(BytesRef, TermStats)}, passing in
* the accumulated term statistics.
* <li>Producer calls {@link #finish(long, long, int)} with
* the accumulated collection statistics when it is finished
* adding terms to the field.
* </ol>
*
* @lucene.experimental * @lucene.experimental
*/ */
public abstract class TermsConsumer { public abstract class TermsConsumer {
/** Starts a new term in this field; this may be called /** Starts a new term in this field; this may be called
@@ -50,11 +67,11 @@ public abstract class TermsConsumer {
* before feeding to this API. */ * before feeding to this API. */
public abstract Comparator<BytesRef> getComparator() throws IOException; public abstract Comparator<BytesRef> getComparator() throws IOException;
/** Default merge impl */
private MappingMultiDocsEnum docsEnum; private MappingMultiDocsEnum docsEnum;
private MappingMultiDocsEnum docsAndFreqsEnum; private MappingMultiDocsEnum docsAndFreqsEnum;
private MappingMultiDocsAndPositionsEnum postingsEnum; private MappingMultiDocsAndPositionsEnum postingsEnum;
/** Default merge impl */
public void merge(MergeState mergeState, TermsEnum termsEnum) throws IOException { public void merge(MergeState mergeState, TermsEnum termsEnum) throws IOException {
BytesRef term; BytesRef term;

View File

@@ -30,9 +30,9 @@ import java.io.Closeable;
// frequent indexing // frequent indexing
/** /**
* TermsDictReader interacts with an instance of this class * {@link BlockTermsReader} interacts with an instance of this class
* to manage its terms index. The writer must accept * to manage its terms index. The writer must accept
* indexed terms (many pairs of CharSequence text + long * indexed terms (many pairs of BytesRef text + long
* fileOffset), and then this reader must be able to * fileOffset), and then this reader must be able to
* retrieve the nearest index term to a provided term * retrieve the nearest index term to a provided term
* text. * text.
@@ -48,13 +48,15 @@ public abstract class TermsIndexReaderBase implements Closeable {
public abstract int getDivisor(); public abstract int getDivisor();
// Similar to TermsEnum, except, the only "metadata" it /**
// reports for a given indexed term is the long fileOffset * Similar to TermsEnum, except, the only "metadata" it
// into the main terms dict (_X.tis) file: * reports for a given indexed term is the long fileOffset
* into the main terms dictionary file:
*/
public static abstract class FieldIndexEnum { public static abstract class FieldIndexEnum {
/** Seeks to "largest" indexed term that's <= /** Seeks to "largest" indexed term that's <=
* term; retruns file pointer index (into the main * term; returns file pointer index (into the main
* terms index file) for that term */ * terms index file) for that term */
public abstract long seek(BytesRef term) throws IOException; public abstract long seek(BytesRef term) throws IOException;
@@ -63,8 +65,10 @@ public abstract class TermsIndexReaderBase implements Closeable {
public abstract BytesRef term(); public abstract BytesRef term();
// Only impl'd if supportsOrd() returns true! /** Only implemented if {@link TermsIndexReaderBase#supportsOrd()} returns true. */
public abstract long seek(long ord) throws IOException; public abstract long seek(long ord) throws IOException;
/** Only implemented if {@link TermsIndexReaderBase#supportsOrd()} returns true. */
public abstract long ord(); public abstract long ord();
} }
} }

View File

@@ -23,9 +23,18 @@ import org.apache.lucene.util.BytesRef;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
/** @lucene.experimental */ /**
* Base class for terms index implementations to plug
* into {@link BlockTermsWriter}.
*
* @see TermsIndexReaderBase
* @lucene.experimental
*/
public abstract class TermsIndexWriterBase implements Closeable { public abstract class TermsIndexWriterBase implements Closeable {
/**
* Terms index API for a single field.
*/
public abstract class FieldWriter { public abstract class FieldWriter {
public abstract boolean checkIndexTerm(BytesRef text, TermStats stats) throws IOException; public abstract boolean checkIndexTerm(BytesRef text, TermStats stats) throws IOException;
public abstract void add(BytesRef text, TermStats stats, long termsFilePointer) throws IOException; public abstract void add(BytesRef text, TermStats stats, long termsFilePointer) throws IOException;

View File

@@ -66,6 +66,11 @@ public abstract class FixedIntBlockIndexInput extends IntIndexInput {
protected abstract BlockReader getBlockReader(IndexInput in, int[] buffer) throws IOException; protected abstract BlockReader getBlockReader(IndexInput in, int[] buffer) throws IOException;
/**
* Interface for fixed-size block decoders.
* <p>
* Implementations should decode into the buffer in {@link #readBlock}.
*/
public interface BlockReader { public interface BlockReader {
public void readBlock() throws IOException; public void readBlock() throws IOException;
} }

View File

@@ -68,6 +68,11 @@ public abstract class VariableIntBlockIndexInput extends IntIndexInput {
protected abstract BlockReader getBlockReader(IndexInput in, int[] buffer) throws IOException; protected abstract BlockReader getBlockReader(IndexInput in, int[] buffer) throws IOException;
/**
* Interface for variable-size block decoders.
* <p>
* Implementations should decode into the buffer in {@link #readBlock}.
*/
public interface BlockReader { public interface BlockReader {
public int readBlock() throws IOException; public int readBlock() throws IOException;
public void seek(long pos) throws IOException; public void seek(long pos) throws IOException;

View File

@@ -31,6 +31,10 @@ import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
/** /**
* Writes plain-text DocValues.
* <p>
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
*
* @lucene.experimental * @lucene.experimental
*/ */
public class SimpleTextDocValuesConsumer extends DocValuesConsumer { public class SimpleTextDocValuesConsumer extends DocValuesConsumer {

View File

@@ -26,7 +26,12 @@ import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.SegmentInfo; import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
/** /**
* Plain-text DocValues format.
* <p>
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
*
* @lucene.experimental * @lucene.experimental
*/ */
public class SimpleTextDocValuesFormat extends DocValuesFormat { public class SimpleTextDocValuesFormat extends DocValuesFormat {

View File

@@ -38,7 +38,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
/** /**
* plain-text norms format * plain-text norms format.
* <p> * <p>
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B> * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
* *
@@ -63,6 +63,13 @@ public class SimpleTextNormsFormat extends NormsFormat {
SimpleTextNormsPerDocConsumer.files(info, files); SimpleTextNormsPerDocConsumer.files(info, files);
} }
/**
* Reads plain-text norms.
* <p>
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
*
* @lucene.experimental
*/
public static class SimpleTextNormsPerDocProducer extends public static class SimpleTextNormsPerDocProducer extends
SimpleTextPerDocProducer { SimpleTextPerDocProducer {
@@ -88,6 +95,13 @@ public class SimpleTextNormsFormat extends NormsFormat {
} }
/**
* Writes plain-text norms.
* <p>
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
*
* @lucene.experimental
*/
public static class SimpleTextNormsPerDocConsumer extends public static class SimpleTextNormsPerDocConsumer extends
SimpleTextPerDocConsumer { SimpleTextPerDocConsumer {

View File

@@ -47,6 +47,10 @@ import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.packed.PackedInts.Reader; import org.apache.lucene.util.packed.PackedInts.Reader;
/** /**
* Reads plain-text DocValues.
* <p>
* <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
*
* @lucene.experimental * @lucene.experimental
*/ */
public class SimpleTextPerDocProducer extends PerDocProducerBase { public class SimpleTextPerDocProducer extends PerDocProducerBase {

View File

@@ -51,6 +51,12 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory; import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version; import org.apache.lucene.util.Version;
/**
* Example servlet that uses the XML queryparser.
* <p>
* NOTE: you must provide CSV data in <code>/WEB-INF/data.tsv</code>
* for the demo to work!
*/
public class FormBasedXmlQueryDemo extends HttpServlet { public class FormBasedXmlQueryDemo extends HttpServlet {
private QueryTemplateManager queryTemplateManager; private QueryTemplateManager queryTemplateManager;

View File

@@ -21,6 +21,10 @@ import java.io.IOException;
import java.io.FileDescriptor; import java.io.FileDescriptor;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
/**
* Provides JNI access to native methods such as madvise() for
* {@link NativeUnixDirectory}
*/
public final class NativePosixUtil { public final class NativePosixUtil {
public final static int NORMAL = 0; public final static int NORMAL = 0;
public final static int SEQUENTIAL = 1; public final static int SEQUENTIAL = 1;

View File

@@ -75,7 +75,7 @@ public class WindowsDirectory extends FSDirectory {
return new WindowsIndexInput(new File(getDirectory(), name), Math.max(BufferedIndexInput.bufferSize(context), DEFAULT_BUFFERSIZE)); return new WindowsIndexInput(new File(getDirectory(), name), Math.max(BufferedIndexInput.bufferSize(context), DEFAULT_BUFFERSIZE));
} }
protected static class WindowsIndexInput extends BufferedIndexInput { static class WindowsIndexInput extends BufferedIndexInput {
private final long fd; private final long fd;
private final long length; private final long length;
boolean isClone; boolean isClone;

View File

@@ -34,6 +34,12 @@ import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.UnicodeUtil; import org.apache.lucene.util.UnicodeUtil;
/**
* Suggest implementation based on
* <a href="http://jaspell.sourceforge.net/">JaSpell</a>.
*
* @see JaspellTernarySearchTrie
*/
public class JaspellLookup extends Lookup { public class JaspellLookup extends Lookup {
JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie(); JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie();
private boolean usePrefix = true; private boolean usePrefix = true;

View File

@@ -19,6 +19,11 @@ package org.apache.lucene.search.suggest.tst;
import java.util.*; import java.util.*;
/**
* Ternary Search Trie implementation.
*
* @see TernaryTreeNode
*/
public class TSTAutocomplete { public class TSTAutocomplete {
/** /**

View File

@@ -33,6 +33,12 @@ import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.UnicodeUtil; import org.apache.lucene.util.UnicodeUtil;
/**
* Suggest implementation based on a
* <a href="http://en.wikipedia.org/wiki/Ternary_search_tree">Ternary Search Tree</a>
*
* @see TSTAutocomplete
*/
public class TSTLookup extends Lookup { public class TSTLookup extends Lookup {
TernaryTreeNode root = new TernaryTreeNode(); TernaryTreeNode root = new TernaryTreeNode();
TSTAutocomplete autocomplete = new TSTAutocomplete(); TSTAutocomplete autocomplete = new TSTAutocomplete();