mirror of https://github.com/apache/lucene.git
LUCENE-3902: add javadocs
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1332791 13f79535-47bb-0310-9956-ffa450edef68
parent 3d224abfe8
commit 9554a045e9
@@ -209,18 +209,18 @@
      <check-missing-javadocs dir="build/docs/analyzers-uima" level="class"/>
      <!-- benchmark: problems -->
      <!-- core: problems -->
      <!-- demo: problems -->
      <check-missing-javadocs dir="build/docs/demo" level="class"/>
      <!-- facet: problems -->
      <!-- grouping: problems -->
      <!-- highlighter: problems -->
      <check-missing-javadocs dir="build/docs/join" level="class"/>
      <check-missing-javadocs dir="build/docs/memory" level="class"/>
      <!-- misc: problems -->
      <check-missing-javadocs dir="build/docs/misc" level="class"/>
      <!-- queries: problems -->
      <!-- queryparser: problems -->
      <!-- sandbox: problems -->
      <!-- spatial: problems -->
      <!-- suggest: problems -->
      <check-missing-javadocs dir="build/docs/suggest" level="class"/>
      <!-- test-framework: problems -->
    </sequential>
  </target>

@@ -24,12 +24,23 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.FieldsEnum;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.SegmentWriteState; // javadocs
import org.apache.lucene.index.Terms;

/** Abstract API that consumes terms, doc, freq, prox, offset and
/**
 * Abstract API that consumes terms, doc, freq, prox, offset and
 * payloads postings. Concrete implementations of this
 * actually do "something" with the postings (write it into
 * the index in a specific format).
 * <p>
 * The lifecycle is:
 * <ol>
 * <li>FieldsConsumer is created by
 * {@link PostingsFormat#fieldsConsumer(SegmentWriteState)}.
 * <li>For each field, {@link #addField(FieldInfo)} is called,
 * returning a {@link TermsConsumer} for the field.
 * <li>After all fields are added, the consumer is {@link #close}d.
 * </ol>
 *
 * @lucene.experimental
 */

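The lifecycle described in that javadoc can be made concrete with a small skeleton. This is not part of the commit; it is a hypothetical sketch assuming the 4.0-era codec API (FieldsConsumer exposing addField(FieldInfo) and close()), and SkeletonFieldsConsumer / newTermsConsumerFor are made-up names:

import java.io.IOException;

import org.apache.lucene.codecs.FieldsConsumer;
import org.apache.lucene.codecs.TermsConsumer;
import org.apache.lucene.index.FieldInfo;

// Hypothetical skeleton following the documented lifecycle: the codec's
// PostingsFormat creates the consumer, addField() is invoked once per
// indexed field, and close() is invoked after the last field.
public abstract class SkeletonFieldsConsumer extends FieldsConsumer {

  @Override
  public TermsConsumer addField(FieldInfo field) throws IOException {
    // Hand back a TermsConsumer that will receive every term of this field.
    return newTermsConsumerFor(field);
  }

  @Override
  public void close() throws IOException {
    // All fields have been added; flush buffers and release file handles here.
  }

  // Made-up factory hook: a real implementation would open the per-field
  // writer for its on-disk postings format here.
  protected abstract TermsConsumer newTermsConsumerFor(FieldInfo field) throws IOException;
}
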
@@ -28,19 +28,29 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;

/**
 * Abstract API that consumes postings for an individual term.
 * <p>
 * The lifecycle is:
 * <ol>
 * <li>PostingsConsumer is returned for each term by
 * {@link TermsConsumer#startTerm(BytesRef)}.
 * <li>{@link #startDoc(int, int)} is called for each
 * document where the term occurs, specifying id
 * and term frequency for that document.
 * <li>If positions are enabled for the field, then
 * {@link #addPosition(int, BytesRef, int, int)}
 * will be called for each occurrence in the
 * document.
 * <li>{@link #finishDoc()} is called when the producer
 * is done adding positions to the document.
 * </ol>
 *
 * @lucene.experimental
 */

public abstract class PostingsConsumer {

  /** Adds a new doc in this term. */
  public abstract void startDoc(int docID, int termDocFreq) throws IOException;

  public static class PostingsMergeState {
    DocsEnum docsEnum;
    int[] docMap;
    int docBase;
  }
  public abstract void startDoc(int docID, int freq) throws IOException;

  /** Add a new position & payload, and start/end offset. A
   * null payload means no payload; a non-null payload with

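The call order documented above (startDoc, then addPosition once per occurrence, then finishDoc) can be sketched as follows. This is a hypothetical example assuming the 4.0-era signatures referenced in this diff, not code from the commit; CountingPostingsConsumer is a made-up name:

import java.io.IOException;

import org.apache.lucene.codecs.PostingsConsumer;
import org.apache.lucene.util.BytesRef;

// Hypothetical consumer that just counts occurrences, to show the order of
// calls: startDoc() per matching document, addPosition() per occurrence
// (only if the field indexes positions), then finishDoc().
public class CountingPostingsConsumer extends PostingsConsumer {
  long totalOccurrences;

  @Override
  public void startDoc(int docID, int termDocFreq) throws IOException {
    // A new document containing the current term; termDocFreq is the
    // term's frequency within that document.
  }

  @Override
  public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException {
    totalOccurrences++; // one call per occurrence in the current document
  }

  @Override
  public void finishDoc() throws IOException {
    // All positions for the current document have been added.
  }
}
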
@@ -24,9 +24,17 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.index.FieldInfo;

/**
 * Extension of {@link PostingsConsumer} to support pluggable term dictionaries.
 * <p>
 * This class contains additional hooks to interact with the provided
 * term dictionaries such as {@link BlockTreeTermsWriter} and
 * {@link BlockTermsWriter}. If you want to re-use one of these existing
 * implementations and are only interested in customizing the format of
 * the postings list, extend this class instead.
 *
 * @see PostingsReaderBase
 * @lucene.experimental
 */

// TODO: find a better name; this defines the API that the
// terms dict impls use to talk to a postings impl.
// TermsDict + PostingsReader/WriterBase == PostingsConsumer/Producer

@@ -20,6 +20,7 @@ package org.apache.lucene.codecs;
import java.io.IOException;
import java.util.Comparator;

import org.apache.lucene.index.FieldInfo; // javadocs
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.TermsEnum;

@@ -30,9 +31,25 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;

/**
 * Abstract API that consumes terms for an individual field.
 * <p>
 * The lifecycle is:
 * <ol>
 * <li>TermsConsumer is returned for each field
 * by {@link FieldsConsumer#addField(FieldInfo)}.
 * <li>TermsConsumer returns a {@link PostingsConsumer} for
 * each term in {@link #startTerm(BytesRef)}.
 * <li>When the producer (e.g. IndexWriter)
 * is done adding documents for the term, it calls
 * {@link #finishTerm(BytesRef, TermStats)}, passing in
 * the accumulated term statistics.
 * <li>Producer calls {@link #finish(long, long, int)} with
 * the accumulated collection statistics when it is finished
 * adding terms to the field.
 * </ol>
 *
 * @lucene.experimental
 */

public abstract class TermsConsumer {

  /** Starts a new term in this field; this may be called

@@ -50,11 +67,11 @@ public abstract class TermsConsumer {
   * before feeding to this API. */
  public abstract Comparator<BytesRef> getComparator() throws IOException;

  /** Default merge impl */
  private MappingMultiDocsEnum docsEnum;
  private MappingMultiDocsEnum docsAndFreqsEnum;
  private MappingMultiDocsAndPositionsEnum postingsEnum;

  /** Default merge impl */
  public void merge(MergeState mergeState, TermsEnum termsEnum) throws IOException {

    BytesRef term;

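Putting the TermsConsumer lifecycle together: a hypothetical skeleton (not from this commit), assuming the 4.0-era API and that BytesRef.getUTF8SortedAsUnicodeComparator() is an acceptable term order; SkeletonTermsConsumer is a made-up name:

import java.io.IOException;
import java.util.Comparator;

import org.apache.lucene.codecs.PostingsConsumer;
import org.apache.lucene.codecs.TermStats;
import org.apache.lucene.codecs.TermsConsumer;
import org.apache.lucene.util.BytesRef;

// Hypothetical skeleton: startTerm() hands out a PostingsConsumer for each
// term, finishTerm() receives the accumulated per-term statistics, and
// finish() receives the field-level totals once the last term is done.
public class SkeletonTermsConsumer extends TermsConsumer {
  private final PostingsConsumer postings;

  public SkeletonTermsConsumer(PostingsConsumer postings) {
    this.postings = postings;
  }

  @Override
  public PostingsConsumer startTerm(BytesRef term) throws IOException {
    return postings; // postings for this term are pushed into this consumer
  }

  @Override
  public void finishTerm(BytesRef term, TermStats stats) throws IOException {
    // stats carries docFreq and totalTermFreq accumulated by the producer.
  }

  @Override
  public void finish(long sumTotalTermFreq, long sumDocFreq, int docCount) throws IOException {
    // Field-level collection statistics; called once per field.
  }

  @Override
  public Comparator<BytesRef> getComparator() throws IOException {
    return BytesRef.getUTF8SortedAsUnicodeComparator(); // terms must arrive in this order
  }
}
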
@@ -30,9 +30,9 @@ import java.io.Closeable;
// frequent indexing

/**
 * TermsDictReader interacts with an instance of this class
 * {@link BlockTermsReader} interacts with an instance of this class
 * to manage its terms index. The writer must accept
 * indexed terms (many pairs of CharSequence text + long
 * indexed terms (many pairs of BytesRef text + long
 * fileOffset), and then this reader must be able to
 * retrieve the nearest index term to a provided term
 * text.

@@ -48,13 +48,15 @@ public abstract class TermsIndexReaderBase implements Closeable {

  public abstract int getDivisor();

  // Similar to TermsEnum, except, the only "metadata" it
  // reports for a given indexed term is the long fileOffset
  // into the main terms dict (_X.tis) file:
  /**
   * Similar to TermsEnum, except, the only "metadata" it
   * reports for a given indexed term is the long fileOffset
   * into the main terms dictionary file:
   */
  public static abstract class FieldIndexEnum {

    /** Seeks to "largest" indexed term that's <=
     * term; retruns file pointer index (into the main
     * term; returns file pointer index (into the main
     * terms index file) for that term */
    public abstract long seek(BytesRef term) throws IOException;

@@ -63,8 +65,10 @@ public abstract class TermsIndexReaderBase implements Closeable {

    public abstract BytesRef term();

    // Only impl'd if supportsOrd() returns true!
    /** Only implemented if {@link TermsIndexReaderBase#supportsOrd()} returns true. */
    public abstract long seek(long ord) throws IOException;

    /** Only implemented if {@link TermsIndexReaderBase#supportsOrd()} returns true. */
    public abstract long ord();
  }
}

@@ -23,9 +23,18 @@ import org.apache.lucene.util.BytesRef;
import java.io.Closeable;
import java.io.IOException;

/** @lucene.experimental */
/**
 * Base class for terms index implementations to plug
 * into {@link BlockTermsWriter}.
 *
 * @see TermsIndexReaderBase
 * @lucene.experimental
 */
public abstract class TermsIndexWriterBase implements Closeable {

  /**
   * Terms index API for a single field.
   */
  public abstract class FieldWriter {
    public abstract boolean checkIndexTerm(BytesRef text, TermStats stats) throws IOException;
    public abstract void add(BytesRef text, TermStats stats, long termsFilePointer) throws IOException;

@@ -66,6 +66,11 @@ public abstract class FixedIntBlockIndexInput extends IntIndexInput {

  protected abstract BlockReader getBlockReader(IndexInput in, int[] buffer) throws IOException;

  /**
   * Interface for fixed-size block decoders.
   * <p>
   * Implementations should decode into the buffer in {@link #readBlock}.
   */
  public interface BlockReader {
    public void readBlock() throws IOException;
  }

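To make that contract concrete: each readBlock() call must fill the shared buffer that was handed to getBlockReader(). The following is a hypothetical sketch, not from this commit; the vInt encoding and the VIntBlockReaderFactory name are assumptions for illustration, and the import path assumes the trunk package layout of the time:

import java.io.IOException;

import org.apache.lucene.codecs.intblock.FixedIntBlockIndexInput;
import org.apache.lucene.store.IndexInput;

// Hypothetical decoder: each readBlock() call decodes one full block of
// vInt-encoded values from the file into the shared int[] buffer.
class VIntBlockReaderFactory {
  static FixedIntBlockIndexInput.BlockReader newReader(final IndexInput in, final int[] buffer) {
    return new FixedIntBlockIndexInput.BlockReader() {
      @Override
      public void readBlock() throws IOException {
        for (int i = 0; i < buffer.length; i++) {
          buffer[i] = in.readVInt();
        }
      }
    };
  }
}
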
@@ -68,6 +68,11 @@ public abstract class VariableIntBlockIndexInput extends IntIndexInput {

  protected abstract BlockReader getBlockReader(IndexInput in, int[] buffer) throws IOException;

  /**
   * Interface for variable-size block decoders.
   * <p>
   * Implementations should decode into the buffer in {@link #readBlock}.
   */
  public interface BlockReader {
    public int readBlock() throws IOException;
    public void seek(long pos) throws IOException;

@@ -31,6 +31,10 @@ import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.IOUtils;

/**
 * Writes plain-text DocValues.
 * <p>
 * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
 *
 * @lucene.experimental
 */
public class SimpleTextDocValuesConsumer extends DocValuesConsumer {

@@ -26,7 +26,12 @@ import org.apache.lucene.index.PerDocWriteState;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.util.BytesRef;

/**
 * Plain-text DocValues format.
 * <p>
 * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
 *
 * @lucene.experimental
 */
public class SimpleTextDocValuesFormat extends DocValuesFormat {

@@ -38,7 +38,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;

/**
 * plain-text norms format
 * plain-text norms format.
 * <p>
 * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
 *

@@ -63,6 +63,13 @@ public class SimpleTextNormsFormat extends NormsFormat {
    SimpleTextNormsPerDocConsumer.files(info, files);
  }

  /**
   * Reads plain-text norms.
   * <p>
   * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
   *
   * @lucene.experimental
   */
  public static class SimpleTextNormsPerDocProducer extends
      SimpleTextPerDocProducer {

@@ -88,6 +95,13 @@ public class SimpleTextNormsFormat extends NormsFormat {

  }

  /**
   * Writes plain-text norms.
   * <p>
   * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
   *
   * @lucene.experimental
   */
  public static class SimpleTextNormsPerDocConsumer extends
      SimpleTextPerDocConsumer {

@@ -47,6 +47,10 @@ import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.packed.PackedInts.Reader;

/**
 * Reads plain-text DocValues.
 * <p>
 * <b><font color="red">FOR RECREATIONAL USE ONLY</font></B>
 *
 * @lucene.experimental
 */
public class SimpleTextPerDocProducer extends PerDocProducerBase {

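These SimpleText classes exist for debugging: with the index switched to the plain-text codec, its files (presumably including these DocValues and norms) can be read in a text editor. A hypothetical setup, assuming the 4.0-era IndexWriterConfig.setCodec() API; as the javadocs above stress, never use this in production:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.codecs.simpletext.SimpleTextCodec;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

// Hypothetical debugging setup: write an index whose files are plain text.
public class SimpleTextDebug {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40,
        new StandardAnalyzer(Version.LUCENE_40));
    config.setCodec(new SimpleTextCodec()); // human-readable index files
    IndexWriter writer = new IndexWriter(dir, config);
    // ... add documents here, then inspect the directory contents by eye
    writer.close();
  }
}
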
@@ -51,6 +51,12 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 * Example servlet that uses the XML queryparser.
 * <p>
 * NOTE: you must provide CSV data in <code>/WEB-INF/data.tsv</code>
 * for the demo to work!
 */
public class FormBasedXmlQueryDemo extends HttpServlet {

  private QueryTemplateManager queryTemplateManager;

@@ -21,6 +21,10 @@ import java.io.IOException;
import java.io.FileDescriptor;
import java.nio.ByteBuffer;

/**
 * Provides JNI access to native methods such as madvise() for
 * {@link NativeUnixDirectory}
 */
public final class NativePosixUtil {
  public final static int NORMAL = 0;
  public final static int SEQUENTIAL = 1;

@@ -75,7 +75,7 @@ public class WindowsDirectory extends FSDirectory {
    return new WindowsIndexInput(new File(getDirectory(), name), Math.max(BufferedIndexInput.bufferSize(context), DEFAULT_BUFFERSIZE));
  }

  protected static class WindowsIndexInput extends BufferedIndexInput {
  static class WindowsIndexInput extends BufferedIndexInput {
    private final long fd;
    private final long length;
    boolean isClone;

@@ -34,6 +34,12 @@ import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.UnicodeUtil;

/**
 * Suggest implementation based on
 * <a href="http://jaspell.sourceforge.net/">JaSpell</a>.
 *
 * @see JaspellTernarySearchTrie
 */
public class JaspellLookup extends Lookup {
  JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie();
  private boolean usePrefix = true;

@@ -19,6 +19,11 @@ package org.apache.lucene.search.suggest.tst;

import java.util.*;

/**
 * Ternary Search Trie implementation.
 *
 * @see TernaryTreeNode
 */
public class TSTAutocomplete {

  /**

@@ -33,6 +33,12 @@ import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.UnicodeUtil;

/**
 * Suggest implementation based on a
 * <a href="http://en.wikipedia.org/wiki/Ternary_search_tree">Ternary Search Tree</a>
 *
 * @see TSTAutocomplete
 */
public class TSTLookup extends Lookup {
  TernaryTreeNode root = new TernaryTreeNode();
  TSTAutocomplete autocomplete = new TSTAutocomplete();