mirror of https://github.com/apache/lucene.git
javadocs
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1373895 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent b655cf78bf
commit 85c6e76ab8
BlockTermState.java
@@ -26,11 +26,15 @@ import org.apache.lucene.index.TermState;
  * terms dict.
  */
 public class BlockTermState extends OrdTermState {
-  public int docFreq;            // how many docs have this term
-  public long totalTermFreq;     // total number of occurrences of this term
+  /** how many docs have this term */
+  public int docFreq;
+  /** total number of occurrences of this term */
+  public long totalTermFreq;

-  public int termBlockOrd;       // the term's ord in the current block
-  public long blockFilePointer;  // fp into the terms dict primary file (_X.tim) that holds this term
+  /** the term's ord in the current block */
+  public int termBlockOrd;
+  /** fp into the terms dict primary file (_X.tim) that holds this term */
+  public long blockFilePointer;

   @Override
   public void copyFrom(TermState _other) {
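The hunk cuts off just as copyFrom begins. As a hedged sketch of what that method presumably does with the four fields documented above (not the verbatim Lucene body), the term stats are copied member by member:

    BlockTermState other = (BlockTermState) _other;
    super.copyFrom(_other);             // OrdTermState copies its own state
    docFreq = other.docFreq;
    totalTermFreq = other.totalTermFreq;
    termBlockOrd = other.termBlockOrd;
    blockFilePointer = other.blockFilePointer;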
MultiLevelSkipListReader.java
@@ -36,7 +36,7 @@ import org.apache.lucene.util.MathUtil;
  */

 public abstract class MultiLevelSkipListReader {
-  // the maximum number of skip levels possible for this index
+  /** the maximum number of skip levels possible for this index */
   protected int maxNumberOfSkipLevels;

   // number of levels in this skip list
MultiLevelSkipListWriter.java
@@ -49,7 +49,7 @@ import org.apache.lucene.util.MathUtil;
  */

 public abstract class MultiLevelSkipListWriter {
-  // number of levels in this skip list
+  /** number of levels in this skip list */
   protected int numberOfSkipLevels;

   // the skip interval in the list with level = 0
@@ -77,8 +77,8 @@ public abstract class MultiLevelSkipListWriter {
     }
   }

+  /** creates new buffers or empties the existing ones */
   protected void resetSkip() {
-    // creates new buffers or empties the existing ones
     if (skipBuffer == null) {
       init();
     } else {
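Given the org.apache.lucene.util.MathUtil import visible in both skip-list hunk headers, the level count that numberOfSkipLevels documents is plausibly derived from the doc frequency and the level-0 skip interval. A hedged sketch, where df, skipInterval, skipMultiplier, and maxSkipLevels are assumed names:

    // assumed: one extra level per power of skipMultiplier above skipInterval
    int numberOfSkipLevels = (df <= skipInterval)
        ? 1
        : 1 + MathUtil.log(df / skipInterval, skipMultiplier);
    if (numberOfSkipLevels > maxSkipLevels) {
      numberOfSkipLevels = maxSkipLevels;
    }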
DocTermOrds.java
@@ -119,10 +119,13 @@ public class DocTermOrds {
   protected final String field;

   protected int numTermsInField;
-  protected long termInstances; // total number of references to term numbers
+  /** total number of references to term numbers */
+  protected long termInstances;
   private long memsz;
-  protected int total_time;   // total time to uninvert the field
-  protected int phase1_time;  // time for phase1 of the uninvert process
+  /** total time to uninvert the field */
+  protected int total_time;
+  /** time for phase1 of the uninvert process */
+  protected int phase1_time;

   protected int[] index;
   protected byte[][] tnums = new byte[256][];
@@ -234,7 +237,7 @@ public class DocTermOrds {
   protected void setActualDocFreq(int termNum, int df) throws IOException {
   }

-  // Call this only once (if you subclass!)
+  /** Call this only once (if you subclass!) */
   protected void uninvert(final AtomicReader reader, final BytesRef termPrefix) throws IOException {
     //System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);
     final long startTime = System.currentTimeMillis();
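The total_time and phase1_time fields documented in the previous hunk pair with this startTime. A minimal sketch of the timing pattern, with doPhase1 and finishUninvert as hypothetical stand-ins for the real uninvert work:

    long startTime = System.currentTimeMillis();
    doPhase1();        // hypothetical: first pass assigning term numbers
    int phase1_time = (int) (System.currentTimeMillis() - startTime);
    finishUninvert();  // hypothetical: remaining uninvert work
    int total_time = (int) (System.currentTimeMillis() - startTime);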
FieldInfos.java
@@ -249,11 +249,11 @@ public class FieldInfos implements Iterable<FieldInfo> {
     return addOrUpdateInternal(name, -1, isIndexed, storeTermVector, omitNorms, storePayloads, indexOptions, docValues, normType);
   }

-  // NOTE: this method does not carry over termVector
-  // booleans nor docValuesType; the indexer chain
-  // (TermVectorsConsumerPerField, DocFieldProcessor) must
-  // set these fields when they succeed in consuming
-  // the document:
+  /** NOTE: this method does not carry over termVector
+   *  booleans nor docValuesType; the indexer chain
+   *  (TermVectorsConsumerPerField, DocFieldProcessor) must
+   *  set these fields when they succeed in consuming
+   *  the document */
   public FieldInfo addOrUpdate(String name, IndexableFieldType fieldType) {
     // TODO: really, indexer shouldn't even call this
     // method (it's only called from DocFieldProcessor);
SegmentInfos.java
@@ -133,7 +133,8 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCommit> {
   // or wrote; this is normally the same as generation except if
   // there was an IOException that had interrupted a commit

-  public Map<String,String> userData = Collections.<String,String>emptyMap();  // Opaque Map<String, String> that user can specify during IndexWriter.commit
+  /** Opaque Map<String, String> that user can specify during IndexWriter.commit */
+  public Map<String,String> userData = Collections.<String,String>emptyMap();

   private List<SegmentInfoPerCommit> segments = new ArrayList<SegmentInfoPerCommit>();
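A hedged usage sketch for the opaque map: assuming the IndexWriter commit variant of this era that accepts commit user data, an application can stamp arbitrary strings into the segments file and read them back later from an IndexCommit:

    Map<String,String> commitData = new HashMap<String,String>();
    commitData.put("sourceBatch", "2012-08-16");  // opaque to Lucene
    writer.commit(commitData);                    // assumed commit overload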
SegmentReadState.java
@@ -30,11 +30,11 @@ public class SegmentReadState {
   public final FieldInfos fieldInfos;
   public final IOContext context;

-  // NOTE: if this is < 0, that means "defer terms index
-  // load until needed".  But if the codec must load the
-  // terms index on init (preflex is the only once currently
-  // that must do so), then it should negate this value to
-  // get the app's terms divisor:
+  /** NOTE: if this is < 0, that means "defer terms index
+   *  load until needed".  But if the codec must load the
+   *  terms index on init (preflex is the only one currently
+   *  that must do so), then it should negate this value to
+   *  get the app's terms divisor */
   public int termsIndexDivisor;
   public final String segmentSuffix;
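A short sketch of the negation convention the note describes, where state is an assumed SegmentReadState in hand:

    int divisor = state.termsIndexDivisor;
    if (divisor < 0) {
      // the app asked to defer the terms index load, but this codec must
      // load on init, so negate to recover the app's actual divisor
      divisor = -divisor;
    }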
SegmentWriteState.java
@@ -33,11 +33,11 @@ public class SegmentWriteState {
   public final FieldInfos fieldInfos;
   public int delCountOnFlush;

-  // Deletes to apply while we are flushing the segment.  A
-  // Term is enrolled in here if it was deleted at one
-  // point, and it's mapped to the docIDUpto, meaning any
-  // docID < docIDUpto containing this term should be
-  // deleted.
+  /** Deletes to apply while we are flushing the segment.  A
+   *  Term is enrolled in here if it was deleted at one
+   *  point, and it's mapped to the docIDUpto, meaning any
+   *  docID < docIDUpto containing this term should be
+   *  deleted. */
   public final BufferedDeletes segDeletes;

   // Lazily created:
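The docIDUpto rule reads as follows in a hedged sketch; the map-style lookup on BufferedDeletes and the deleteDocument helper are assumptions for illustration:

    Integer docIDUpto = segDeletes.terms.get(term);  // assumed lookup
    if (docIDUpto != null && docID < docIDUpto) {
      // any docID below docIDUpto that contains this term is deleted
      deleteDocument(docID);                         // hypothetical helper
    }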
IndexSearcher.java
@@ -82,7 +82,7 @@ public class IndexSearcher {
   // in the next release
   protected final IndexReaderContext readerContext;
   protected final List<AtomicReaderContext> leafContexts;
-  // used with executor - each slice holds a set of leafs executed within one thread
+  /** used with executor - each slice holds a set of leaves executed within one thread */
   protected final LeafSlice[] leafSlices;

   // These are only used for multi-threaded search
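A usage sketch for the executor path this field supports; the two-argument IndexSearcher constructor taking an ExecutorService (java.util.concurrent) exists in this era of the API:

    ExecutorService pool = Executors.newFixedThreadPool(4);
    IndexSearcher searcher = new IndexSearcher(reader, pool);
    // each LeafSlice is then searched by one task submitted to the pool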
TopDocsCollector.java
@@ -33,8 +33,8 @@ import org.apache.lucene.util.PriorityQueue;
  */
 public abstract class TopDocsCollector<T extends ScoreDoc> extends Collector {

-  // This is used in case topDocs() is called with illegal parameters, or there
-  // simply aren't (enough) results.
+  /** This is used in case topDocs() is called with illegal parameters, or there
+   *  simply aren't (enough) results. */
   protected static final TopDocs EMPTY_TOPDOCS = new TopDocs(0, new ScoreDoc[0], Float.NaN);

   /**
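A hedged illustration of the guard the new javadoc describes: asking for results beyond what was collected yields the empty sentinel rather than null.

    TopDocs td = collector.topDocs(collector.getTotalHits(), 10); // start too far
    // expected per the comment: td.totalHits == 0 and td.scoreDocs.length == 0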
ByteBlockPool.java
@@ -117,10 +117,13 @@ public final class ByteBlockPool {
   public byte[][] buffers = new byte[10][];

   int bufferUpto = -1;                         // Which buffer we are upto
-  public int byteUpto = BYTE_BLOCK_SIZE;       // Where we are in head buffer
+  /** Where we are in head buffer */
+  public int byteUpto = BYTE_BLOCK_SIZE;

-  public byte[] buffer;                        // Current head buffer
-  public int byteOffset = -BYTE_BLOCK_SIZE;    // Current head offset
+  /** Current head buffer */
+  public byte[] buffer;
+  /** Current head offset */
+  public int byteOffset = -BYTE_BLOCK_SIZE;

   private final Allocator allocator;

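A sketch of the bookkeeping these comments imply, simplified and assumed rather than quoted: advancing to a new head buffer resets byteUpto and slides byteOffset, so a byte's global address is always byteOffset plus its index within the head buffer.

    void nextBuffer() {               // assumed helper name
      bufferUpto++;
      buffer = buffers[bufferUpto];
      byteUpto = 0;                   // start of the new head buffer
      byteOffset += BYTE_BLOCK_SIZE;  // -BYTE_BLOCK_SIZE initial value makes this 0 on first call
    }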
FST.java
@@ -158,7 +158,7 @@ public final class FST<T> {
   private final boolean packed;
   private PackedInts.Reader nodeRefToAddress;

-  // If arc has this label then that arc is final/accepted
+  /** If arc has this label then that arc is final/accepted */
   public static final int END_LABEL = -1;

   private boolean allowArrayArcs = true;
@@ -174,7 +174,7 @@ public final class FST<T> {
   // building an FST w/ willPackFST=true:
   int node;

-  // To node (ord or address):
+  /** To node (ord or address) */
   public int target;

   byte flags;
@@ -542,8 +542,8 @@ public final class FST<T> {
     return v;
   }

-  // returns true if the node at this address has any
-  // outgoing arcs
+  /** returns true if the node at this address has any
+   *  outgoing arcs */
   public static<T> boolean targetHasArcs(Arc<T> arc) {
     return arc.target > 0;
   }
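Pulling the three FST hunks together, a hedged sketch of how a caller uses these members, with arc an FST.Arc<T> already positioned by other API calls:

    if (FST.targetHasArcs(arc)) {
      // arc.target (a node ord or address, per the Arc hunk) has outgoing arcs
    }
    if (arc.label == FST.END_LABEL) {
      // this arc is final/accepted
    }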