mirror of https://github.com/apache/lucene.git

commit 1079014287 (parent 8cb409e692)

javadocs

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1388436 13f79535-47bb-0310-9956-ffa450edef68
@@ -264,6 +264,7 @@
    <check-missing-javadocs dir="build/docs/core/org/apache/lucene/analysis" level="method"/>
    <check-missing-javadocs dir="build/docs/core/org/apache/lucene/document" level="method"/>
    <check-missing-javadocs dir="build/docs/core/org/apache/lucene/search/similarities" level="method"/>
    <check-missing-javadocs dir="build/docs/core/org/apache/lucene/index" level="method"/>
  </sequential>
</target>
@@ -48,6 +48,8 @@ public abstract class AtomicReader extends IndexReader {

  private final AtomicReaderContext readerContext = new AtomicReaderContext(this);

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected AtomicReader() {
    super();
  }
@@ -30,6 +30,7 @@ import java.util.Map;

import org.apache.lucene.codecs.BlockTreeTermsReader;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType; // for javadocs
import org.apache.lucene.index.DocValues.SortedSource;

@@ -70,6 +71,9 @@ public class CheckIndex {

  public static class Status {

    Status() {
    }

    /** True if no problems were found with the index. */
    public boolean clean;

@@ -134,6 +138,10 @@ public class CheckIndex {
   * @lucene.experimental
   */
  public static class SegmentInfoStatus {

    SegmentInfoStatus() {
    }

    /** Name of the segment. */
    public String name;

@@ -207,6 +215,9 @@ public class CheckIndex {
   * Status from testing field norms.
   */
  public static final class FieldNormStatus {
    private FieldNormStatus() {
    }

    /** Number of fields successfully tested */
    public long totFields = 0L;

@@ -218,6 +229,10 @@ public class CheckIndex {
   * Status from testing term index.
   */
  public static final class TermIndexStatus {

    TermIndexStatus() {
    }

    /** Total term count */
    public long termCount = 0L;

@@ -230,6 +245,10 @@ public class CheckIndex {
    /** Exception thrown during term index test (null on success) */
    public Throwable error = null;

    /** Holds details of block allocations in the block
     *  tree terms dictionary (this is only set if the
     *  {@link PostingsFormat} for this segment uses block
     *  tree). */
    public Map<String,BlockTreeTermsReader.Stats> blockTreeStats = null;
  }

@@ -237,6 +256,9 @@ public class CheckIndex {
   * Status from testing stored fields.
   */
  public static final class StoredFieldStatus {

    StoredFieldStatus() {
    }

    /** Number of documents tested. */
    public int docCount = 0;

@@ -253,6 +275,9 @@ public class CheckIndex {
   */
  public static final class TermVectorStatus {

    TermVectorStatus() {
    }

    /** Number of documents tested. */
    public int docCount = 0;

@@ -267,6 +292,10 @@ public class CheckIndex {
   * Status from testing DocValues
   */
  public static final class DocValuesStatus {

    DocValuesStatus() {
    }

    /** Number of documents tested. */
    public int docCount;
    /** Total number of docValues tested. */
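As context for the Status classes documented above, here is a minimal sketch of running CheckIndex and inspecting the result (the directory path is a placeholder and exception handling is omitted; this is not part of the commit itself):

    // Open the index directory and run CheckIndex over it.
    Directory dir = FSDirectory.open(new File("index-dir"));
    CheckIndex checker = new CheckIndex(dir);
    checker.setInfoStream(System.out);       // print per-segment detail while checking
    CheckIndex.Status status = checker.checkIndex();
    if (!status.clean) {
      // status.segmentInfos holds one SegmentInfoStatus per tested segment.
      System.out.println(status.segmentInfos.size() + " segment(s) tested; index has problems");
    }
    dir.close();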
@@ -57,6 +57,8 @@ public abstract class CompositeReader extends IndexReader {

  private volatile CompositeReaderContext readerContext = null; // lazy init

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected CompositeReader() {
    super();
  }
@@ -47,6 +47,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {

  private int mergeThreadPriority = -1;

  /** List of currently active {@link MergeThread}s. */
  protected List<MergeThread> mergeThreads = new ArrayList<MergeThread>();

  // Max number of merge threads allowed to be running at

@@ -63,11 +64,21 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
  // throttling the incoming threads
  private int maxMergeCount = maxThreadCount+2;

  /** {@link Directory} that holds the index. */
  protected Directory dir;

  /** {@link IndexWriter} that owns this instance. */
  protected IndexWriter writer;

  /** How many {@link MergeThread}s have kicked off (this is used
   *  to name them). */
  protected int mergeThreadCount;

  /** Sole constructor, with all settings set to default
   *  values. */
  public ConcurrentMergeScheduler() {
  }

  /** Sets the max # simultaneous merge threads that should
   *  be running at once.  This must be <= {@link
   *  #setMaxMergeCount}. */

@@ -81,7 +92,9 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
    maxThreadCount = count;
  }

  /** @see #setMaxThreadCount(int) */
  /** Returns {@code maxThreadCount}.
   *
   * @see #setMaxThreadCount(int) */
  public int getMaxThreadCount() {
    return maxThreadCount;
  }

@@ -129,7 +142,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
    updateMergeThreads();
  }

  // Larger merges come first
  /** Sorts {@link MergeThread}s; larger merges come first. */
  protected static final Comparator<MergeThread> compareByMergeDocCount = new Comparator<MergeThread>() {
    public int compare(MergeThread t1, MergeThread t2) {
      final MergePolicy.OneMerge m1 = t1.getCurrentMerge();

@@ -398,6 +411,8 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
    return thread;
  }

  /** Runs a merge thread, which may run one or more merges
   *  in sequence. */
  protected class MergeThread extends Thread {

    IndexWriter tWriter;

@@ -405,19 +420,24 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
    MergePolicy.OneMerge runningMerge;
    private volatile boolean done;

    /** Sole constructor. */
    public MergeThread(IndexWriter writer, MergePolicy.OneMerge startMerge) {
      this.tWriter = writer;
      this.startMerge = startMerge;
    }

    /** Record the currently running merge. */
    public synchronized void setRunningMerge(MergePolicy.OneMerge merge) {
      runningMerge = merge;
    }

    /** Return the currently running merge. */
    public synchronized MergePolicy.OneMerge getRunningMerge() {
      return runningMerge;
    }

    /** Return the current merge, or null if this {@code
     *  MergeThread} is done. */
    public synchronized MergePolicy.OneMerge getCurrentMerge() {
      if (done) {
        return null;

@@ -428,6 +448,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
      }
    }

    /** Set the priority of this thread. */
    public void setThreadPriority(int pri) {
      try {
        setPriority(pri);
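A hedged sketch of wiring the settings documented above into an IndexWriterConfig (the analyzer and the specific values are illustrative assumptions, not defaults from this commit):

    // Allow at most 2 concurrent merge threads, queueing up to 4 pending merges.
    ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
    cms.setMaxMergeCount(4);    // must be >= the max thread count
    cms.setMaxThreadCount(2);   // must be <= the max merge count
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    iwc.setMergeScheduler(cms);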
@@ -24,6 +24,7 @@ import java.io.IOException;
 * an inconsistency in the index.
 */
public class CorruptIndexException extends IOException {
  /** Sole constructor. */
  public CorruptIndexException(String message) {
    super(message);
  }
@@ -49,6 +49,8 @@ import org.apache.lucene.store.Directory;
 (non-Lucene) objects instead.
 */
public abstract class DirectoryReader extends BaseCompositeReader<AtomicReader> {

  /** Default termInfosIndexDivisor. */
  public static final int DEFAULT_TERMS_INDEX_DIVISOR = 1;

  /** The index directory. */
@@ -17,17 +17,18 @@

package org.apache.lucene.index;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.PagedBytes;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.StringHelper;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Comparator;
import java.util.List;

import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PagedBytes;
import org.apache.lucene.util.StringHelper;

/**
 * This class enables fast access to multiple term ords for

@@ -107,35 +108,57 @@ public class DocTermOrds {
  // values 0 (end term) and 1 (index is a pointer into byte array)
  private final static int TNUM_OFFSET = 2;

  // Default: every 128th term is indexed
  /** Every 128th term is indexed, by default. */
  public final static int DEFAULT_INDEX_INTERVAL_BITS = 7; // decrease to a low number like 2 for testing

  private int indexIntervalBits;
  private int indexIntervalMask;
  private int indexInterval;

  /** Don't uninvert terms that exceed this count. */
  protected final int maxTermDocFreq;

  /** Field we are uninverting. */
  protected final String field;

  /** Number of terms in the field. */
  protected int numTermsInField;

  /** total number of references to term numbers */
  /** Total number of references to term numbers. */
  protected long termInstances;
  private long memsz;

  /** total time to uninvert the field */
  /** Total time to uninvert the field. */
  protected int total_time;

  /** time for phase1 of the uninvert process */
  /** Time for phase1 of the uninvert process. */
  protected int phase1_time;

  /** Holds the per-document ords or a pointer to the ords. */
  protected int[] index;

  /** Holds term ords for documents. */
  protected byte[][] tnums = new byte[256][];

  /** Total bytes (sum of term lengths) for all indexed terms.*/
  protected long sizeOfIndexedStrings;

  /** Holds the indexed (by default every 128th) terms. */
  protected BytesRef[] indexedTermsArray;

  /** If non-null, only terms matching this prefix were
   *  indexed. */
  protected BytesRef prefix;

  /** Ordinal of the first term in the field, or 0 if the
   *  {@link PostingsFormat} does not implement {@link
   *  TermsEnum#ord}. */
  protected int ordBase;

  protected DocsEnum docsEnum; //used while uninverting
  /** Used while uninverting. */
  protected DocsEnum docsEnum;

  /** Returns total bytes used. */
  public long ramUsedInBytes() {
    // can cache the mem size since it shouldn't change
    if (memsz!=0) return memsz;

@@ -217,14 +240,14 @@ public class DocTermOrds {
  }

  /**
   * @return The number of terms in this field
   * Returns the number of terms in this field
   */
  public int numTerms() {
    return numTermsInField;
  }

  /**
   * @return Whether this <code>DocTermOrds</code> instance is empty.
   * Returns {@code true} if no terms were indexed.
   */
  public boolean isEmpty() {
    return index == null;

@@ -234,6 +257,9 @@ public class DocTermOrds {
  protected void visitTerm(TermsEnum te, int termNum) throws IOException {
  }

  /** Invoked during {@link #uninvert(AtomicReader,BytesRef)}
   *  to record the document frequency for each uninverted
   *  term. */
  protected void setActualDocFreq(int termNum, int df) throws IOException {
  }

@@ -570,11 +596,15 @@ public class DocTermOrds {
    return pos;
  }

  /** Iterates over the ords for a single document. */
  public class TermOrdsIterator {
    private int tnum;
    private int upto;
    private byte[] arr;

    TermOrdsIterator() {
    }

    /** Buffer must be at least 5 ints long.  Returns number
     *  of term ords placed into buffer; if this count is
     *  less than buffer.length then that is the end. */

@@ -620,6 +650,7 @@ public class DocTermOrds {
      return bufferUpto;
    }

    /** Reset the iterator on a new document. */
    public TermOrdsIterator reset(int docID) {
      //System.out.println("  reset docID=" + docID);
      tnum = 0;

@@ -810,6 +841,8 @@ public class DocTermOrds {
    }
  }

  /** Returns the term ({@link BytesRef}) corresponding to
   *  the provided ordinal. */
  public BytesRef lookupTerm(TermsEnum termsEnum, int ord) throws IOException {
    termsEnum.seekExact(ord);
    return termsEnum.term();
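A hedged sketch of the TermOrdsIterator read-loop contract described above (the reader, field name, and docID are assumed inputs; exception handling is omitted):

    // Uninvert a field once, then walk one document's term ords.
    DocTermOrds ords = new DocTermOrds(atomicReader, "field");
    DocTermOrds.TermOrdsIterator iter = ords.lookup(docID, null);
    int[] buffer = new int[5];   // per the contract: at least 5 ints long
    int count;
    do {
      count = iter.read(buffer);
      for (int i = 0; i < count; i++) {
        int ord = buffer[i];     // a term ordinal for this document
      }
    } while (count == buffer.length);  // a short read marks the end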
@@ -78,11 +78,17 @@ import org.apache.lucene.util.packed.PackedInts;
 */
public abstract class DocValues implements Closeable {

  /** Zero length DocValues array. */
  public static final DocValues[] EMPTY_ARRAY = new DocValues[0];

  private volatile SourceCache cache = new SourceCache.DirectSourceCache();
  private final Object cacheLock = new Object();

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected DocValues() {
  }

  /**
   * Loads a new {@link Source} instance for this {@link DocValues} field
   * instance. Source instances returned from this method are not cached. It is

@@ -173,9 +179,12 @@ public abstract class DocValues implements Closeable {
   * @see DocValues#getDirectSource()
   */
  public static abstract class Source {

    /** {@link Type} of this {@code Source}. */
    protected final Type type;

    /** Sole constructor. (For invocation by subclass
     *  constructors, typically implicit.) */
    protected Source(Type type) {
      this.type = type;
    }

@@ -261,6 +270,8 @@ public abstract class DocValues implements Closeable {
    private final Comparator<BytesRef> comparator;

    /** Sole constructor. (For invocation by subclass
     *  constructors, typically implicit.) */
    protected SortedSource(Type type, Comparator<BytesRef> comparator) {
      super(type);
      this.comparator = comparator;

@@ -685,6 +696,11 @@ public abstract class DocValues implements Closeable {
   */
  public static abstract class SourceCache {

    /** Sole constructor. (For invocation by subclass
     *  constructors, typically implicit.) */
    protected SourceCache() {
    }

    /**
     * Atomically loads a {@link Source} into the cache from the given
     * {@link DocValues} and returns it iff no other {@link Source} has already

@@ -717,6 +733,10 @@ public abstract class DocValues implements Closeable {
  public static final class DirectSourceCache extends SourceCache {
    private Source ref;

    /** Sole constructor. */
    public DirectSourceCache() {
    }

    public synchronized Source load(DocValues values) throws IOException {
      if (ref == null) {
        ref = values.load();
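A hedged sketch of reading a value through the Source/SourceCache machinery above, assuming the 4.0-era per-field DocValues API (the field name and docID are placeholders):

    // getSource() returns a cached Source; getDirectSource() bypasses the cache.
    DocValues docValues = atomicReader.docValues("popularity");
    if (docValues != null) {
      DocValues.Source source = docValues.getSource();
      long value = source.getInt(docID);  // assumes an integer-typed field
    }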
@@ -31,7 +31,12 @@ public abstract class DocsAndPositionsEnum extends DocsEnum {
  /** Flag to pass to {@link TermsEnum#docsAndPositions(Bits,DocsAndPositionsEnum,int)}
   *  if you require payloads in the returned enum. */
  public static final int FLAG_PAYLOADS = 0x2;

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected DocsAndPositionsEnum() {
  }

  /** Returns the next position.  You should only call this
   *  up to {@link DocsEnum#freq()} times else
   *  the behavior is not defined.  If positions were not
@@ -34,6 +34,11 @@ public abstract class DocsEnum extends DocIdSetIterator {

  private AttributeSource atts = null;

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected DocsEnum() {
  }

  /** Returns term frequency in the current document.  Do
   *  not call this before {@link #nextDoc} is first called,
   *  nor after {@link #nextDoc} returns NO_MORE_DOCS.
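A hedged sketch of the freq()/nextDoc() contract described above, iterating postings obtained from a positioned TermsEnum (the terms enum and live-docs bits are assumed inputs):

    // Walk the postings for the term the TermsEnum is currently on.
    DocsEnum docsEnum = termsEnum.docs(liveDocs, null);
    int doc;
    while ((doc = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      int freq = docsEnum.freq();  // only valid between nextDoc() calls
    }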
@@ -84,6 +84,8 @@ public final class FieldInfo {
  };

  /**
   * Sole constructor.
   *
   * @lucene.experimental
   */
  public FieldInfo(String name, boolean indexed, int number, boolean storeTermVector,
@@ -16,6 +16,7 @@
 */
package org.apache.lucene.index;

import org.apache.lucene.analysis.TokenStream; // javadocs
import org.apache.lucene.util.AttributeSource;

/**

@@ -36,10 +37,14 @@ public final class FieldInvertState {
  float boost;
  AttributeSource attributeSource;

  /** Creates {@code FieldInvertState} for the specified
   *  field name. */
  public FieldInvertState(String name) {
    this.name = name;
  }

  /** Creates {@code FieldInvertState} for the specified
   *  field name and values for all fields. */
  public FieldInvertState(String name, int position, int length, int numOverlap, int offset, float boost) {
    this.name = name;
    this.position = position;

@@ -79,6 +84,7 @@ public final class FieldInvertState {
    return length;
  }

  /** Set length value. */
  public void setLength(int length) {
    this.length = length;
  }

@@ -91,6 +97,8 @@ public final class FieldInvertState {
    return numOverlap;
  }

  /** Set number of terms with {@code positionIncrement ==
   *  0}. */
  public void setNumOverlap(int numOverlap) {
    this.numOverlap = numOverlap;
  }

@@ -112,7 +120,8 @@ public final class FieldInvertState {
  public float getBoost() {
    return boost;
  }

  /** Set boost value. */
  public void setBoost(float boost) {
    this.boost = boost;
  }

@@ -132,7 +141,10 @@ public final class FieldInvertState {
  public int getUniqueTermCount() {
    return uniqueTermCount;
  }

  /** Returns the {@link AttributeSource} from the {@link
   *  TokenStream} that provided the indexed tokens for this
   *  field. */
  public AttributeSource getAttributeSource() {
    return attributeSource;
  }
@@ -25,6 +25,11 @@ import java.util.Iterator;

public abstract class Fields implements Iterable<String> {

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected Fields() {
  }

  /** Returns an iterator that will step through all field
   *  names.  This will not return null. */
  public abstract Iterator<String> iterator();

@@ -38,5 +43,6 @@ public abstract class Fields implements Iterable<String> {
   *  {@link #iterator} will return as many field names. */
  public abstract int size();

  /** Zero-length {@code Fields} array. */
  public final static Fields[] EMPTY_ARRAY = new Fields[0];
}
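A hedged sketch of walking field names with this API (the Terms/TermsEnum calls are from the surrounding flex API, not added by this diff):

    // Fields is Iterable<String>, so field names can be enumerated directly.
    for (String field : fields) {
      Terms terms = fields.terms(field);  // may be null if the field has no terms
      if (terms != null) {
        TermsEnum termsEnum = terms.iterator(null);
      }
    }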
@@ -73,11 +73,19 @@ public abstract class IndexCommit implements Comparable<IndexCommit> {
   */
  public abstract void delete();

  /** Returns true if this commit should be deleted; this is
   *  only used by {@link IndexWriter} after invoking the
   *  {@link IndexDeletionPolicy}. */
  public abstract boolean isDeleted();

  /** Returns number of segments referenced by this commit. */
  public abstract int getSegmentCount();

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected IndexCommit() {
  }

  /** Two IndexCommits are equal if both their Directory and versions are equal. */
  @Override
  public boolean equals(Object other) {
@@ -25,14 +25,28 @@ import org.apache.lucene.store.DataInput;
 */
public class IndexFormatTooNewException extends CorruptIndexException {

  /** @lucene.internal */
  /** Creates an {@code IndexFormatTooNewException}.
   *
   * @param resourceDesc describes the file that was too new
   * @param version the version of the file that was too new
   * @param minVersion the minimum version accepted
   * @param maxVersion the maximum version accepted
   *
   * @lucene.internal */
  public IndexFormatTooNewException(String resourceDesc, int version, int minVersion, int maxVersion) {
    super("Format version is not supported (resource: " + resourceDesc + "): "
        + version + " (needs to be between " + minVersion + " and " + maxVersion + ")");
    assert resourceDesc != null;
  }

  /** @lucene.internal */
  /** Creates an {@code IndexFormatTooNewException}.
   *
   * @param in the open file that's too new
   * @param version the version of the file that was too new
   * @param minVersion the minimum version accepted
   * @param maxVersion the maximum version accepted
   *
   * @lucene.internal */
  public IndexFormatTooNewException(DataInput in, int version, int minVersion, int maxVersion) {
    this(in.toString(), version, minVersion, maxVersion);
  }
@@ -25,19 +25,36 @@ import org.apache.lucene.store.DataInput;
 */
public class IndexFormatTooOldException extends CorruptIndexException {

  /** @lucene.internal */
  /** Creates an {@code IndexFormatTooOldException}.
   *
   * @param resourceDesc describes the file that was too old
   * @param version the version of the file that was too old
   *
   * @lucene.internal */
  public IndexFormatTooOldException(String resourceDesc, String version) {
    super("Format version is not supported (resource: " + resourceDesc + "): " +
        version + ". This version of Lucene only supports indexes created with release 4.0 and later.");
    assert resourceDesc != null;
  }

  /** @lucene.internal */
  /** Creates an {@code IndexFormatTooOldException}.
   *
   * @param in the open file that's too old
   * @param version the version of the file that was too old
   *
   * @lucene.internal */
  public IndexFormatTooOldException(DataInput in, String version) {
    this(in.toString(), version);
  }

  /** @lucene.internal */
  /** Creates an {@code IndexFormatTooOldException}.
   *
   * @param resourceDesc describes the file that was too old
   * @param version the version of the file that was too old
   * @param minVersion the minimum version accepted
   * @param maxVersion the maximum version accepted
   *
   * @lucene.internal */
  public IndexFormatTooOldException(String resourceDesc, int version, int minVersion, int maxVersion) {
    super("Format version is not supported (resource: " + resourceDesc + "): " +
        version + " (needs to be between " + minVersion + " and " + maxVersion +

@@ -45,7 +62,14 @@ public class IndexFormatTooOldException extends CorruptIndexException {
    assert resourceDesc != null;
  }

  /** @lucene.internal */
  /** Creates an {@code IndexFormatTooOldException}.
   *
   * @param in the open file that's too old
   * @param version the version of the file that was too old
   * @param minVersion the minimum version accepted
   * @param maxVersion the maximum version accepted
   *
   * @lucene.internal */
  public IndexFormatTooOldException(DataInput in, int version, int minVersion, int maxVersion) {
    this(in.toString(), version, minVersion, maxVersion);
  }
@@ -25,6 +25,8 @@ import java.io.FileNotFoundException;
 */
public final class IndexNotFoundException extends FileNotFoundException {

  /** Creates {@code IndexNotFoundException} with the
   *  description message. */
  public IndexNotFoundException(String msg) {
    super(msg);
  }
@@ -88,6 +88,7 @@ public abstract class IndexReader implements Closeable {
   * @lucene.experimental
   */
  public static interface ReaderClosedListener {
    /** Invoked when the {@link IndexReader} is closed. */
    public void onClose(IndexReader reader);
  }
@@ -67,6 +67,8 @@ public final class IndexUpgrader {
    System.exit(1);
  }

  /** Main method to run {@code IndexUpgrader} from the
   *  command-line. */
  @SuppressWarnings("deprecation")
  public static void main(String[] args) throws IOException {
    String path = null;

@@ -132,7 +134,8 @@ public final class IndexUpgrader {
    this.iwc = iwc;
    this.deletePriorCommits = deletePriorCommits;
  }

  /** Perform the upgrade. */
  public void upgrade() throws IOException {
    if (!DirectoryReader.indexExists(dir)) {
      throw new IndexNotFoundException(dir.toString());
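A hedged sketch of both entry points documented above (the index path is a placeholder):

    // Programmatic upgrade of all segments to the current index format.
    Directory dir = FSDirectory.open(new File("/path/to/index"));
    new IndexUpgrader(dir, Version.LUCENE_40).upgrade();
    dir.close();

From the command line, the equivalent is roughly: java org.apache.lucene.index.IndexUpgrader /path/to/index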
@@ -3765,12 +3765,18 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    return segmentInfos.size() > 0 ? segmentInfos.info(segmentInfos.size()-1) : null;
  }

  /** @lucene.internal */
  /** Returns a string description of all segments, for
   *  debugging.
   *
   * @lucene.internal */
  public synchronized String segString() {
    return segString(segmentInfos);
  }

  /** @lucene.internal */
  /** Returns a string description of the specified
   *  segments, for debugging.
   *
   * @lucene.internal */
  public synchronized String segString(Iterable<SegmentInfoPerCommit> infos) {
    final StringBuilder buffer = new StringBuilder();
    for(final SegmentInfoPerCommit info : infos) {

@@ -3782,7 +3788,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
    return buffer.toString();
  }

  /** @lucene.internal */
  /** Returns a string description of the specified
   *  segment, for debugging.
   *
   * @lucene.internal */
  public synchronized String segString(SegmentInfoPerCommit info) {
    return info.toString(info.info.dir, numDeletedDocs(info) - info.getDelCount());
  }

@@ -3997,6 +4006,15 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
   * <p><b>NOTE</b>: warm is called before any deletes have
   *  been carried over to the merged segment. */
  public static abstract class IndexReaderWarmer {

    /** Sole constructor. (For invocation by subclass
     *  constructors, typically implicit.) */
    protected IndexReaderWarmer() {
    }

    /** Invoked on the {@link AtomicReader} for the newly
     *  merged segment, before that segment is made visible
     *  to near-real-time readers. */
    public abstract void warm(AtomicReader reader) throws IOException;
  }
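A hedged sketch of supplying an IndexReaderWarmer through IndexWriterConfig (the warming body and field name are illustrative; iwc is an assumed config instance):

    // Prime newly merged segments before NRT readers can see them.
    iwc.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
      @Override
      public void warm(AtomicReader reader) throws IOException {
        // e.g. touch the terms dictionary so first queries are not cold
        reader.terms("body");
      }
    });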
@@ -28,6 +28,10 @@ import java.util.List;

public final class KeepOnlyLastCommitDeletionPolicy implements IndexDeletionPolicy {

  /** Sole constructor. */
  public KeepOnlyLastCommitDeletionPolicy() {
  }

  /**
   * Deletes all commits except the most recent one.
   */
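A hedged one-liner showing where this policy plugs in (iwc is an assumed IndexWriterConfig; this is also the default policy, shown only for explicitness):

    iwc.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());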
@@ -46,21 +46,56 @@ public class LiveIndexWriterConfig {
  private volatile int termIndexInterval; // TODO: this should be private to the codec, not settable here

  // modified by IndexWriterConfig
  /** {@link IndexDeletionPolicy} controlling when commit
   *  points are deleted. */
  protected volatile IndexDeletionPolicy delPolicy;

  /** {@link IndexCommit} that {@link IndexWriter} is
   *  opened on. */
  protected volatile IndexCommit commit;

  /** {@link OpenMode} that {@link IndexWriter} is opened
   *  with. */
  protected volatile OpenMode openMode;

  /** {@link Similarity} to use when encoding norms. */
  protected volatile Similarity similarity;

  /** {@link MergeScheduler} to use for running merges. */
  protected volatile MergeScheduler mergeScheduler;

  /** Timeout when trying to obtain the write lock on init. */
  protected volatile long writeLockTimeout;

  /** {@link IndexingChain} that determines how documents are
   *  indexed. */
  protected volatile IndexingChain indexingChain;

  /** {@link Codec} used to write new segments. */
  protected volatile Codec codec;

  /** {@link InfoStream} for debugging messages. */
  protected volatile InfoStream infoStream;

  /** {@link MergePolicy} for selecting merges. */
  protected volatile MergePolicy mergePolicy;

  /** {@code DocumentsWriterPerThreadPool} to control how
   *  threads are allocated to {@code DocumentsWriterPerThread}. */
  protected volatile DocumentsWriterPerThreadPool indexerThreadPool;

  /** True if readers should be pooled. */
  protected volatile boolean readerPooling;

  /** {@link FlushPolicy} to control when segments are
   *  flushed. */
  protected volatile FlushPolicy flushPolicy;

  /** Sets the hard upper bound on RAM usage for a single
   *  segment, after which the segment is forced to flush. */
  protected volatile int perThreadHardLimitMB;

  /** {@link Version} that {@link IndexWriter} should emulate. */
  protected final Version matchVersion;

  // used by IndexWriterConfig

@@ -372,7 +407,9 @@ public class LiveIndexWriterConfig {
    return this;
  }

  /** @see #setReaderTermsIndexDivisor(int) */
  /** Returns the {@code termInfosIndexDivisor}.
   *
   * @see #setReaderTermsIndexDivisor(int) */
  public int getReaderTermsIndexDivisor() {
    return readerTermsIndexDivisor;
  }

@@ -494,7 +531,8 @@ public class LiveIndexWriterConfig {
    return flushPolicy;
  }

  /**
  /** Returns {@link InfoStream} used for debugging.
   *
   * @see IndexWriterConfig#setInfoStream(InfoStream)
   */
  public InfoStream getInfoStream() {
@@ -34,6 +34,8 @@ public class LogByteSizeMergePolicy extends LogMergePolicy {
   *  or larger will never be merged during forceMerge.  @see setMaxMergeMBForForceMerge */
  public static final double DEFAULT_MAX_MERGE_MB_FOR_FORCED_MERGE = Long.MAX_VALUE;

  /** Sole constructor, setting all settings to their
   *  defaults. */
  public LogByteSizeMergePolicy() {
    minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024);
    maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024);
@@ -28,6 +28,8 @@ public class LogDocMergePolicy extends LogMergePolicy {
  /** Default minimum segment size.  @see setMinMergeDocs */
  public static final int DEFAULT_MIN_MERGE_DOCS = 1000;

  /** Sole constructor, setting all settings to their
   *  defaults. */
  public LogDocMergePolicy() {
    minMergeSize = DEFAULT_MIN_MERGE_DOCS;
@@ -25,14 +25,15 @@ import java.util.Locale;
import java.util.Map;

/** <p>This class implements a {@link MergePolicy} that tries
 *  to merge segments into levels of exponentially
 *  increasing size, where each level has fewer segments than
 *  the value of the merge factor. Whenever extra segments
 *  (beyond the merge factor upper bound) are encountered,
 *  all segments within the level are merged. You can get or
 *  set the merge factor using {@link #getMergeFactor()} and
 *  {@link #setMergeFactor(int)} respectively.</p>
/**
 * <p>This class implements a {@link MergePolicy} that tries
 *  to merge segments into levels of exponentially
 *  increasing size, where each level has fewer segments than
 *  the value of the merge factor. Whenever extra segments
 *  (beyond the merge factor upper bound) are encountered,
 *  all segments within the level are merged. You can get or
 *  set the merge factor using {@link #getMergeFactor()} and
 *  {@link #setMergeFactor(int)} respectively.</p>
 *
 * <p>This class is abstract and requires a subclass to
 *  define the {@link #size} method which specifies how a

@@ -71,32 +72,64 @@ public abstract class LogMergePolicy extends MergePolicy {
   */
  public static final long DEFAULT_MAX_CFS_SEGMENT_SIZE = Long.MAX_VALUE;

  /** How many segments to merge at a time. */
  protected int mergeFactor = DEFAULT_MERGE_FACTOR;

  /** Any segments whose size is smaller than this value
   *  will be rounded up to this value.  This ensures that
   *  tiny segments are aggressively merged. */
  protected long minMergeSize;

  /** If the size of a segment exceeds this value then it
   *  will never be merged. */
  protected long maxMergeSize;

  // Although the core MPs set it explicitly, we must default in case someone
  // out there wrote his own LMP ...
  /** If the size of a segment exceeds this value then it
   *  will never be merged during {@link IndexWriter#forceMerge}. */
  protected long maxMergeSizeForForcedMerge = Long.MAX_VALUE;

  /** If a segment has more than this many documents then it
   *  will never be merged. */
  protected int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;

  /** If the size of the merged segment exceeds this ratio of
   *  the total index size then it will remain in
   *  non-compound format even if {@link
   *  #setUseCompoundFile} is {@code true}. */
  protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;

  /** If the size of the merged segment exceeds
   *  this value then it will not use compound file format. */
  protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;

  /** If true, we pro-rate a segment's size by the
   *  percentage of non-deleted documents. */
  protected boolean calibrateSizeByDeletes = true;

  /** True if new segments (flushed or merged) should use
   *  the compound file format.  Note that large segments
   *  may sometimes still use non-compound format (see
   *  {@link #setNoCFSRatio}). */
  protected boolean useCompoundFile = true;

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  public LogMergePolicy() {
    super();
  }

  /** Returns true if {@code LMP} is enabled in {@link
   *  IndexWriter}'s {@code infoStream}. */
  protected boolean verbose() {
    final IndexWriter w = writer.get();
    return w != null && w.infoStream.isEnabled("LMP");
  }

  /** @see #setNoCFSRatio */
  /** Returns current {@code noCFSRatio}.
   *
   * @see #setNoCFSRatio */
  public double getNoCFSRatio() {
    return noCFSRatio;
  }

@@ -112,7 +145,9 @@ public abstract class LogMergePolicy extends MergePolicy {
    }
    this.noCFSRatio = noCFSRatio;
  }

  /** Print a debug message to {@link IndexWriter}'s {@code
   *  infoStream}. */
  protected void message(String message) {
    if (verbose()) {
      writer.get().infoStream.message("LMP", message);

@@ -189,8 +224,14 @@ public abstract class LogMergePolicy extends MergePolicy {
  @Override
  public void close() {}

  /** Return the size of the provided {@link
   *  SegmentInfoPerCommit}. */
  abstract protected long size(SegmentInfoPerCommit info) throws IOException;

  /** Return the number of documents in the provided {@link
   *  SegmentInfoPerCommit}, pro-rated by percentage of
   *  non-deleted documents if {@link
   *  #setCalibrateSizeByDeletes} is set. */
  protected long sizeDocs(SegmentInfoPerCommit info) throws IOException {
    if (calibrateSizeByDeletes) {
      int delCount = writer.get().numDeletedDocs(info);

@@ -200,7 +241,11 @@ public abstract class LogMergePolicy extends MergePolicy {
      return info.info.getDocCount();
    }
  }

  /** Return the byte size of the provided {@link
   *  SegmentInfoPerCommit}, pro-rated by percentage of
   *  non-deleted documents if {@link
   *  #setCalibrateSizeByDeletes} is set. */
  protected long sizeBytes(SegmentInfoPerCommit info) throws IOException {
    long byteSize = info.sizeInBytes();
    if (calibrateSizeByDeletes) {

@@ -213,6 +258,9 @@ public abstract class LogMergePolicy extends MergePolicy {
    }
  }

  /** Returns true if the number of segments eligible for
   *  merging is less than or equal to the specified {@code
   *  maxNumSegments}. */
  protected boolean isMerged(SegmentInfos infos, int maxNumSegments, Map<SegmentInfoPerCommit,Boolean> segmentsToMerge) throws IOException {
    final int numSegments = infos.size();
    int numToMerge = 0;
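A hedged sketch of tuning the knobs documented above on a concrete subclass (values are illustrative; iwc is an assumed IndexWriterConfig):

    // Merge 10 segments per level; pro-rate segment sizes by live docs.
    LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
    mp.setMergeFactor(10);
    mp.setCalibrateSizeByDeletes(true);
    mp.setUseCompoundFile(true);
    iwc.setMergePolicy(mp);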
@@ -71,14 +71,23 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
    long mergeGen;                  // used by IndexWriter
    boolean isExternal;             // used by IndexWriter
    int maxNumSegments = -1;        // used by IndexWriter

    /** Estimated size in bytes of the merged segment. */
    public long estimatedMergeBytes;       // used by IndexWriter

    List<SegmentReader> readers;        // used by IndexWriter

    /** Segments to be merged. */
    public final List<SegmentInfoPerCommit> segments;

    /** Number of documents in the merged segment. */
    public final int totalDocCount;
    boolean aborted;
    Throwable error;
    boolean paused;

    /** Sole constructor.
     * @param segments List of {@link SegmentInfoPerCommit}s
     *  to be merged. */
    public OneMerge(List<SegmentInfoPerCommit> segments) {
      if (0 == segments.size())
        throw new RuntimeException("segments must include at least one segment");

@@ -116,6 +125,8 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
      return aborted;
    }

    /** Called periodically by {@link IndexWriter} while
     *  merging to see if the merge is aborted. */
    public synchronized void checkAborted(Directory dir) throws MergeAbortedException {
      if (aborted) {
        throw new MergeAbortedException("merge is aborted: " + segString(dir));

@@ -135,6 +146,9 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
      }
    }

    /** Set or clear whether this merge is paused (for example
     *  {@link ConcurrentMergeScheduler} will pause merges
     *  if too many are running). */
    synchronized public void setPause(boolean paused) {
      this.paused = paused;
      if (!paused) {

@@ -143,10 +157,15 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
      }
    }

    /** Returns true if this merge is paused.
     *
     * @see #setPause(boolean) */
    synchronized public boolean getPause() {
      return paused;
    }

    /** Returns a readable description of the current merge
     *  state. */
    public String segString(Directory dir) {
      StringBuilder b = new StringBuilder();
      final int numSegments = segments.size();

@@ -188,7 +207,8 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
      }
      return total;
    }

    /** Return {@link MergeInfo} describing this merge. */
    public MergeInfo getMergeInfo() {
      return new MergeInfo(totalDocCount, estimatedMergeBytes, isExternal, maxNumSegments);
    }

@@ -208,10 +228,19 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {

    public final List<OneMerge> merges = new ArrayList<OneMerge>();

    /** Sole constructor.  Use {@link
     *  #add(MergePolicy.OneMerge)} to add merges. */
    public MergeSpecification() {
    }

    /** Adds the provided {@link OneMerge} to this
     *  specification. */
    public void add(OneMerge merge) {
      merges.add(merge);
    }

    /** Returns a description of the merges in this
     *  specification. */
    public String segString(Directory dir) {
      StringBuilder b = new StringBuilder();
      b.append("MergeSpec:\n");

@@ -227,15 +256,18 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
  public static class MergeException extends RuntimeException {
    private Directory dir;

    /** Create a {@code MergeException}. */
    public MergeException(String message, Directory dir) {
      super(message);
      this.dir = dir;
    }

    /** Create a {@code MergeException}. */
    public MergeException(Throwable exc, Directory dir) {
      super(exc);
      this.dir = dir;
    }

    /** Returns the {@link Directory} of the index that hit
     *  the exception. */
    public Directory getDirectory() {

@@ -248,14 +280,19 @@ public abstract class MergePolicy implements java.io.Closeable, Cloneable {
   * <code>false</code>.  Normally this exception is
   *  privately caught and suppressed by {@link IndexWriter}. */
  public static class MergeAbortedException extends IOException {
    /** Create a {@link MergeAbortedException}. */
    public MergeAbortedException() {
      super("merge is aborted");
    }

    /** Create a {@link MergeAbortedException} with a
     *  specified message. */
    public MergeAbortedException(String message) {
      super(message);
    }
  }

  /** {@link IndexWriter} that contains this instance. */
  protected SetOnce<IndexWriter> writer;

  @Override
@@ -29,6 +29,11 @@ import java.io.IOException;
 */
public abstract class MergeScheduler implements Closeable {

  /** Sole constructor. (For invocation by subclass
   *  constructors, typically implicit.) */
  protected MergeScheduler() {
  }

  /** Run the merges provided by {@link IndexWriter#getNextMerge()}. */
  public abstract void merge(IndexWriter writer) throws IOException;
@@ -24,7 +24,7 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.packed.PackedInts;

/** Holds common state used during segment merging
/** Holds common state used during segment merging.
 *
 * @lucene.experimental */
public class MergeState {

@@ -35,10 +35,14 @@ public class MergeState {
  public static abstract class DocMap {
    private final Bits liveDocs;

    /** Sole constructor. (For invocation by subclass
     *  constructors, typically implicit.) */
    protected DocMap(Bits liveDocs) {
      this.liveDocs = liveDocs;
    }

    /** Creates a {@link DocMap} instance appropriate for
     *  this reader. */
    public static DocMap build(AtomicReader reader) {
      final int maxDoc = reader.maxDoc();
      final int numDeletes = reader.numDeletedDocs();

@@ -82,6 +86,7 @@ public class MergeState {
      return new DirectDocMap(liveDocs, docIds, del);
    }

    /** Returns the mapped docID corresponding to the provided one. */
    public int get(int docId) {
      if (liveDocs == null || liveDocs.get(docId)) {
        return remap(docId);

@@ -90,16 +95,22 @@ public class MergeState {
      }
    }

    /** Returns the mapped docID corresponding to the provided one. */
    public abstract int remap(int docId);

    /** Returns the total number of documents, ignoring
     *  deletions. */
    public abstract int maxDoc();

    /** Returns the number of not-deleted documents. */
    public final int numDocs() {
      return maxDoc() - numDeletedDocs();
    }

    /** Returns the number of deleted documents. */
    public abstract int numDeletedDocs();

    /** Returns true if there are any deletions. */
    public boolean hasDeletions() {
      return numDeletedDocs() > 0;
    }

@@ -184,21 +195,45 @@ public class MergeState {
    }
  }

  /** {@link SegmentInfo} of the newly merged segment. */
  public SegmentInfo segmentInfo;

  /** {@link FieldInfos} of the newly merged segment. */
  public FieldInfos fieldInfos;

  public List<AtomicReader> readers;   // Readers being merged
  public DocMap[] docMaps;             // Maps docIDs around deletions
  public int[] docBase;                // New docID base per reader

  /** Readers being merged. */
  public List<AtomicReader> readers;

  /** Maps docIDs around deletions. */
  public DocMap[] docMaps;

  /** New docID base per reader. */
  public int[] docBase;

  /** Holds the CheckAbort instance, which is invoked
   *  periodically to see if the merge has been aborted. */
  public CheckAbort checkAbort;

  /** InfoStream for debugging messages. */
  public InfoStream infoStream;

  // Updated per field;
  /** Current field being merged. */
  public FieldInfo fieldInfo;

  // TODO: get rid of this? it tells you which segments are 'aligned' (e.g. for bulk merging)
  // but is this really so expensive to compute again in different components, versus once in SM?

  /** {@link SegmentReader}s that have identical field
   *  name/number mapping, so their stored fields and term
   *  vectors may be bulk merged. */
  public SegmentReader[] matchingSegmentReaders;

  /** How many {@link #matchingSegmentReaders} are set. */
  public int matchedCount;

  /** Sole constructor. */
  MergeState() {
  }

  /**
   * Class for recording units of work when merging segments.

@@ -207,6 +242,8 @@ public class MergeState {
    private double workCount;
    private final MergePolicy.OneMerge merge;
    private final Directory dir;

    /** Creates a {@code CheckAbort} instance. */
    public CheckAbort(MergePolicy.OneMerge merge, Directory dir) {
      this.merge = merge;
      this.dir = dir;
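A hedged sketch of the DocMap contract described above (the reader is an assumed merge input; per the javadocs, get() compacts around deletions, and deleted documents conventionally map to -1):

    // Build a map from old docIDs to new, deletion-compacted docIDs.
    MergeState.DocMap docMap = MergeState.DocMap.build(atomicReader);
    int newDocID = docMap.get(oldDocID);   // remapped ID for a live doc
    int liveCount = docMap.numDocs();      // maxDoc() minus numDeletedDocs()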
@@ -38,15 +38,19 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
  int currentBase;
  int doc = -1;

  /** Sole constructor. */
  public MultiDocsAndPositionsEnum(MultiTermsEnum parent, int subReaderCount) {
    this.parent = parent;
    subDocsAndPositionsEnum = new DocsAndPositionsEnum[subReaderCount];
  }

  /** Returns {@code true} if this instance can be reused by
   *  the provided {@link MultiTermsEnum}. */
  public boolean canReuse(MultiTermsEnum parent) {
    return this.parent == parent;
  }

  /** Re-use and reset this instance on the provided slices. */
  public MultiDocsAndPositionsEnum reset(final EnumWithSlice[] subs, final int numSubs) {
    this.numSubs = numSubs;
    this.subs = new EnumWithSlice[subs.length];

@@ -61,10 +65,13 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
    return this;
  }

  /** How many sub-readers we are merging.
   * @see #getSubs */
  public int getNumSubs() {
    return numSubs;
  }

  /** Returns sub-readers we are merging. */
  public EnumWithSlice[] getSubs() {
    return subs;
  }

@@ -146,7 +153,14 @@ public final class MultiDocsAndPositionsEnum extends DocsAndPositionsEnum {
  /** Holds a {@link DocsAndPositionsEnum} along with the
   *  corresponding {@link ReaderSlice}. */
  public final static class EnumWithSlice {
    EnumWithSlice() {
    }

    /** {@link DocsAndPositionsEnum} for this sub-reader. */
    public DocsAndPositionsEnum docsAndPositionsEnum;

    /** {@link ReaderSlice} describing how this sub-reader
     *  fits into the composite reader. */
    public ReaderSlice slice;

    @Override
@@ -22,7 +22,8 @@ import java.io.IOException;
import java.util.Arrays;

/**
 * Exposes flex API, merged from flex API of sub-segments.
 * Exposes {@link DocsEnum}, merged from {@link DocsEnum}
 * API of sub-segments.
 *
 * @lucene.experimental
 */

@@ -37,6 +38,9 @@ public final class MultiDocsEnum extends DocsEnum {
  int currentBase;
  int doc = -1;

  /** Sole constructor.
   * @param parent The {@link MultiTermsEnum} that created us.
   * @param subReaderCount How many sub-readers are being merged. */
  public MultiDocsEnum(MultiTermsEnum parent, int subReaderCount) {
    this.parent = parent;
    subDocsEnum = new DocsEnum[subReaderCount];

@@ -57,14 +61,19 @@ public final class MultiDocsEnum extends DocsEnum {
    return this;
  }

  /** Returns {@code true} if this instance can be reused by
   *  the provided {@link MultiTermsEnum}. */
  public boolean canReuse(MultiTermsEnum parent) {
    return this.parent == parent;
  }

  /** How many sub-readers we are merging.
   * @see #getSubs */
  public int getNumSubs() {
    return numSubs;
  }

  /** Returns sub-readers we are merging. */
  public EnumWithSlice[] getSubs() {
    return subs;
  }

@@ -125,7 +134,14 @@ public final class MultiDocsEnum extends DocsEnum {
  /** Holds a {@link DocsEnum} along with the
   *  corresponding {@link ReaderSlice}. */
  public final static class EnumWithSlice {
    EnumWithSlice() {
    }

    /** {@link DocsEnum} of this sub-reader. */
    public DocsEnum docsEnum;

    /** {@link ReaderSlice} describing how this sub-reader
     *  fits into the composite reader. */
    public ReaderSlice slice;

    @Override
@@ -41,6 +41,12 @@ public final class MultiTerms extends Terms {
  private final boolean hasPositions;
  private final boolean hasPayloads;

  /** Sole constructor.
   *
   * @param subs The {@link Terms} instances of all sub-readers.
   * @param subSlices A parallel array (matching {@code
   *  subs}) describing the sub-reader slices.
   */
  public MultiTerms(Terms[] subs, ReaderSlice[] subSlices) throws IOException {
    this.subs = subs;
    this.subSlices = subSlices;
@@ -26,7 +26,7 @@ import java.util.Arrays;
import java.util.Comparator;

/**
 * Exposes flex API, merged from flex API of sub-segments.
 * Exposes {@link TermsEnum} API, merged from {@link TermsEnum} API of sub-segments.
 * This does a merge sort, by term text, of the sub-readers.
 *
 * @lucene.experimental

@@ -60,14 +60,20 @@ public final class MultiTermsEnum extends TermsEnum {
    }
  }

  /** Returns how many sub-reader slices contain the current
   *  term.  @see #getMatchArray */
  public int getMatchCount() {
    return numTop;
  }

  /** Returns sub-reader slices positioned to the current term. */
  public TermsEnumWithSlice[] getMatchArray() {
    return top;
  }

  /** Sole constructor.
   * @param slices Which sub-reader slices we should
   *  merge. */
  public MultiTermsEnum(ReaderSlice[] slices) {
    queue = new TermMergeQueue(slices.length);
    top = new TermsEnumWithSlice[slices.length];
@@ -47,6 +47,10 @@ public final class Norm {
  private StoredField field;
  private BytesRef spare;

  /** Sole constructor. */
  public Norm() {
  }

  /**
   * Returns the {@link StorableField} representation for this norm
   */
@@ -23,7 +23,13 @@ package org.apache.lucene.index;
 * @lucene.experimental
 */
public class OrdTermState extends TermState {
  /** Term ordinal, i.e. its position in the full list of
   *  sorted terms. */
  public long ord;

  /** Sole constructor. */
  public OrdTermState() {
  }

  @Override
  public void copyFrom(TermState other) {
@@ -30,13 +30,26 @@ import org.apache.lucene.util.InfoStream;
 * @lucene.experimental
 */
public class PerDocWriteState {
  /** InfoStream used for debugging. */
  public final InfoStream infoStream;

  /** {@link Directory} to write all files to. */
  public final Directory directory;

  /** {@link SegmentInfo} describing this segment. */
  public final SegmentInfo segmentInfo;

  /** Number of bytes allocated in RAM to hold this state. */
  public final Counter bytesUsed;

  /** Segment suffix to pass to {@link
   *  IndexFileNames#segmentFileName(String,String,String)}. */
  public final String segmentSuffix;

  /** {@link IOContext} to use for all file writing. */
  public final IOContext context;

  /** Creates a {@code PerDocWriteState}. */
  public PerDocWriteState(InfoStream infoStream, Directory directory,
      SegmentInfo segmentInfo, Counter bytesUsed,
      String segmentSuffix, IOContext context) {

@@ -48,6 +61,8 @@ public class PerDocWriteState {
    this.context = context;
  }

  /** Creates a {@code PerDocWriteState}, copying fields
   *  from another and allocating a new {@link #bytesUsed}. */
  public PerDocWriteState(SegmentWriteState state) {
    infoStream = state.infoStream;
    directory = state.directory;

@@ -57,6 +72,9 @@ public class PerDocWriteState {
    context = state.context;
  }

  /** Creates a {@code PerDocWriteState}, copying fields
   *  from another (copy constructor) but setting a new
   *  {@link #segmentSuffix}. */
  public PerDocWriteState(PerDocWriteState state, String segmentSuffix) {
    this.infoStream = state.infoStream;
    this.directory = state.directory;
@@ -23,11 +23,20 @@ package org.apache.lucene.index;
 * @lucene.internal
 */
public final class ReaderSlice {

  /** Zero-length {@code ReaderSlice} array. */
  public static final ReaderSlice[] EMPTY_ARRAY = new ReaderSlice[0];

  /** Document ID this slice starts from. */
  public final int start;

  /** Number of documents in this slice. */
  public final int length;

  /** Sub-reader index for this slice. */
  public final int readerIndex;

  /** Sole constructor. */
  public ReaderSlice(int start, int length, int readerIndex) {
    this.start = start;
    this.length = length;
@@ -39,12 +39,21 @@ import org.apache.lucene.store.TrackingDirectoryWrapper;
public final class SegmentInfo {

  // TODO: remove these from this class, for now this is the representation
  /** Used by some member fields to mean not present (e.g.,
   *  norms, deletions). */
  public static final int NO = -1;          // e.g. no norms; no deletes;

  /** Used by some member fields to mean present (e.g.,
   *  norms, deletions). */
  public static final int YES = 1;          // e.g. have norms; have deletes;

  public final String name;                 // unique name in dir
  /** Unique segment name in the directory. */
  public final String name;

  private int docCount;                     // number of docs in seg

  public final Directory dir;               // where segment resides
  /** Where this segment resides. */
  public final Directory dir;

  private boolean isCompoundFile;

@@ -67,6 +76,8 @@ public final class SegmentInfo {
    this.diagnostics = diagnostics;
  }

  /** Returns diagnostics saved into the segment when it was
   *  written. */
  public Map<String, String> getDiagnostics() {
    return diagnostics;
  }

@@ -133,10 +144,13 @@ public final class SegmentInfo {
    this.codec = codec;
  }

  /** Return {@link Codec} that wrote this segment. */
  public Codec getCodec() {
    return codec;
  }

  /** Returns number of documents in this segment (deletions
   *  are not taken into account). */
  public int getDocCount() {
    if (this.docCount == -1) {
      throw new IllegalStateException("docCount isn't set yet");

@@ -152,12 +166,7 @@ public final class SegmentInfo {
    this.docCount = docCount;
  }

  /*
   * Return all files referenced by this SegmentInfo.  The
   * returned List is a locally cached List so you should not
   * modify it.
   */
  /** Return all files referenced by this SegmentInfo. */
  public Set<String> files() {
    if (setFiles == null) {
      throw new IllegalStateException("files were not computed yet");

@@ -241,18 +250,23 @@ public final class SegmentInfo {
  private Set<String> setFiles;

  /** Sets the files written for this segment. */
  public void setFiles(Set<String> files) {
    checkFileNames(files);
    setFiles = files;
    sizeInBytes = -1;
  }

  /** Add these files to the set of files written for this
   *  segment. */
  public void addFiles(Collection<String> files) {
    checkFileNames(files);
    setFiles.addAll(files);
    sizeInBytes = -1;
  }

  /** Add this file to the set of files written for this
   *  segment. */
  public void addFile(String file) {
    checkFileNames(Collections.singleton(file));
    setFiles.add(file);

@@ -298,6 +312,8 @@ public final class SegmentInfo {
  }

  /**
   * Returns the internal codec attributes map.
   *
   * @return internal codec attributes map. May be null if no mappings exist.
   */
  public Map<String,String> attributes() {
@@ -30,6 +30,7 @@ import org.apache.lucene.store.Directory;

public class SegmentInfoPerCommit {

  /** The {@link SegmentInfo} that we wrap. */
  public final SegmentInfo info;

  // How many deleted docs in the segment:

@@ -41,6 +42,12 @@ public class SegmentInfoPerCommit {

  private volatile long sizeInBytes = -1;

  /** Sole constructor.
   * @param info {@link SegmentInfo} that we wrap
   * @param delCount number of deleted documents in this segment
   * @param delGen deletion generation number (used to name
   *        deletion files)
   */
  public SegmentInfoPerCommit(SegmentInfo info, int delCount, long delGen) {
    this.info = info;
    this.delCount = delCount;

@@ -56,6 +63,8 @@ public class SegmentInfoPerCommit {
    sizeInBytes = -1;
  }

  /** Returns total size in bytes of all files for this
   * segment. */
  public long sizeInBytes() throws IOException {
    if (sizeInBytes == -1) {
      final Collection<String> files = new HashSet<String>();

@@ -70,6 +79,7 @@ public class SegmentInfoPerCommit {
    return sizeInBytes;
  }

  /** Returns all files in use by this segment. */
  public Collection<String> files() throws IOException {
    Collection<String> files = new HashSet<String>(info.files());

@@ -144,6 +154,7 @@ public class SegmentInfoPerCommit {
    assert delCount <= info.getDocCount();
  }

  /** Returns a description of this segment. */
  public String toString(Directory dir, int pendingDelCount) {
    return info.toString(dir, delCount + pendingDelCount);
  }

@@ -121,11 +121,10 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
   * Whenever you add a new format, make it 1 smaller (negative version logic)! */
  public static final int FORMAT_SEGMENTS_GEN_CURRENT = -2;

  public int counter;    // used to name new segments
  /** Used to name new segments. */
  public int counter;

  /**
   * counts how often the index has been changed
   */
  /** Counts how often the index has been changed. */
  public long version;

  private long generation;     // generation of the "segments_N" for the next commit

@@ -144,6 +143,16 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
   */
  private static PrintStream infoStream = null;

  /** Sole constructor. Typically you call this and then
   * use {@link #read(Directory)} or
   * {@link #read(Directory,String)} to populate each {@link
   * SegmentInfoPerCommit}. Alternatively, you can add/remove your
   * own {@link SegmentInfoPerCommit}s. */
  public SegmentInfos() {
  }

  /** Returns {@link SegmentInfoPerCommit} at the provided
   * index. */
  public SegmentInfoPerCommit info(int i) {
    return segments.get(i);
  }

@@ -319,6 +328,8 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
    }
  }

  /** Find the latest commit ({@code segments_N} file) and
   * load all {@link SegmentInfoPerCommit}s. */
  public final void read(Directory directory) throws IOException {
    generation = lastGeneration = -1;

@@ -415,9 +426,13 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
  public long getVersion() {
    return version;
  }

  /** Returns current generation. */
  public long getGeneration() {
    return generation;
  }

  /** Returns last successfully read or written generation. */
  public long getLastGeneration() {
    return lastGeneration;
  }

@@ -445,7 +460,10 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
  public static void setDefaultGenLookaheadCount(int count) {
    defaultGenLookaheadCount = count;
  }

  /**
   * Returns the {@code defaultGenLookaheadCount}.
   *
   * @see #setDefaultGenLookaheadCount
   *
   * @lucene.experimental

@@ -455,6 +473,8 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
  }

  /**
   * Returns {@code infoStream}.
   *
   * @see #setInfoStream
   */
  public static PrintStream getInfoStream() {

@@ -484,14 +504,18 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom

    final Directory directory;

    /** Sole constructor. */
    public FindSegmentsFile(Directory directory) {
      this.directory = directory;
    }

    /** Locate the most recent {@code segments} file and
     * run {@link #doBody} on it. */
    public Object run() throws IOException {
      return run(null);
    }

    /** Run {@link #doBody} on the provided commit. */
    public Object run(IndexCommit commit) throws IOException {
      if (commit != null) {
        if (directory != commit.getDirectory())

@@ -858,6 +882,7 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
    finishCommit(dir);
  }

  /** Returns readable description of this segment. */
  public String toString(Directory directory) {
    StringBuilder buffer = new StringBuilder();
    buffer.append(getSegmentsFileName()).append(": ");

@@ -872,6 +897,10 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
    return buffer.toString();
  }

  /** Return {@code userData} saved with this commit.
   *
   * @see IndexWriter#commit(Map)
   */
  public Map<String,String> getUserData() {
    return userData;
  }

@@ -966,41 +995,56 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfoPerCom
  public List<SegmentInfoPerCommit> asList() {
    return Collections.unmodifiableList(segments);
  }

  /** Returns number of {@link SegmentInfoPerCommit}s. */
  public int size() {
    return segments.size();
  }

  /** Appends the provided {@link SegmentInfoPerCommit}. */
  public void add(SegmentInfoPerCommit si) {
    segments.add(si);
  }

  /** Appends the provided {@link SegmentInfoPerCommit}s. */
  public void addAll(Iterable<SegmentInfoPerCommit> sis) {
    for (final SegmentInfoPerCommit si : sis) {
      this.add(si);
    }
  }

  /** Clear all {@link SegmentInfoPerCommit}s. */
  public void clear() {
    segments.clear();
  }

  /** WARNING: O(N) cost */
  /** Remove the provided {@link SegmentInfoPerCommit}.
   *
   * <p><b>WARNING</b>: O(N) cost */
  public void remove(SegmentInfoPerCommit si) {
    segments.remove(si);
  }

  /** WARNING: O(N) cost */
  /** Remove the {@link SegmentInfoPerCommit} at the
   * provided index.
   *
   * <p><b>WARNING</b>: O(N) cost */
  void remove(int index) {
    segments.remove(index);
  }

  /** WARNING: O(N) cost */
  /** Return true if the provided {@link
   * SegmentInfoPerCommit} is contained.
   *
   * <p><b>WARNING</b>: O(N) cost */
  boolean contains(SegmentInfoPerCommit si) {
    return segments.contains(si);
  }

  /** WARNING: O(N) cost */
  /** Returns index of the provided {@link
   * SegmentInfoPerCommit}.
   *
   * <p><b>WARNING</b>: O(N) cost */
  int indexOf(SegmentInfoPerCommit si) {
    return segments.indexOf(si);
  }

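[Editor's note] The new constructor javadoc above spells out the intended usage: instantiate an empty SegmentInfos, then populate it with read. Below is a minimal sketch of listing the segments of an existing index under those javadocs; the index path handling and printed fields are illustrative, and getDelCount() is assumed from the surrounding SegmentInfoPerCommit API rather than shown in this patch.

import java.io.File;
import org.apache.lucene.index.SegmentInfoPerCommit;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListSegments {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File(args[0]));
    SegmentInfos sis = new SegmentInfos();
    sis.read(dir);  // locates the latest segments_N and loads each SegmentInfoPerCommit
    for (SegmentInfoPerCommit si : sis) {  // SegmentInfos is Iterable<SegmentInfoPerCommit>
      System.out.println(si.info.name + ": docCount=" + si.info.getDocCount()
          + " delCount=" + si.getDelCount());  // getDelCount() assumed from this API
    }
    dir.close();
  }
}

Internally, read(Directory) relies on the FindSegmentsFile logic documented above to retry if a commit is concurrently replaced.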
@@ -17,6 +17,8 @@ package org.apache.lucene.index;
 * limitations under the License.
 */

import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; // javadocs
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;

@@ -25,24 +27,46 @@ import org.apache.lucene.store.IOContext;
 * @lucene.experimental
 */
public class SegmentReadState {
  /** {@link Directory} where this segment is read from. */
  public final Directory dir;

  /** {@link SegmentInfo} describing this segment. */
  public final SegmentInfo segmentInfo;

  /** {@link FieldInfos} describing all fields in this
   * segment. */
  public final FieldInfos fieldInfos;

  /** {@link IOContext} to pass to {@link
   * Directory#openInput(String,IOContext)}. */
  public final IOContext context;

  /** NOTE: if this is < 0, that means "defer terms index
  /** The {@code termInfosIndexDivisor} to use, if
   * appropriate (not all {@link PostingsFormat}s support
   * it; in particular the current default does not).
   *
   * <p> NOTE: if this is < 0, that means "defer terms index
   * load until needed". But if the codec must load the
   * terms index on init (preflex is the only one currently
   * that must do so), then it should negate this value to
   * get the app's terms divisor */
  public int termsIndexDivisor;

  /** Unique suffix for any postings files read for this
   * segment. {@link PerFieldPostingsFormat} sets this for
   * each of the postings formats it wraps. If you create
   * a new {@link PostingsFormat} then any files you
   * write/read must be derived using this suffix (use
   * {@link IndexFileNames#segmentFileName(String,String,String)}). */
  public final String segmentSuffix;

  /** Create a {@code SegmentReadState}. */
  public SegmentReadState(Directory dir, SegmentInfo info,
      FieldInfos fieldInfos, IOContext context, int termsIndexDivisor) {
    this(dir, info, fieldInfos, context, termsIndexDivisor, "");
  }

  /** Create a {@code SegmentReadState}. */
  public SegmentReadState(Directory dir,
                          SegmentInfo info,
                          FieldInfos fieldInfos,

@@ -57,6 +81,7 @@ public class SegmentReadState {
    this.segmentSuffix = segmentSuffix;
  }

  /** Create a {@code SegmentReadState}. */
  public SegmentReadState(SegmentReadState other,
                          String newSegmentSuffix) {
    this.dir = other.dir;

@@ -126,7 +126,9 @@ public final class SegmentReader extends AtomicReader {
    return core.fieldInfos;
  }

  /** @lucene.internal */
  /** Expert: retrieve thread-private {@link
   * StoredFieldsReader}
   * @lucene.internal */
  public StoredFieldsReader getFieldsReader() {
    ensureOpen();
    return core.fieldsReaderLocal.get();

@@ -158,7 +160,9 @@ public final class SegmentReader extends AtomicReader {
    return si.info.getDocCount();
  }

  /** @lucene.internal */
  /** Expert: retrieve thread-private {@link
   * TermVectorsReader}
   * @lucene.internal */
  public TermVectorsReader getTermVectorsReader() {
    ensureOpen();
    return core.termVectorsLocal.get();

@@ -220,7 +224,9 @@ public final class SegmentReader extends AtomicReader {
  public Object getCombinedCoreAndDeletesKey() {
    return this;
  }

  /** Returns term infos index divisor originally passed to
   * {@link #SegmentReader(SegmentInfoPerCommit, int, IOContext)}. */
  public int getTermInfosIndexDivisor() {
    return core.termsIndexDivisor;
  }

@@ -260,6 +266,8 @@ public final class SegmentReader extends AtomicReader {
 * @lucene.experimental
 */
public static interface CoreClosedListener {
  /** Invoked when the shared core of the provided {@link
   * SegmentReader} has closed. */
  public void onClose(SegmentReader owner);
}

@@ -17,6 +17,8 @@ package org.apache.lucene.index;
 * limitations under the License.
 */

import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; // javadocs
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.util.InfoStream;

@@ -27,10 +29,23 @@ import org.apache.lucene.util.MutableBits;
 * @lucene.experimental
 */
public class SegmentWriteState {

  /** {@link InfoStream} used for debugging messages. */
  public final InfoStream infoStream;

  /** {@link Directory} where this segment will be written
   * to. */
  public final Directory directory;

  /** {@link SegmentInfo} describing this segment. */
  public final SegmentInfo segmentInfo;

  /** {@link FieldInfos} describing all fields in this
   * segment. */
  public final FieldInfos fieldInfos;

  /** Number of deleted documents set while flushing the
   * segment. */
  public int delCountOnFlush;

  /** Deletes to apply while we are flushing the segment. A

@@ -40,9 +55,16 @@ public class SegmentWriteState {
   * deleted. */
  public final BufferedDeletes segDeletes;

  // Lazily created:
  /** {@link MutableBits} recording live documents; this is
   * only set if there are one or more deleted documents. */
  public MutableBits liveDocs;

  /** Unique suffix for any postings files written for this
   * segment. {@link PerFieldPostingsFormat} sets this for
   * each of the postings formats it wraps. If you create
   * a new {@link PostingsFormat} then any files you
   * write/read must be derived using this suffix (use
   * {@link IndexFileNames#segmentFileName(String,String,String)}). */
  public final String segmentSuffix;

  /** Expert: The fraction of terms in the "dictionary" which should be stored

@@ -52,8 +74,11 @@ public class SegmentWriteState {
   * tweaking this is rarely useful. */
  public int termIndexInterval;    // TODO: this should be private to the codec, not settable here or in IWC

  /** {@link IOContext} for all writes; you should pass this
   * to {@link Directory#createOutput(String,IOContext)}. */
  public final IOContext context;

  /** Sole constructor. */
  public SegmentWriteState(InfoStream infoStream, Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos,
      int termIndexInterval, BufferedDeletes segDeletes, IOContext context) {
    this.infoStream = infoStream;

@@ -23,6 +23,10 @@ import java.io.IOException;
 * sequentially, using the current thread. */
public class SerialMergeScheduler extends MergeScheduler {

  /** Sole constructor. */
  public SerialMergeScheduler() {
  }

  /** Just do the merges in sequence. We do this
   * "synchronized" so that even if the application is using
   * multiple threads, only one merge may run at a time. */

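[Editor's note] Since the class javadoc promises strictly sequential merges, here is a short sketch of how an application might select this scheduler. The IndexWriterConfig wiring, the analyzer parameter, and the Version constant are assumptions about the surrounding 4.0-era API, not part of this patch.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.util.Version;

class SerialSchedulerConfig {
  static IndexWriterConfig newConfig(Analyzer analyzer) {
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    // merges now run one at a time, synchronously, on the thread that triggers them
    iwc.setMergeScheduler(new SerialMergeScheduler());
    return iwc;
  }
}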
@@ -60,7 +60,9 @@ public final class SlowCompositeReaderWrapper extends AtomicReader {
      return (AtomicReader) reader;
    }
  }

  /** Sole constructor, wrapping the provided {@link
   * CompositeReader}. */
  public SlowCompositeReaderWrapper(CompositeReader reader) throws IOException {
    super();
    in = reader;

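[Editor's note] The constructor documented above is the entry point for flattening a composite reader into a single AtomicReader view. A minimal sketch follows; DirectoryReader.open and maxDoc() are standard Lucene API assumed here, and the variable names are illustrative.

import java.io.IOException;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.store.Directory;

class WrapExample {
  static void dumpMaxDoc(Directory dir) throws IOException {
    DirectoryReader composite = DirectoryReader.open(dir);
    try {
      // presents the multi-segment reader as one (slow) atomic view
      AtomicReader atomic = new SlowCompositeReaderWrapper(composite);
      System.out.println("maxDoc=" + atomic.maxDoc());
    } finally {
      composite.close();  // closing the underlying reader is sufficient
    }
  }
}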
@@ -68,10 +68,16 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
      return id + " : " + segmentsFileName;
    }
  }

  /** Wraps a provided {@link IndexCommit} and prevents it
   * from being deleted. */
  protected class SnapshotCommitPoint extends IndexCommit {

    /** The {@link IndexCommit} we are preventing from deletion. */
    protected IndexCommit cp;

    /** Creates a {@code SnapshotCommitPoint} wrapping the provided
     * {@link IndexCommit}. */
    protected SnapshotCommitPoint(IndexCommit cp) {
      this.cp = cp;
    }

@@ -143,8 +149,12 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
  private Map<String, Set<String>> segmentsFileToIDs = new HashMap<String, Set<String>>();

  private IndexDeletionPolicy primary;

  /** Most recently committed {@link IndexCommit}. */
  protected IndexCommit lastCommit;

  /** Sole constructor, taking the incoming {@link
   * IndexDeletionPolicy} to wrap. */
  public SnapshotDeletionPolicy(IndexDeletionPolicy primary) {
    this.primary = primary;
  }

@@ -198,6 +208,8 @@ public class SnapshotDeletionPolicy implements IndexDeletionPolicy {
    ids.add(id);
  }

  /** Wraps each {@link IndexCommit} as a {@link
   * SnapshotCommitPoint}. */
  protected List<IndexCommit> wrapCommits(List<? extends IndexCommit> commits) {
    List<IndexCommit> wrappedCommits = new ArrayList<IndexCommit>(commits.size());
    for (IndexCommit ic : commits) {

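[Editor's note] To illustrate the wrapping described above, here is a hedged sketch of taking a snapshot for backup. It assumes the 4.0-era snapshot(String id)/release(String id) methods and IndexCommit.getFileNames(), none of which appear in this hunk, and the analyzer/commit-data details are illustrative.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

class SnapshotExample {
  static void backup(Directory dir) throws Exception {
    SnapshotDeletionPolicy sdp =
        new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40,
        new StandardAnalyzer(Version.LUCENE_40));
    iwc.setIndexDeletionPolicy(sdp);
    IndexWriter writer = new IndexWriter(dir, iwc);
    try {
      writer.commit();  // a snapshot needs at least one commit to pin
      IndexCommit commit = sdp.snapshot("backup");  // assumed 4.0-era signature
      try {
        for (String file : commit.getFileNames()) {
          // copy `file` out of dir to the backup location
        }
      } finally {
        sdp.release("backup");  // allow the pinned commit to be deleted again
      }
    } finally {
      writer.close();
    }
  }
}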
@@ -43,6 +43,8 @@ public final class SortedBytesMergeUtils {
    // no instance
  }

  /** Creates the {@link MergeContext} necessary for merging
   * the ordinals. */
  public static MergeContext init(Type type, DocValues[] docValues,
      Comparator<BytesRef> comp, int mergeDocCount) {
    int size = -1;

@@ -68,11 +70,21 @@ public final class SortedBytesMergeUtils {
  public static final class MergeContext {
    private final Comparator<BytesRef> comp;
    private final BytesRef missingValue = new BytesRef();

    /** How many bytes each value occupies, or -1 if it
     * varies. */
    public final int sizePerValues;    // -1 if var length

    final Type type;

    /** Maps each document to the ordinal for its value. */
    public final int[] docToEntry;

    /** File-offset for each document; will be null if it's
     * not needed (eg fixed-size values). */
    public long[] offsets;    // if non-null #mergeRecords collects byte offsets here

    /** Sole constructor. */
    public MergeContext(Comparator<BytesRef> comp, int mergeDocCount,
        int size, Type type) {
      assert type == Type.BYTES_FIXED_SORTED || type == Type.BYTES_VAR_SORTED;

@@ -85,12 +97,15 @@ public final class SortedBytesMergeUtils {
      }
      docToEntry = new int[mergeDocCount];
    }

    /** Returns number of documents merged. */
    public int getMergeDocCount() {
      return docToEntry.length;
    }
  }

  /** Creates the {@link SortedSourceSlice}s for
   * merging. */
  public static List<SortedSourceSlice> buildSlices(
      int[] docBases, MergeState.DocMap[] docMaps,
      DocValues[] docValues, MergeContext ctx) throws IOException {

@@ -150,6 +165,8 @@ public final class SortedBytesMergeUtils {
    }
  }

  /** Does the "real work" of merging the slices and
   * computing the ord mapping. */
  public static int mergeRecords(MergeContext ctx, BytesRefConsumer consumer,
      List<SortedSourceSlice> slices) throws IOException {
    final RecordMerger merger = new RecordMerger(new MergeQueue(slices.size(),

@@ -212,6 +229,7 @@ public final class SortedBytesMergeUtils {
  public static final class IndexOutputBytesRefConsumer implements BytesRefConsumer {
    private final IndexOutput datOut;

    /** Sole constructor. */
    public IndexOutputBytesRefConsumer(IndexOutput datOut) {
      this.datOut = datOut;
    }

@@ -330,7 +348,10 @@ public final class SortedBytesMergeUtils {
      }
      return null;
    }

    /** Fills in the absolute ords for this slice.
     *
     * @return the provided {@code docToOrd} */
    public int[] toAbsolutOrds(int[] docToOrd) {
      for (int i = docToOrdStart; i < docToOrdEnd; i++) {
        final int mappedOrd = docIDToRelativeOrd[i];

@@ -341,6 +362,7 @@ public final class SortedBytesMergeUtils {
      return docToOrd;
    }

    /** Writes ords for this slice. */
    public void writeOrds(PackedInts.Writer writer) throws IOException {
      for (int i = docToOrdStart; i < docToOrdEnd; i++) {
        final int mappedOrd = docIDToRelativeOrd[i];

@@ -32,9 +32,13 @@ import org.apache.lucene.util.BytesRef;
 * StoredDocument is retrieved from IndexReader containing only stored fields from indexed {@link IndexDocument}.
 */
// TODO: shouldn't this really be in the .document package?
public class StoredDocument implements Iterable<StorableField>{

public class StoredDocument implements Iterable<StorableField> {

  private final List<StorableField> fields = new ArrayList<StorableField>();

  /** Sole constructor. */
  public StoredDocument() {
  }

  /**
   * Adds a field to a document.

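[Editor's note] The class javadoc above says a StoredDocument is what comes back from the reader with only stored fields. A small sketch of iterating one follows; it assumes IndexReader.document(int) returns StoredDocument on this trunk and that StorableField exposes name()/stringValue(), neither shown in this hunk.

import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;

class DumpStoredFields {
  static void dump(DirectoryReader reader, int docID) throws IOException {
    StoredDocument doc = reader.document(docID);  // only stored fields are present
    for (StorableField field : doc) {  // StoredDocument is Iterable<StorableField>
      // stringValue() may be null for binary or numeric-only fields
      System.out.println(field.name() + " = " + field.stringValue());
    }
  }
}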
@@ -35,6 +35,12 @@ import org.apache.lucene.document.DocumentStoredFieldVisitor;
 * @lucene.experimental */

public abstract class StoredFieldVisitor {

  /** Sole constructor. (For invocation by subclass
   * constructors, typically implicit.) */
  protected StoredFieldVisitor() {
  }

  /** Process a binary field. */
  public void binaryField(FieldInfo fieldInfo, byte[] value, int offset, int length) throws IOException {
  }

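[Editor's note] binaryField above is one of the per-field visit callbacks. Here is a hedged sketch of a subclass that collects only binary fields; needsField and its Status enum are assumed from the surrounding StoredFieldVisitor API and are not shown in this hunk.

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.StoredFieldVisitor;

class BinaryOnlyVisitor extends StoredFieldVisitor {
  final Map<String, byte[]> values = new HashMap<String, byte[]>();

  @Override
  public Status needsField(FieldInfo fieldInfo) {  // assumed from the 4.0-era API
    return Status.YES;  // visit every field; return NO to skip, STOP to end early
  }

  @Override
  public void binaryField(FieldInfo fieldInfo, byte[] value, int offset, int length) {
    byte[] copy = new byte[length];
    System.arraycopy(value, offset, copy, 0, length);  // defensive copy of the visited bytes
    values.put(fieldInfo.name, copy);
  }
}

Such a visitor would typically be handed to the reader's document(docID, visitor) overload, which streams each stored field through the callbacks instead of materializing a document.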
@@ -32,7 +32,13 @@ import org.apache.lucene.util.BytesRef;
 * @lucene.experimental
 */
public final class TermContext {
  public final IndexReaderContext topReaderContext; // for asserting!

  /** Holds the {@link IndexReaderContext} of the top-level
   * {@link IndexReader}, used internally only for
   * asserting.
   *
   * @lucene.internal */
  public final IndexReaderContext topReaderContext;
  private final TermState[] states;
  private int docFreq;
  private long totalTermFreq;

@@ -27,6 +27,11 @@ package org.apache.lucene.index;
 */
public abstract class TermState implements Cloneable {

  /** Sole constructor. (For invocation by subclass
   * constructors, typically implicit.) */
  protected TermState() {
  }

  /**
   * Copies the content of the given {@link TermState} to this instance
   *

@@ -30,6 +30,11 @@ import org.apache.lucene.util.automaton.CompiledAutomaton;

public abstract class Terms {

  /** Sole constructor. (For invocation by subclass
   * constructors, typically implicit.) */
  protected Terms() {
  }

  /** Returns an iterator that will step through all
   * terms. This method will not return null. If you have
   * a previous TermsEnum, for example from a different

@@ -114,5 +119,6 @@ public abstract class Terms {
  /** Returns true if documents in this field store payloads. */
  public abstract boolean hasPayloads();

  /** Zero-length array of {@link Terms}. */
  public final static Terms[] EMPTY_ARRAY = new Terms[0];
}

@@ -45,6 +45,11 @@ public abstract class TermsEnum implements BytesRefIterator {

  private AttributeSource atts = null;

  /** Sole constructor. (For invocation by subclass
   * constructors, typically implicit.) */
  protected TermsEnum() {
  }

  /** Returns the related attributes. */
  public AttributeSource attributes() {
    if (atts == null) atts = new AttributeSource();

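[Editor's note] The Terms iterator javadoc above mentions passing a previous TermsEnum for reuse. A minimal sketch of walking every term of one field ties the two classes together; AtomicReader.terms(String) and TermsEnum.docFreq() are standard Lucene API assumed here.

import java.io.IOException;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

class DumpTerms {
  static void dump(AtomicReader reader, String field) throws IOException {
    Terms terms = reader.terms(field);
    if (terms == null) {
      return;  // the field has no indexed terms
    }
    TermsEnum termsEnum = terms.iterator(null);  // pass a previous TermsEnum here to reuse it
    BytesRef term;
    while ((term = termsEnum.next()) != null) {  // next() steps through all terms, never null-checking mid-stream
      System.out.println(term.utf8ToString() + " docFreq=" + termsEnum.docFreq());
    }
  }
}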
@@ -87,6 +87,11 @@ public class TieredMergePolicy extends MergePolicy {
  private long maxCFSSegmentSize = Long.MAX_VALUE;
  private double reclaimDeletesWeight = 2.0;

  /** Sole constructor, setting all settings to their
   * defaults. */
  public TieredMergePolicy() {
  }

  /** Maximum number of segments to be merged at a time
   * during "normal" merging. For explicit merging (eg,
   * forceMerge or forceMergeDeletes was called), see {@link

@@ -99,7 +104,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setMaxMergeAtOnce */
  /** Returns the current maxMergeAtOnce setting.
   *
   * @see #setMaxMergeAtOnce */
  public int getMaxMergeAtOnce() {
    return maxMergeAtOnce;
  }

@@ -117,7 +124,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setMaxMergeAtOnceExplicit */
  /** Returns the current maxMergeAtOnceExplicit setting.
   *
   * @see #setMaxMergeAtOnceExplicit */
  public int getMaxMergeAtOnceExplicit() {
    return maxMergeAtOnceExplicit;
  }

@@ -136,7 +145,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #getMaxMergedSegmentMB */
  /** Returns the current maxMergedSegmentMB setting.
   *
   * @see #getMaxMergedSegmentMB */
  public double getMaxMergedSegmentMB() {
    return maxMergedSegmentBytes/1024/1024.;
  }

@@ -172,7 +183,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setFloorSegmentMB */
  /** Returns the current floorSegmentMB.
   *
   * @see #setFloorSegmentMB */
  public double getFloorSegmentMB() {
    return floorSegmentBytes/(1024*1024.);
  }

@@ -188,7 +201,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setForceMergeDeletesPctAllowed */
  /** Returns the current forceMergeDeletesPctAllowed setting.
   *
   * @see #setForceMergeDeletesPctAllowed */
  public double getForceMergeDeletesPctAllowed() {
    return forceMergeDeletesPctAllowed;
  }

@@ -209,7 +224,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setSegmentsPerTier */
  /** Returns the current segmentsPerTier setting.
   *
   * @see #setSegmentsPerTier */
  public double getSegmentsPerTier() {
    return segsPerTier;
  }

@@ -222,7 +239,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setUseCompoundFile */
  /** Returns the current useCompoundFile setting.
   *
   * @see #setUseCompoundFile */
  public boolean getUseCompoundFile() {
    return useCompoundFile;
  }

@@ -240,7 +259,9 @@ public class TieredMergePolicy extends MergePolicy {
    return this;
  }

  /** @see #setNoCFSRatio */
  /** Returns the current noCFSRatio setting.
   *
   * @see #setNoCFSRatio */
  public double getNoCFSRatio() {
    return noCFSRatio;
  }

@@ -266,7 +287,13 @@ public class TieredMergePolicy extends MergePolicy {
  /** Holds score and explanation for a single candidate
   * merge. */
  protected static abstract class MergeScore {
    /** Sole constructor. (For invocation by subclass
     * constructors, typically implicit.) */
    protected MergeScore() {
    }

    abstract double getScore();

    abstract String getExplanation();
  }

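[Editor's note] Each setter above ends in "return this;", so configuration calls chain naturally. A hedged sketch of configuring the policy follows; the chosen values and the IndexWriterConfig wiring are illustrative, not recommendations from this patch.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.util.Version;

class TieredConfig {
  static IndexWriterConfig newConfig(Analyzer analyzer) {
    TieredMergePolicy tmp = new TieredMergePolicy();
    tmp.setMaxMergeAtOnce(10)            // setters return this, so calls chain
       .setSegmentsPerTier(10.0)
       .setMaxMergedSegmentMB(5 * 1024.);  // cap merged segments at ~5 GB
    tmp.setUseCompoundFile(true);
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
    iwc.setMergePolicy(tmp);
    return iwc;
  }
}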
@@ -42,6 +42,7 @@ public final class TwoPhaseCommitTool {
    private final TwoPhaseCommit tpc;
    private final Map<String, String> commitData;

    /** Sole constructor. */
    public TwoPhaseCommitWrapper(TwoPhaseCommit tpc, Map<String, String> commitData) {
      this.tpc = tpc;
      this.commitData = commitData;

@@ -73,12 +74,12 @@ public final class TwoPhaseCommitTool {
   * object fails to prepareCommit().
   */
  public static class PrepareCommitFailException extends IOException {

    /** Sole constructor. */
    public PrepareCommitFailException(Throwable cause, TwoPhaseCommit obj) {
      super("prepareCommit() failed on " + obj);
      initCause(cause);
    }

  }

  /**

@@ -86,7 +87,8 @@ public final class TwoPhaseCommitTool {
   * object fails to commit().
   */
  public static class CommitFailException extends IOException {

    /** Sole constructor. */
    public CommitFailException(Throwable cause, TwoPhaseCommit obj) {
      super("commit() failed on " + obj);
      initCause(cause);

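[Editor's note] The wrapper above attaches commit data to one participant of a two-phase commit. A hedged sketch of committing two writers atomically follows; it assumes TwoPhaseCommitTool.execute(TwoPhaseCommit...) and IndexWriter's TwoPhaseCommit implementation, neither of which is shown in this hunk.

import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.TwoPhaseCommitTool;

class AtomicCommits {
  static void commitBoth(IndexWriter writer1, IndexWriter writer2) throws Exception {
    Map<String, String> commitData = new HashMap<String, String>();
    commitData.put("source", "batch");  // illustrative commit metadata
    // prepareCommit() runs on every object first; only if all succeed does commit() run,
    // otherwise everything is rolled back and PrepareCommitFailException is thrown
    TwoPhaseCommitTool.execute(
        new TwoPhaseCommitTool.TwoPhaseCommitWrapper(writer1, commitData),
        writer2);
  }
}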
@@ -50,6 +50,7 @@ import java.util.HashMap;
 */
public class UpgradeIndexMergePolicy extends MergePolicy {

  /** Wrapped {@link MergePolicy}. */
  protected final MergePolicy base;

  /** Wrap the given {@link MergePolicy} and intercept forceMerge requests to