mirror of https://github.com/apache/lucene.git
LUCENE-2960: allow certain IWC setters to take effect 'live' after IW has been constructed
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1082865 13f79535-47bb-0310-9956-ffa450edef68
parent 7e1a9ee6ef
commit bfad5f34f8
CHANGES.txt

@@ -362,6 +362,10 @@ Bug fixes
* LUCENE-2936: PhraseQuery score explanations were not correctly
  identifying matches vs non-matches. (hossman)

* LUCENE-2960: Allow some changes to IndexWriterConfig to take effect
  "live" (after an IW is instantiated), via
  IndexWriter.getConfig().setXXX(...) (Shay Banon, Mike McCandless)

======================= Lucene 3.x (not yet released) =======================

(No changes)
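[Editorial note, not part of the commit] The entry above means a handful of IndexWriterConfig settings can now be adjusted on a writer that is already open, through the writer's own config clone. A minimal usage sketch; openWriter() is a hypothetical helper standing in for however the application constructs its IndexWriter:

    // Illustrative sketch only; openWriter() is a hypothetical helper.
    IndexWriter writer = openWriter();

    // Tune flush triggers "live" through the writer's private config clone,
    // not through the IndexWriterConfig originally passed to the constructor:
    writer.getConfig().setRAMBufferSizeMB(48.0);
    writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);

These two setters are among the ones the commit makes live; the test changes at the end of this diff exercise exactly this pattern.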
DocumentsWriter.java

@@ -266,17 +266,8 @@ final class DocumentsWriter {

  // How much RAM we can use before flushing. This is 0 if
  // we are flushing by doc count instead.
  private long ramBufferSize = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024);
  private long waitQueuePauseBytes = (long) (ramBufferSize*0.1);
  private long waitQueueResumeBytes = (long) (ramBufferSize*0.05);

  // If we've allocated 5% over our RAM budget, we then
  // free down to 95%
  private long freeLevel = (long) (IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB*1024*1024*0.95);

  // Flush @ this number of docs. If ramBufferSize is
  // non-zero we will flush by RAM usage instead.
  private int maxBufferedDocs = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
  private final IndexWriterConfig config;

  private boolean closed;
  private final FieldInfos fieldInfos;

@@ -284,16 +275,17 @@ final class DocumentsWriter {
  private final BufferedDeletesStream bufferedDeletesStream;
  private final IndexWriter.FlushControl flushControl;

  DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain, int maxThreadStates, FieldInfos fieldInfos, BufferedDeletesStream bufferedDeletesStream) throws IOException {
  DocumentsWriter(IndexWriterConfig config, Directory directory, IndexWriter writer, FieldInfos fieldInfos, BufferedDeletesStream bufferedDeletesStream) throws IOException {
    this.directory = directory;
    this.writer = writer;
    this.similarityProvider = writer.getConfig().getSimilarityProvider();
    this.maxThreadStates = maxThreadStates;
    this.maxThreadStates = config.getMaxThreadStates();
    this.fieldInfos = fieldInfos;
    this.bufferedDeletesStream = bufferedDeletesStream;
    flushControl = writer.flushControl;

    consumer = indexingChain.getChain(this);
    consumer = config.getIndexingChain().getChain(this);
    this.config = config;
  }

  // Buffer a specific docID for deletion. Currently only

@@ -363,45 +355,6 @@ final class DocumentsWriter {
    }
  }

  synchronized void setSimilarityProvider(SimilarityProvider similarity) {
    this.similarityProvider = similarity;
    for(int i=0;i<threadStates.length;i++) {
      threadStates[i].docState.similarityProvider = similarity;
    }
  }

  /** Set how much RAM we can use before flushing. */
  synchronized void setRAMBufferSizeMB(double mb) {
    if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
      ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
      waitQueuePauseBytes = 4*1024*1024;
      waitQueueResumeBytes = 2*1024*1024;
    } else {
      ramBufferSize = (long) (mb*1024*1024);
      waitQueuePauseBytes = (long) (ramBufferSize*0.1);
      waitQueueResumeBytes = (long) (ramBufferSize*0.05);
      freeLevel = (long) (0.95 * ramBufferSize);
    }
  }

  synchronized double getRAMBufferSizeMB() {
    if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
      return ramBufferSize;
    } else {
      return ramBufferSize/1024./1024.;
    }
  }

  /** Set max buffered docs, which means we will flush by
   * doc count instead of by RAM usage. */
  void setMaxBufferedDocs(int count) {
    maxBufferedDocs = count;
  }

  int getMaxBufferedDocs() {
    return maxBufferedDocs;
  }

  /** Get current segment name we are writing. */
  synchronized String getSegment() {
    return segment;
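[Editorial note, not part of the commit] The hunks above replace a push model with a pull model: IndexWriter no longer calls dedicated DocumentsWriter setters (setRAMBufferSizeMB, setMaxBufferedDocs, setSimilarityProvider); instead DocumentsWriter keeps the writer's IndexWriterConfig and re-reads the current values whenever it needs them, which is what lets later setter calls take effect. A minimal sketch of the pattern; class and method names other than IndexWriterConfig are hypothetical:

    // Sketch of the "pull from config" pattern used above.
    final class FlushPolicySketch {
      private final IndexWriterConfig config; // shared, live-updatable settings

      FlushPolicySketch(IndexWriterConfig config) {
        this.config = config;
      }

      long currentRamBufferBytes() {
        // Re-read the setting on every call instead of caching it at
        // construction time, so a live setRAMBufferSizeMB(...) is honored.
        double mb = config.getRAMBufferSizeMB();
        return mb == IndexWriterConfig.DISABLE_AUTO_FLUSH
            ? (long) IndexWriterConfig.DISABLE_AUTO_FLUSH
            : (long) (mb * 1024 * 1024);
      }
    }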
@@ -1024,6 +977,14 @@ final class DocumentsWriter {

    deletesRAMUsed = bufferedDeletesStream.bytesUsed();

    final long ramBufferSize;
    final double mb = config.getRAMBufferSizeMB();
    if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
      ramBufferSize = IndexWriterConfig.DISABLE_AUTO_FLUSH;
    } else {
      ramBufferSize = (long) (mb*1024*1024);
    }

    synchronized(this) {
      if (ramBufferSize == IndexWriterConfig.DISABLE_AUTO_FLUSH || bufferIsFull) {
        return;

@@ -1052,6 +1013,8 @@ final class DocumentsWriter {

    boolean any = true;

    final long freeLevel = (long) (0.95 * ramBufferSize);

    while(bytesUsed()+deletesRAMUsed > freeLevel) {

      synchronized(this) {

@@ -1117,10 +1080,24 @@ final class DocumentsWriter {
    }

    synchronized boolean doResume() {
      final double mb = config.getRAMBufferSizeMB();
      final long waitQueueResumeBytes;
      if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
        waitQueueResumeBytes = 2*1024*1024;
      } else {
        waitQueueResumeBytes = (long) (mb*1024*1024*0.05);
      }
      return waitingBytes <= waitQueueResumeBytes;
    }

    synchronized boolean doPause() {
      final double mb = config.getRAMBufferSizeMB();
      final long waitQueuePauseBytes;
      if (mb == IndexWriterConfig.DISABLE_AUTO_FLUSH) {
        waitQueuePauseBytes = 4*1024*1024;
      } else {
        waitQueuePauseBytes = (long) (mb*1024*1024*0.1);
      }
      return waitingBytes > waitQueuePauseBytes;
    }
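[Editorial note, not part of the commit] A quick arithmetic check of doPause()/doResume() above: the wait queue pauses once waiting bytes exceed 10% of the RAM buffer and resumes at 5%, so with the default 16 MB buffer that is roughly 1.6 MB and 0.8 MB; when RAM-based flushing is disabled, fixed 4 MB / 2 MB thresholds apply. The same math as hypothetical helpers:

    // Hypothetical helpers reproducing the threshold math from doPause()/doResume().
    final class WaitQueueThresholds {
      static long pauseBytes(double ramBufferMB) {
        return ramBufferMB == IndexWriterConfig.DISABLE_AUTO_FLUSH
            ? 4*1024*1024                         // fixed fallback when flushing by doc count
            : (long) (ramBufferMB*1024*1024*0.1); // 10% of the RAM buffer
      }
      static long resumeBytes(double ramBufferMB) {
        return ramBufferMB == IndexWriterConfig.DISABLE_AUTO_FLUSH
            ? 2*1024*1024
            : (long) (ramBufferMB*1024*1024*0.05); // 5% of the RAM buffer
      }
    }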
IndexWriter.java

@@ -230,8 +230,6 @@ public class IndexWriter implements Closeable {

  private Lock writeLock;

  private final int termIndexInterval;

  private boolean closed;
  private boolean closing;

@@ -689,10 +687,9 @@ public class IndexWriter implements Closeable {

  /**
   * Constructs a new IndexWriter per the settings given in <code>conf</code>.
   * Note that the passed in {@link IndexWriterConfig} is cloned and thus making
   * changes to it after IndexWriter has been instantiated will not affect
   * IndexWriter. Additionally, calling {@link #getConfig()} and changing the
   * parameters does not affect that IndexWriter instance.
   * Note that the passed in {@link IndexWriterConfig} is
   * privately cloned; if you need to make subsequent "live"
   * changes to the configuration use {@link #getConfig}.
   * <p>
   *
   * @param d

@@ -718,11 +715,9 @@ public class IndexWriter implements Closeable {
    directory = d;
    analyzer = conf.getAnalyzer();
    infoStream = defaultInfoStream;
    termIndexInterval = conf.getTermIndexInterval();
    mergePolicy = conf.getMergePolicy();
    mergePolicy.setIndexWriter(this);
    mergeScheduler = conf.getMergeScheduler();
    mergedSegmentWarmer = conf.getMergedSegmentWarmer();
    codecs = conf.getCodecProvider();

    bufferedDeletesStream = new BufferedDeletesStream(messageID);

@@ -791,7 +786,7 @@ public class IndexWriter implements Closeable {

    setRollbackSegmentInfos(segmentInfos);

    docWriter = new DocumentsWriter(directory, this, conf.getIndexingChain(), conf.getMaxThreadStates(), getCurrentFieldInfos(), bufferedDeletesStream);
    docWriter = new DocumentsWriter(config, directory, this, getCurrentFieldInfos(), bufferedDeletesStream);
    docWriter.setInfoStream(infoStream);

    // Default deleter (for backwards compatibility) is

@@ -809,10 +804,6 @@ public class IndexWriter implements Closeable {
      segmentInfos.changed();
    }

    docWriter.setRAMBufferSizeMB(conf.getRAMBufferSizeMB());
    docWriter.setMaxBufferedDocs(conf.getMaxBufferedDocs());
    pushMaxBufferedDocs();

    if (infoStream != null) {
      message("init: create=" + create);
      messageState();

@@ -881,38 +872,20 @@ public class IndexWriter implements Closeable {
  }

  /**
   * Returns the {@link IndexWriterConfig} that was passed to
   * {@link #IndexWriter(Directory, IndexWriterConfig)}. This allows querying
   * IndexWriter's settings.
   * Returns the private {@link IndexWriterConfig}, cloned
   * from the {@link IndexWriterConfig} passed to
   * {@link #IndexWriter(Directory, IndexWriterConfig)}.
   * <p>
   * <b>NOTE:</b> setting any parameter on the returned instance has not effect
   * on the IndexWriter instance. If you need to change those settings after
   * IndexWriter has been created, you need to instantiate a new IndexWriter.
   * <b>NOTE:</b> some settings may be changed on the
   * returned {@link IndexWriterConfig}, and will take
   * effect in the current IndexWriter instance. See the
   * javadocs for the specific setters in {@link
   * IndexWriterConfig} for details.
   */
  public IndexWriterConfig getConfig() {
    return config;
  }

  /**
   * If we are flushing by doc count (not by RAM usage), and
   * using LogDocMergePolicy then push maxBufferedDocs down
   * as its minMergeDocs, to keep backwards compatibility.
   */
  private void pushMaxBufferedDocs() {
    if (docWriter.getMaxBufferedDocs() != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
      final MergePolicy mp = mergePolicy;
      if (mp instanceof LogDocMergePolicy) {
        LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
        final int maxBufferedDocs = docWriter.getMaxBufferedDocs();
        if (lmp.getMinMergeDocs() != maxBufferedDocs) {
          if (infoStream != null)
            message("now push maxBufferedDocs " + maxBufferedDocs + " to LogDocMergePolicy");
          lmp.setMinMergeDocs(maxBufferedDocs);
        }
      }
    }
  }

  /** If non-null, this will be the default infoStream used
   * by a newly instantiated IndexWriter.
   * @see #setInfoStream
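[Editorial note, not part of the commit] The getConfig() change above is the user-facing half of the feature: the returned object is the writer's private clone, so setters called on it are the ones that can take effect live, while the IndexWriterConfig instance originally passed to the constructor stays disconnected. A hedged sketch of the distinction; newConfig() is a hypothetical helper and dir an already-open Directory:

    // Sketch only.
    IndexWriterConfig conf = newConfig();
    IndexWriter writer = new IndexWriter(dir, conf);

    conf.setRAMBufferSizeMB(256);               // no effect: the writer holds a private clone
    writer.getConfig().setRAMBufferSizeMB(256); // takes effect "live" (see the setter javadocs)

Settings whose javadocs below say "Only takes effect when IndexWriter is first created" (open mode, merge scheduler, merge policy, and so on) remain fixed after construction.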
@@ -1476,8 +1449,8 @@ public class IndexWriter implements Closeable {

  /** If non-null, information about merges will be printed to this.
   */
  private PrintStream infoStream = null;
  private static PrintStream defaultInfoStream = null;
  private PrintStream infoStream;
  private static PrintStream defaultInfoStream;

  /**
   * Requests an "optimize" operation on an index, priming the index

@@ -2270,7 +2243,7 @@ public class IndexWriter implements Closeable {

    try {
      String mergedName = newSegmentName();
      SegmentMerger merger = new SegmentMerger(directory, termIndexInterval,
      SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(),
          mergedName, null, codecs, payloadProcessorProvider,
          ((FieldInfos) docWriter.getFieldInfos().clone()));

@@ -3163,7 +3136,7 @@ public class IndexWriter implements Closeable {

    SegmentInfos sourceSegments = merge.segments;

    SegmentMerger merger = new SegmentMerger(directory, termIndexInterval, mergedName, merge,
    SegmentMerger merger = new SegmentMerger(directory, config.getTermIndexInterval(), mergedName, merge,
        codecs, payloadProcessorProvider,
        ((FieldInfos) docWriter.getFieldInfos().clone()));

@@ -3292,6 +3265,8 @@ public class IndexWriter implements Closeable {
      merge.info.setUseCompoundFile(true);
    }

    final IndexReaderWarmer mergedSegmentWarmer = config.getMergedSegmentWarmer();

    final int termsIndexDivisor;
    final boolean loadDocStores;

@@ -3572,8 +3547,6 @@ public class IndexWriter implements Closeable {
    public abstract void warm(IndexReader reader) throws IOException;
  }

  private IndexReaderWarmer mergedSegmentWarmer;

  private void handleOOM(OutOfMemoryError oom, String location) {
    if (infoStream != null) {
      message("hit OutOfMemoryError inside " + location);
IndexWriterConfig.java

@@ -26,12 +26,16 @@ import org.apache.lucene.search.SimilarityProvider;
import org.apache.lucene.util.Version;

/**
 * Holds all the configuration of {@link IndexWriter}. This object is only used
 * while constructing a new IndexWriter. Those settings cannot be changed
 * afterwards, except instantiating a new IndexWriter.
 * Holds all the configuration of {@link IndexWriter}. You
 * should instantiate this class, call the setters to set
 * your configuration, then pass it to {@link IndexWriter}.
 * Note that {@link IndexWriter} makes a private clone; if
 * you need to subsequently change settings use {@link
 * IndexWriter#getConfig}.
 *
 * <p>
 * All setter methods return {@link IndexWriterConfig} to allow chaining
 * settings conveniently. Thus someone can do:
 * settings conveniently, for example:
 *
 * <pre>
 * IndexWriterConfig conf = new IndexWriterConfig(analyzer);

@@ -108,26 +112,25 @@ public final class IndexWriterConfig implements Cloneable {
    return WRITE_LOCK_TIMEOUT;
  }

  private Analyzer analyzer;
  private IndexDeletionPolicy delPolicy;
  private IndexCommit commit;
  private OpenMode openMode;
  private SimilarityProvider similarityProvider;
  private int termIndexInterval; // TODO: this should be private to the codec, not settable here
  private MergeScheduler mergeScheduler;
  private long writeLockTimeout;
  private int maxBufferedDeleteTerms;
  private double ramBufferSizeMB;
  private int maxBufferedDocs;
  private IndexingChain indexingChain;
  private IndexReaderWarmer mergedSegmentWarmer;
  private CodecProvider codecProvider;
  private MergePolicy mergePolicy;
  private int maxThreadStates;
  private boolean readerPooling;
  private int readerTermsIndexDivisor;
  private final Analyzer analyzer;
  private volatile IndexDeletionPolicy delPolicy;
  private volatile IndexCommit commit;
  private volatile OpenMode openMode;
  private volatile SimilarityProvider similarityProvider;
  private volatile int termIndexInterval; // TODO: this should be private to the codec, not settable here
  private volatile MergeScheduler mergeScheduler;
  private volatile long writeLockTimeout;
  private volatile int maxBufferedDeleteTerms;
  private volatile double ramBufferSizeMB;
  private volatile int maxBufferedDocs;
  private volatile IndexingChain indexingChain;
  private volatile IndexReaderWarmer mergedSegmentWarmer;
  private volatile CodecProvider codecProvider;
  private volatile MergePolicy mergePolicy;
  private volatile int maxThreadStates;
  private volatile boolean readerPooling;
  private volatile int readerTermsIndexDivisor;

  // required for clone
  private Version matchVersion;

  /**
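[Editorial note, not part of the commit] On the volatile hunk above: once setters may be called on a live writer's config, these fields can be written by an application thread while indexing threads read them, so each mutable setting becomes volatile to guarantee the latest value is visible; analyzer becomes final because it cannot be swapped after construction. A hypothetical, self-contained illustration of the visibility concern:

    // Illustration only; not Lucene code.
    public class LiveSettingSketch {
      private volatile double ramBufferSizeMB = 16.0; // volatile: a writer thread's update
                                                      // must become visible to reader threads

      public void setRAMBufferSizeMB(double mb) { ramBufferSizeMB = mb; }

      public static void main(String[] args) throws Exception {
        final LiveSettingSketch settings = new LiveSettingSketch();
        Thread indexingThread = new Thread(new Runnable() {
          public void run() {
            while (settings.ramBufferSizeMB == 16.0) {
              // spin until the "live" change becomes visible
            }
            System.out.println("saw new RAM buffer size: " + settings.ramBufferSizeMB);
          }
        });
        indexingThread.start();
        Thread.sleep(100);                  // let the thread start spinning
        settings.setRAMBufferSizeMB(48.0);  // volatile write, guaranteed to be seen
        indexingThread.join();
      }
    }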
@@ -162,7 +165,7 @@ public final class IndexWriterConfig implements Cloneable {
  @Override
  public Object clone() {
    // Shallow clone is the only thing that's possible, since parameters like
    // analyzer, index commit etc. do not implemnt Cloneable.
    // analyzer, index commit etc. do not implement Cloneable.
    try {
      return super.clone();
    } catch (CloneNotSupportedException e) {

@@ -176,7 +179,9 @@ public final class IndexWriterConfig implements Cloneable {
    return analyzer;
  }

  /** Specifies {@link OpenMode} of that index. */
  /** Specifies {@link OpenMode} of the index.
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setOpenMode(OpenMode openMode) {
    this.openMode = openMode;
    return this;

@@ -201,6 +206,8 @@ public final class IndexWriterConfig implements Cloneable {
   * <p>
   * <b>NOTE:</b> the deletion policy cannot be null. If <code>null</code> is
   * passed, the deletion policy will be set to the default.
   *
   * <p>Only takes effect when IndexWriter is first created.
   */
  public IndexWriterConfig setIndexDeletionPolicy(IndexDeletionPolicy delPolicy) {
    this.delPolicy = delPolicy == null ? new KeepOnlyLastCommitDeletionPolicy() : delPolicy;

@@ -219,7 +226,8 @@ public final class IndexWriterConfig implements Cloneable {
  /**
   * Expert: allows to open a certain commit point. The default is null which
   * opens the latest commit point.
   */
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setIndexCommit(IndexCommit commit) {
    this.commit = commit;
    return this;

@@ -239,7 +247,8 @@ public final class IndexWriterConfig implements Cloneable {
   * <p>
   * <b>NOTE:</b> the similarity provider cannot be null. If <code>null</code> is passed,
   * the similarity provider will be set to the default implementation (unspecified).
   */
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setSimilarityProvider(SimilarityProvider similarityProvider) {
    this.similarityProvider = similarityProvider == null ? IndexSearcher.getDefaultSimilarityProvider() : similarityProvider;
    return this;

@@ -274,7 +283,9 @@ public final class IndexWriterConfig implements Cloneable {
   * must be scanned for each random term access.
   *
   * @see #DEFAULT_TERM_INDEX_INTERVAL
   */
   *
   * <p>Takes effect immediately, but only applies to newly
   * flushed/merged segments. */
  public IndexWriterConfig setTermIndexInterval(int interval) { // TODO: this should be private to the codec, not settable here
    this.termIndexInterval = interval;
    return this;
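[Editorial note, not part of the commit] Because IndexWriter now reads config.getTermIndexInterval() when it creates a SegmentMerger (see the IndexWriter hunks earlier), a live change to this setting applies to segments flushed or merged after the call, while existing segments keep the interval they were built with. A hedged sketch; writer is an already-open IndexWriter:

    // Sketch only.
    writer.getConfig().setTermIndexInterval(256); // larger interval -> smaller in-memory
                                                  // terms index, slower term lookups
    // Documents indexed from here on are flushed (and later merged) into
    // segments that use the new interval; existing segments are unchanged.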
@@ -295,7 +306,8 @@ public final class IndexWriterConfig implements Cloneable {
   * <p>
   * <b>NOTE:</b> the merge scheduler cannot be null. If <code>null</code> is
   * passed, the merge scheduler will be set to the default.
   */
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setMergeScheduler(MergeScheduler mergeScheduler) {
    this.mergeScheduler = mergeScheduler == null ? new ConcurrentMergeScheduler() : mergeScheduler;
    return this;

@@ -313,7 +325,8 @@ public final class IndexWriterConfig implements Cloneable {
   * Sets the maximum time to wait for a write lock (in milliseconds) for this
   * instance. You can change the default value for all instances by calling
   * {@link #setDefaultWriteLockTimeout(long)}.
   */
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setWriteLockTimeout(long writeLockTimeout) {
    this.writeLockTimeout = writeLockTimeout;
    return this;

@@ -339,6 +352,9 @@ public final class IndexWriterConfig implements Cloneable {
   * @throws IllegalArgumentException if maxBufferedDeleteTerms
   * is enabled but smaller than 1
   * @see #setRAMBufferSizeMB
   *
   * <p>Takes effect immediately, but only the next time a
   * document is added, updated or deleted.
   */
  public IndexWriterConfig setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
    if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH

@@ -391,6 +407,9 @@ public final class IndexWriterConfig implements Cloneable {
   * <p>
   * The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.
   *
   * <p>Takes effect immediately, but only the next time a
   * document is added, updated or deleted.
   *
   * @throws IllegalArgumentException
   *           if ramBufferSize is enabled but non-positive, or it disables
   *           ramBufferSize when maxBufferedDocs is already disabled

@@ -430,6 +449,9 @@ public final class IndexWriterConfig implements Cloneable {
   * <p>
   * Disabled by default (writer flushes by RAM usage).
   *
   * <p>Takes effect immediately, but only the next time a
   * document is added, updated or deleted.
   *
   * @see #setRAMBufferSizeMB(double)
   *
   * @throws IllegalArgumentException

@@ -458,7 +480,9 @@ public final class IndexWriterConfig implements Cloneable {
    return maxBufferedDocs;
  }

  /** Set the merged segment warmer. See {@link IndexReaderWarmer}. */
  /** Set the merged segment warmer. See {@link IndexReaderWarmer}.
   *
   * <p>Takes effect on the next merge. */
  public IndexWriterConfig setMergedSegmentWarmer(IndexReaderWarmer mergeSegmentWarmer) {
    this.mergedSegmentWarmer = mergeSegmentWarmer;
    return this;

@@ -475,13 +499,16 @@ public final class IndexWriterConfig implements Cloneable {
   * and return a {@link MergePolicy.MergeSpecification} describing the merges.
   * It also selects merges to do for optimize(). (The default is
   * {@link LogByteSizeMergePolicy}.
   */
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setMergePolicy(MergePolicy mergePolicy) {
    this.mergePolicy = mergePolicy == null ? new LogByteSizeMergePolicy() : mergePolicy;
    return this;
  }

  /** Set the CodecProvider. See {@link CodecProvider}. */
  /** Set the CodecProvider. See {@link CodecProvider}.
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setCodecProvider(CodecProvider codecProvider) {
    this.codecProvider = codecProvider;
    return this;

@@ -507,7 +534,8 @@ public final class IndexWriterConfig implements Cloneable {
   * at once in IndexWriter. Values < 1 are invalid and if passed
   * <code>maxThreadStates</code> will be set to
   * {@link #DEFAULT_MAX_THREAD_STATES}.
   */
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setMaxThreadStates(int maxThreadStates) {
    this.maxThreadStates = maxThreadStates < 1 ? DEFAULT_MAX_THREAD_STATES : maxThreadStates;
    return this;

@@ -526,7 +554,9 @@ public final class IndexWriterConfig implements Cloneable {
   * This method lets you enable pooling without getting a
   * near-real-time reader. NOTE: if you set this to
   * false, IndexWriter will still pool readers once
   * {@link IndexWriter#getReader} is called. */
   * {@link IndexWriter#getReader} is called.
   *
   * <p>Only takes effect when IndexWriter is first created. */
  public IndexWriterConfig setReaderPooling(boolean readerPooling) {
    this.readerPooling = readerPooling;
    return this;

@@ -538,7 +568,9 @@ public final class IndexWriterConfig implements Cloneable {
    return readerPooling;
  }

  /** Expert: sets the {@link DocConsumer} chain to be used to process documents. */
  /** Expert: sets the {@link DocConsumer} chain to be used to process documents.
   *
   * <p>Only takes effect when IndexWriter is first created. */
  IndexWriterConfig setIndexingChain(IndexingChain indexingChain) {
    this.indexingChain = indexingChain == null ? DocumentsWriter.defaultIndexingChain : indexingChain;
    return this;

@@ -555,7 +587,10 @@ public final class IndexWriterConfig implements Cloneable {
   * IndexWriter#getReader}. If you pass -1, the terms index
   * won't be loaded by the readers. This is only useful in
   * advanced situations when you will only .next() through
   * all terms; attempts to seek will hit an exception. */
   * all terms; attempts to seek will hit an exception.
   *
   * <p>Takes effect immediately, but only applies to
   * readers opened after this call */
  public IndexWriterConfig setReaderTermsIndexDivisor(int divisor) {
    if (divisor <= 0 && divisor != -1) {
      throw new IllegalArgumentException("divisor must be >= 1, or -1 (got " + divisor + ")");
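[Editorial note, not part of the commit] The reader terms-index divisor above is read when a reader is opened, so a live change only affects readers obtained afterwards, for example near-real-time readers from the writer. A hedged sketch; writer is an already-open IndexWriter on this branch, where IndexWriter#getReader returns a near-real-time reader:

    // Sketch only.
    writer.getConfig().setReaderTermsIndexDivisor(2); // load every 2nd indexed term
    IndexReader nrtReader = writer.getReader();       // opened after the call -> uses divisor 2
    // Readers opened before the call keep the divisor they were opened with.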
TestIndexWriter.java

@@ -685,6 +685,122 @@ public class TestIndexWriter extends LuceneTestCase {
    dir.close();
  }

  // Make sure it's OK to change RAM buffer size and
  // maxBufferedDocs in a write session
  public void testChangingRAMBuffer() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    writer.getConfig().setMaxBufferedDocs(10);
    writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);

    int lastFlushCount = -1;
    for(int j=1;j<52;j++) {
      Document doc = new Document();
      doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
      _TestUtil.syncConcurrentMerges(writer);
      int flushCount = writer.getFlushCount();
      if (j == 1)
        lastFlushCount = flushCount;
      else if (j < 10)
        // No new files should be created
        assertEquals(flushCount, lastFlushCount);
      else if (10 == j) {
        assertTrue(flushCount > lastFlushCount);
        lastFlushCount = flushCount;
        writer.getConfig().setRAMBufferSizeMB(0.000001);
        writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      } else if (j < 20) {
        assertTrue(flushCount > lastFlushCount);
        lastFlushCount = flushCount;
      } else if (20 == j) {
        writer.getConfig().setRAMBufferSizeMB(16);
        writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        lastFlushCount = flushCount;
      } else if (j < 30) {
        assertEquals(flushCount, lastFlushCount);
      } else if (30 == j) {
        writer.getConfig().setRAMBufferSizeMB(0.000001);
        writer.getConfig().setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      } else if (j < 40) {
        assertTrue(flushCount> lastFlushCount);
        lastFlushCount = flushCount;
      } else if (40 == j) {
        writer.getConfig().setMaxBufferedDocs(10);
        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        lastFlushCount = flushCount;
      } else if (j < 50) {
        assertEquals(flushCount, lastFlushCount);
        writer.getConfig().setMaxBufferedDocs(10);
        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      } else if (50 == j) {
        assertTrue(flushCount > lastFlushCount);
      }
    }
    writer.close();
    dir.close();
  }

  public void testChangingRAMBuffer2() throws IOException {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
    writer.getConfig().setMaxBufferedDocs(10);
    writer.getConfig().setMaxBufferedDeleteTerms(10);
    writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);

    for(int j=1;j<52;j++) {
      Document doc = new Document();
      doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
      writer.addDocument(doc);
    }

    int lastFlushCount = -1;
    for(int j=1;j<52;j++) {
      writer.deleteDocuments(new Term("field", "aaa" + j));
      _TestUtil.syncConcurrentMerges(writer);
      int flushCount = writer.getFlushCount();
      if (j == 1)
        lastFlushCount = flushCount;
      else if (j < 10) {
        // No new files should be created
        assertEquals(flushCount, lastFlushCount);
      } else if (10 == j) {
        assertTrue(flushCount > lastFlushCount);
        lastFlushCount = flushCount;
        writer.getConfig().setRAMBufferSizeMB(0.000001);
        writer.getConfig().setMaxBufferedDeleteTerms(1);
      } else if (j < 20) {
        assertTrue(flushCount > lastFlushCount);
        lastFlushCount = flushCount;
      } else if (20 == j) {
        writer.getConfig().setRAMBufferSizeMB(16);
        writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        lastFlushCount = flushCount;
      } else if (j < 30) {
        assertEquals(flushCount, lastFlushCount);
      } else if (30 == j) {
        writer.getConfig().setRAMBufferSizeMB(0.000001);
        writer.getConfig().setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        writer.getConfig().setMaxBufferedDeleteTerms(1);
      } else if (j < 40) {
        assertTrue(flushCount> lastFlushCount);
        lastFlushCount = flushCount;
      } else if (40 == j) {
        writer.getConfig().setMaxBufferedDeleteTerms(10);
        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        lastFlushCount = flushCount;
      } else if (j < 50) {
        assertEquals(flushCount, lastFlushCount);
        writer.getConfig().setMaxBufferedDeleteTerms(10);
        writer.getConfig().setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
      } else if (50 == j) {
        assertTrue(flushCount > lastFlushCount);
      }
    }
    writer.close();
    dir.close();
  }

  public void testDiverseDocs() throws IOException {
    MockDirectoryWrapper dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));