mirror of https://github.com/apache/lucene.git

LUCENE-1763: require IndexWriter be passed up front to the MergePolicy

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@799818 13f79535-47bb-0310-9956-ffa450edef68

commit 175e8b546d
parent bbcab117d9
@@ -80,6 +80,11 @@ Changes in backwards compatibility policy
     methods in these TokenStreams/-Filters were made final.
     (Michael Busch, Uwe Schindler)
 
+ 5. LUCENE-1763: MergePolicy now requires an IndexWriter instance to
+    be passed upon instantiation. As a result, IndexWriter was removed
+    as a method argument from all MergePolicy methods. (Shai Erera via
+    Mike McCandless)
+
 Changes in runtime behavior
 
  1. LUCENE-1424: QueryParser now by default uses constant score auto
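In practice, the compatibility break in user code is one constructor argument. A minimal before/after sketch against the 2.9-dev API as it stands in this commit (the directory and analyzer choices are arbitrary):

import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class MergePolicyMigration {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(),
        true, IndexWriter.MaxFieldLength.LIMITED);
    // Before LUCENE-1763: writer.setMergePolicy(new LogDocMergePolicy());
    // After: the policy is bound to its writer at construction time.
    writer.setMergePolicy(new LogDocMergePolicy(writer));
    writer.close();
  }
}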
@@ -42,7 +42,7 @@
   <property name="Name" value="Lucene"/>
   <property name="dev.version" value="2.9-dev"/>
   <property name="version" value="${dev.version}"/>
-  <property name="compatibility.tag" value="lucene_2_4_back_compat_tests_20090729"/>
+  <property name="compatibility.tag" value="lucene_2_4_back_compat_tests_20090801"/>
   <property name="spec.version" value="${version}"/>
   <property name="year" value="2000-${current.year}"/>
   <property name="final.name" value="lucene-${name}-${version}"/>
@@ -60,38 +60,19 @@ public class CreateIndexTask extends PerfTask {
 
     final String mergeScheduler = config.get("merge.scheduler",
                                              "org.apache.lucene.index.ConcurrentMergeScheduler");
-    RuntimeException err = null;
     try {
       writer.setMergeScheduler((MergeScheduler) Class.forName(mergeScheduler).newInstance());
-    } catch (IllegalAccessException iae) {
-      err = new RuntimeException("unable to instantiate class '" + mergeScheduler + "' as merge scheduler");
-      err.initCause(iae);
-    } catch (InstantiationException ie) {
-      err = new RuntimeException("unable to instantiate class '" + mergeScheduler + "' as merge scheduler");
-      err.initCause(ie);
-    } catch (ClassNotFoundException cnfe) {
-      err = new RuntimeException("unable to load class '" + mergeScheduler + "' as merge scheduler");
-      err.initCause(cnfe);
+    } catch (Exception e) {
+      throw new RuntimeException("unable to instantiate class '" + mergeScheduler + "' as merge scheduler", e);
     }
-    if (err != null)
-      throw err;
 
     final String mergePolicy = config.get("merge.policy",
                                           "org.apache.lucene.index.LogByteSizeMergePolicy");
     try {
-      writer.setMergePolicy((MergePolicy) Class.forName(mergePolicy).newInstance());
-    } catch (IllegalAccessException iae) {
-      err = new RuntimeException("unable to instantiate class '" + mergePolicy + "' as merge policy");
-      err.initCause(iae);
-    } catch (InstantiationException ie) {
-      err = new RuntimeException("unable to instantiate class '" + mergePolicy + "' as merge policy");
-      err.initCause(ie);
-    } catch (ClassNotFoundException cnfe) {
-      err = new RuntimeException("unable to load class '" + mergePolicy + "' as merge policy");
-      err.initCause(cnfe);
+      writer.setMergePolicy((MergePolicy) Class.forName(mergePolicy).getConstructor(new Class[] { IndexWriter.class }).newInstance(new Object[] { writer }));
+    } catch (Exception e) {
+      throw new RuntimeException("unable to instantiate class '" + mergePolicy + "' as merge policy", e);
     }
-    if (err != null)
-      throw err;
 
     writer.setUseCompoundFile(config.get("compound",true));
     writer.setMergeFactor(config.get("merge.factor",OpenIndexTask.DEFAULT_MERGE_PFACTOR));
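Note the asymmetry after this change: the scheduler is still built with Class.newInstance(), but the policy must go through getConstructor because its no-arg constructor no longer exists. The same pattern reduced to a standalone sketch (the helper class and method names below are illustrative, not part of the benchmark API):

import java.lang.reflect.Constructor;

// Illustrative helper: instantiate a class named in a config file through its
// single one-argument constructor.
final class OneArgFactory {
  static Object create(String className, Class argType, Object arg)
      throws Exception {
    Class clazz = Class.forName(className);
    // Class.newInstance() would fail here: after LUCENE-1763 merge policies
    // no longer have a nullary constructor, so look up the one-arg form.
    Constructor ctor = clazz.getConstructor(new Class[] { argType });
    return ctor.newInstance(new Object[] { arg });
  }
}

CreateIndexTask would then, in effect, call OneArgFactory.create(mergePolicy, IndexWriter.class, writer) and cast the result to MergePolicy.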
@@ -612,8 +612,8 @@ public class TestPerfTasksLogic extends TestCase {
 
   public static class MyMergePolicy extends LogDocMergePolicy {
     boolean called;
-    public MyMergePolicy() {
-      super();
+    public MyMergePolicy(IndexWriter writer) {
+      super(writer);
       called = true;
     }
   }
@@ -354,7 +354,7 @@ public class IndexWriter {
   // merges
   private HashSet mergingSegments = new HashSet();
 
-  private MergePolicy mergePolicy = new LogByteSizeMergePolicy();
+  private MergePolicy mergePolicy = new LogByteSizeMergePolicy(this);
   private MergeScheduler mergeScheduler = new ConcurrentMergeScheduler();
   private LinkedList pendingMerges = new LinkedList();
   private Set runningMerges = new HashSet();

@@ -2899,7 +2899,7 @@ public class IndexWriter {
     MergePolicy.MergeSpecification spec;
 
     synchronized(this) {
-      spec = mergePolicy.findMergesToExpungeDeletes(segmentInfos, this);
+      spec = mergePolicy.findMergesToExpungeDeletes(segmentInfos);
       if (spec != null) {
         final int numMerges = spec.merges.size();
         for(int i=0;i<numMerges;i++)

@@ -3014,7 +3014,7 @@ public class IndexWriter {
 
     final MergePolicy.MergeSpecification spec;
     if (optimize) {
-      spec = mergePolicy.findMergesForOptimize(segmentInfos, this, maxNumSegmentsOptimize, segmentsToOptimize);
+      spec = mergePolicy.findMergesForOptimize(segmentInfos, maxNumSegmentsOptimize, segmentsToOptimize);
 
       if (spec != null) {
         final int numMerges = spec.merges.size();

@@ -3026,7 +3026,7 @@ public class IndexWriter {
       }
 
     } else
-      spec = mergePolicy.findMerges(segmentInfos, this);
+      spec = mergePolicy.findMerges(segmentInfos);
 
     if (spec != null) {
       final int numMerges = spec.merges.size();
@@ -28,10 +28,10 @@ public class LogByteSizeMergePolicy extends LogMergePolicy {
 
   /** Default maximum segment size.  A segment of this size
    *  or larger will never be merged.  @see setMaxMergeMB */
-  public static final double DEFAULT_MAX_MERGE_MB = (double) Long.MAX_VALUE;
+  public static final double DEFAULT_MAX_MERGE_MB = Long.MAX_VALUE;
 
-  public LogByteSizeMergePolicy() {
-    super();
+  public LogByteSizeMergePolicy(IndexWriter writer) {
+    super(writer);
     minMergeSize = (long) (DEFAULT_MIN_MERGE_MB*1024*1024);
     maxMergeSize = (long) (DEFAULT_MAX_MERGE_MB*1024*1024);
   }

@@ -82,4 +82,3 @@ public class LogByteSizeMergePolicy extends LogMergePolicy {
     return ((double) minMergeSize)/1024/1024;
   }
 }
-
@@ -28,8 +28,8 @@ public class LogDocMergePolicy extends LogMergePolicy {
   /** Default minimum segment size.  @see setMinMergeDocs */
   public static final int DEFAULT_MIN_MERGE_DOCS = 1000;
 
-  public LogDocMergePolicy() {
-    super();
+  public LogDocMergePolicy(IndexWriter writer) {
+    super(writer);
     minMergeSize = DEFAULT_MIN_MERGE_DOCS;
 
     // maxMergeSize is never used by LogDocMergePolicy; set

@@ -60,4 +60,3 @@ public class LogDocMergePolicy extends LogMergePolicy {
     return (int) minMergeSize;
   }
 }
-
@@ -65,8 +65,11 @@ public abstract class LogMergePolicy extends MergePolicy {
 
   private boolean useCompoundFile = true;
   private boolean useCompoundDocStore = true;
-  protected IndexWriter writer;
 
+  public LogMergePolicy(IndexWriter writer) {
+    super(writer);
+  }
+
   protected boolean verbose() {
     return writer != null && writer.verbose();
   }

@@ -166,13 +169,13 @@ public abstract class LogMergePolicy extends MergePolicy {
     if (calibrateSizeByDeletes) {
       int delCount = writer.numDeletedDocs(info);
       float delRatio = (info.docCount <= 0 ? 0.0f : ((float)delCount / (float)info.docCount));
-      return (info.docCount <= 0 ? byteSize : (long)((float)byteSize * (1.0f - delRatio)));
+      return (info.docCount <= 0 ? byteSize : (long)(byteSize * (1.0f - delRatio)));
     } else {
       return byteSize;
     }
   }
 
-  private boolean isOptimized(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Set segmentsToOptimize) throws IOException {
+  private boolean isOptimized(SegmentInfos infos, int maxNumSegments, Set segmentsToOptimize) throws IOException {
     final int numSegments = infos.size();
     int numToOptimize = 0;
     SegmentInfo optimizeInfo = null;

@@ -185,13 +188,13 @@ public abstract class LogMergePolicy extends MergePolicy {
     }
 
     return numToOptimize <= maxNumSegments &&
-      (numToOptimize != 1 || isOptimized(writer, optimizeInfo));
+      (numToOptimize != 1 || isOptimized(optimizeInfo));
   }
 
   /** Returns true if this single info is optimized (has no
    *  pending norms or deletes, is in the same dir as the
    *  writer, and matches the current compound file setting */
-  private boolean isOptimized(IndexWriter writer, SegmentInfo info)
+  private boolean isOptimized(SegmentInfo info)
     throws IOException {
     boolean hasDeletions = writer.numDeletedDocs(info) > 0;
     return !hasDeletions &&

@@ -208,12 +211,13 @@ public abstract class LogMergePolicy extends MergePolicy {
    *  setting is true.  This method returns multiple merges
    *  (mergeFactor at a time) so the {@link MergeScheduler}
    *  in use may make use of concurrency. */
-  public MergeSpecification findMergesForOptimize(SegmentInfos infos, IndexWriter writer, int maxNumSegments, Set segmentsToOptimize) throws IOException {
+  public MergeSpecification findMergesForOptimize(SegmentInfos infos,
+      int maxNumSegments, Set segmentsToOptimize) throws IOException {
     MergeSpecification spec;
 
     assert maxNumSegments > 0;
 
-    if (!isOptimized(infos, writer, maxNumSegments, segmentsToOptimize)) {
+    if (!isOptimized(infos, maxNumSegments, segmentsToOptimize)) {
 
       // Find the newest (rightmost) segment that needs to
       // be optimized (other segments may have been flushed

@@ -245,7 +249,7 @@ public abstract class LogMergePolicy extends MergePolicy {
 
         // Since we must optimize down to 1 segment, the
         // choice is simple:
-        if (last > 1 || !isOptimized(writer, infos.info(0)))
+        if (last > 1 || !isOptimized(infos.info(0)))
           spec.add(new OneMerge(infos.range(0, last), useCompoundFile));
       } else if (last > maxNumSegments) {
 

@@ -291,12 +295,8 @@ public abstract class LogMergePolicy extends MergePolicy {
    * index.  We simply merge adjacent segments that have
    * deletes, up to mergeFactor at a time.
    */
-  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos,
-                                                       IndexWriter writer)
-    throws CorruptIndexException, IOException
-  {
-    this.writer = writer;
-
+  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
+      throws CorruptIndexException, IOException {
     final int numSegments = segmentInfos.size();
 
     if (verbose())

@@ -347,10 +347,9 @@ public abstract class LogMergePolicy extends MergePolicy {
    *  multiple levels have too many segments, this method
    *  will return multiple merges, allowing the {@link
    *  MergeScheduler} to use concurrency. */
-  public MergeSpecification findMerges(SegmentInfos infos, IndexWriter writer) throws IOException {
+  public MergeSpecification findMerges(SegmentInfos infos) throws IOException {
 
     final int numSegments = infos.size();
-    this.writer = writer;
     if (verbose())
       message("findMerges: " + numSegments + " segments");
 
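One behavioral detail in the sizeBytes hunk above is worth spelling out: when calibrateSizeByDeletes is on, a segment's effective size is discounted by its deleted-document ratio, so a half-deleted segment looks half as big to the level logic. Restated as standalone arithmetic (the helper below is illustrative, not a method in LogMergePolicy):

// Illustrative restatement of the calibration in LogMergePolicy:
// e.g. byteSize = 10 MB, docCount = 100, delCount = 50  ->  ~5 MB.
static long calibratedByteSize(long byteSize, int docCount, int delCount) {
  if (docCount <= 0) {
    return byteSize;  // guard against empty segments; use the raw size
  }
  float delRatio = (float) delCount / (float) docCount;
  return (long) (byteSize * (1.0f - delRatio));
}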
@@ -206,68 +206,69 @@ public abstract class MergePolicy {
     }
   }
 
-  /**
-   * Determine what set of merge operations are now
-   * necessary on the index.  The IndexWriter calls this
-   * whenever there is a change to the segments.  This call
-   * is always synchronized on the IndexWriter instance so
-   * only one thread at a time will call this method.
-   *
-   * @param segmentInfos the total set of segments in the index
-   * @param writer IndexWriter instance
-   */
-  abstract MergeSpecification findMerges(SegmentInfos segmentInfos,
-                                         IndexWriter writer)
-    throws CorruptIndexException, IOException;
-
-  /**
-   * Determine what set of merge operations is necessary in
-   * order to optimize the index.  The IndexWriter calls
-   * this when its optimize() method is called.  This call
-   * is always synchronized on the IndexWriter instance so
-   * only one thread at a time will call this method.
-   *
-   * @param segmentInfos the total set of segments in the index
-   * @param writer IndexWriter instance
-   * @param maxSegmentCount requested maximum number of
-   *   segments in the index (currently this is always 1)
-   * @param segmentsToOptimize contains the specific
-   *   SegmentInfo instances that must be merged away.  This
-   *   may be a subset of all SegmentInfos.
-   */
-  abstract MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
-                                                    IndexWriter writer,
-                                                    int maxSegmentCount,
-                                                    Set segmentsToOptimize)
-    throws CorruptIndexException, IOException;
-
-  /**
-   * Determine what set of merge operations is necessary in
-   * order to expunge all deletes from the index.
-   * @param segmentInfos the total set of segments in the index
-   * @param writer IndexWriter instance
-   */
-  MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos,
-                                                IndexWriter writer)
-    throws CorruptIndexException, IOException
-  {
-    throw new RuntimeException("not implemented");
+  final protected IndexWriter writer;
+
+  public MergePolicy(IndexWriter writer) {
+    this.writer = writer;
   }
 
+  /**
+   * Determine what set of merge operations are now necessary on the index.
+   * {@link IndexWriter} calls this whenever there is a change to the segments.
+   * This call is always synchronized on the {@link IndexWriter} instance so
+   * only one thread at a time will call this method.
+   *
+   * @param segmentInfos
+   *          the total set of segments in the index
+   */
+  public abstract MergeSpecification findMerges(SegmentInfos segmentInfos)
+      throws CorruptIndexException, IOException;
+
+  /**
+   * Determine what set of merge operations is necessary in order to optimize
+   * the index. {@link IndexWriter} calls this when its
+   * {@link IndexWriter#optimize()} method is called. This call is always
+   * synchronized on the {@link IndexWriter} instance so only one thread at a
+   * time will call this method.
+   *
+   * @param segmentInfos
+   *          the total set of segments in the index
+   * @param maxSegmentCount
+   *          requested maximum number of segments in the index (currently this
+   *          is always 1)
+   * @param segmentsToOptimize
+   *          contains the specific SegmentInfo instances that must be merged
+   *          away. This may be a subset of all SegmentInfos.
+   */
+  public abstract MergeSpecification findMergesForOptimize(
+      SegmentInfos segmentInfos, int maxSegmentCount, Set segmentsToOptimize)
+      throws CorruptIndexException, IOException;
+
+  /**
+   * Determine what set of merge operations is necessary in order to expunge all
+   * deletes from the index.
+   *
+   * @param segmentInfos
+   *          the total set of segments in the index
+   */
+  public abstract MergeSpecification findMergesToExpungeDeletes(
+      SegmentInfos segmentInfos) throws CorruptIndexException, IOException;
+
   /**
    * Release all resources for the policy.
    */
-  abstract void close();
+  public abstract void close();
 
   /**
    * Returns true if a newly flushed (not from merge)
    * segment should use the compound file format.
    */
-  abstract boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment);
+  public abstract boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment);
 
   /**
    * Returns true if the doc store files should use the
   * compound file format.
   */
-  abstract boolean useCompoundDocStore(SegmentInfos segments);
+  public abstract boolean useCompoundDocStore(SegmentInfos segments);
 
 }
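With the new base class, the writer is supplied once at construction and every find* method drops its IndexWriter parameter. A minimal subclass sketch against the signatures above; the class name is made up, and it deliberately never merges (the IndexWriter call sites earlier in this commit treat a null MergeSpecification as "nothing to do"):

import java.io.IOException;
import java.util.Set;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentInfos;

// Hypothetical policy that never schedules a merge; it only demonstrates the
// post-LUCENE-1763 shape of a MergePolicy subclass.
public class NeverMergePolicy extends MergePolicy {

  public NeverMergePolicy(IndexWriter writer) {
    super(writer);  // the writer is bound here, not passed per call
  }

  public MergeSpecification findMerges(SegmentInfos segmentInfos)
      throws CorruptIndexException, IOException {
    return null;  // null = no merges necessary
  }

  public MergeSpecification findMergesForOptimize(SegmentInfos segmentInfos,
      int maxSegmentCount, Set segmentsToOptimize)
      throws CorruptIndexException, IOException {
    return null;
  }

  public MergeSpecification findMergesToExpungeDeletes(SegmentInfos segmentInfos)
      throws CorruptIndexException, IOException {
    return null;
  }

  public void close() {}

  public boolean useCompoundFile(SegmentInfos segments, SegmentInfo newSegment) {
    return true;
  }

  public boolean useCompoundDocStore(SegmentInfos segments) {
    return true;
  }
}

Installing it follows the same pattern as the tests below: writer.setMergePolicy(new NeverMergePolicy(writer)).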
@@ -426,7 +426,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
   private IndexWriter newWriter(Directory dir, boolean create)
     throws IOException {
     final IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), create);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
     return writer;
   }
 

@@ -500,7 +500,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
 
     Directory dir = new MockRAMDirectory();
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-    writer.setMergePolicy(new LogByteSizeMergePolicy());
+    writer.setMergePolicy(new LogByteSizeMergePolicy(writer));
     writer.setMaxBufferedDocs(5);
     writer.setUseCompoundFile(false);
     writer.setMergeFactor(100);

@@ -526,7 +526,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
 
     Directory dir2 = new MockRAMDirectory();
     writer = new IndexWriter(dir2, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
+    LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy(writer);
     lmp.setMinMergeMB(0.0001);
     writer.setMergePolicy(lmp);
     writer.setMergeFactor(4);
@@ -104,7 +104,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
     ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
     writer.setMergeScheduler(cms);
 
-    LogDocMergePolicy mp = new LogDocMergePolicy();
+    LogDocMergePolicy mp = new LogDocMergePolicy(writer);
     writer.setMergePolicy(mp);
 
     // Force degenerate merging so we can get a mix of
@@ -930,7 +930,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
     IndexWriter.unlock(dir);
     IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
 
-    w.setMergePolicy(new LogDocMergePolicy());
+    w.setMergePolicy(new LogDocMergePolicy(w));
 
     for (int i = 0; i < 100; i++) {
       w.addDocument(createDocument(i, 4));
@@ -629,7 +629,7 @@ public class TestIndexWriter extends LuceneTestCase
 
     for(int numDocs=38;numDocs<500;numDocs += 38) {
       IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-      LogDocMergePolicy ldmp = new LogDocMergePolicy();
+      LogDocMergePolicy ldmp = new LogDocMergePolicy(writer);
       ldmp.setMinMergeDocs(1);
       writer.setMergePolicy(ldmp);
       writer.setMergeFactor(5);

@@ -666,7 +666,7 @@ public class TestIndexWriter extends LuceneTestCase
     doc.add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
 
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
-    LogDocMergePolicy ldmp = new LogDocMergePolicy();
+    LogDocMergePolicy ldmp = new LogDocMergePolicy(writer);
     ldmp.setMinMergeDocs(1);
     writer.setMergePolicy(ldmp);
     writer.setMergeFactor(4);

@@ -2813,7 +2813,7 @@ public class TestIndexWriter extends LuceneTestCase
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
     writer.setMergeScheduler(new SerialMergeScheduler());
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     Document document = new Document();
 

@@ -2846,7 +2846,7 @@ public class TestIndexWriter extends LuceneTestCase
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
     writer.setMergeScheduler(new SerialMergeScheduler());
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     Directory[] indexDirs = {new MockRAMDirectory(dir)};
     writer.addIndexes(indexDirs);

@@ -2865,7 +2865,7 @@ public class TestIndexWriter extends LuceneTestCase
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
     writer.setMergeScheduler(new SerialMergeScheduler());
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     Document document = new Document();
 

@@ -2903,7 +2903,7 @@ public class TestIndexWriter extends LuceneTestCase
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
     writer.setMergeScheduler(new SerialMergeScheduler());
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     Document document = new Document();
 

@@ -2925,7 +2925,7 @@ public class TestIndexWriter extends LuceneTestCase
     writer.setMaxBufferedDocs(2);
     writer.setRAMBufferSizeMB(IndexWriter.DISABLE_AUTO_FLUSH);
     writer.setMergeScheduler(new SerialMergeScheduler());
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
     for(int i=0;i<6;i++)
       writer.addDocument(document);
 
@@ -37,7 +37,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(10);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     for (int i = 0; i < 100; i++) {
       addDoc(writer);

@@ -54,7 +54,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(10);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     boolean noOverMerge = false;
     for (int i = 0; i < 100; i++) {

@@ -76,7 +76,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(10);
-    LogDocMergePolicy mp = new LogDocMergePolicy();
+    LogDocMergePolicy mp = new LogDocMergePolicy(writer);
     mp.setMinMergeDocs(100);
     writer.setMergePolicy(mp);
 

@@ -102,7 +102,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     for (int i = 0; i < 250; i++) {
       addDoc(writer);

@@ -128,7 +128,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
     writer.setMaxBufferedDocs(101);
     writer.setMergeFactor(101);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
 
     // leftmost* segment has 1 doc
     // rightmost* segment has 100 docs

@@ -142,7 +142,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
       writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
       writer.setMaxBufferedDocs(101);
       writer.setMergeFactor(101);
-      writer.setMergePolicy(new LogDocMergePolicy());
+      writer.setMergePolicy(new LogDocMergePolicy(writer));
     }
 
     writer.setMaxBufferedDocs(10);

@@ -168,7 +168,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     Directory dir = new RAMDirectory();
 
     IndexWriter writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), true);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(100);
 

@@ -183,7 +183,7 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
     reader.close();
 
     writer = new IndexWriter(dir, true, new WhitespaceAnalyzer(), false);
-    writer.setMergePolicy(new LogDocMergePolicy());
+    writer.setMergePolicy(new LogDocMergePolicy(writer));
     writer.setMaxBufferedDocs(10);
     writer.setMergeFactor(5);
 
@@ -539,7 +539,7 @@ public class TestIndexWriterReader extends LuceneTestCase {
       boolean multiSegment) throws IOException {
     IndexWriter w = new IndexWriter(dir1, new WhitespaceAnalyzer(),
         IndexWriter.MaxFieldLength.LIMITED);
-    w.setMergePolicy(new LogDocMergePolicy());
+    w.setMergePolicy(new LogDocMergePolicy(w));
     for (int i = 0; i < 100; i++) {
       w.addDocument(createDocument(i, indexName, 4));
       if (multiSegment && (i % 10) == 0) {