Make sure bulk merging is working

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1062509 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2011-01-23 20:00:08 +00:00
parent 4af1d0e0be
commit 68dc071064
2 changed files with 22 additions and 1 deletion

View File

@ -270,6 +270,9 @@ public class IndexWriter implements Closeable {
// The PayloadProcessorProvider to use when segments are merged
private PayloadProcessorProvider payloadProcessorProvider;
// for testing
boolean anyNonBulkMerges;
/**
* Expert: returns a readonly reader, covering all
* committed as well as un-committed changes to the index.
@ -333,6 +336,8 @@ public class IndexWriter implements Closeable {
ensureOpen();
final long tStart = System.currentTimeMillis();
if (infoStream != null) {
message("flush at getReader");
}
@ -355,6 +360,9 @@ public class IndexWriter implements Closeable {
}
maybeMerge();
if (infoStream != null) {
message("getReader took " + (System.currentTimeMillis() - tStart) + " msec");
}
return r;
}
@ -3085,6 +3093,7 @@ public class IndexWriter implements Closeable {
message("merge segmentCodecs=" + merger.getSegmentCodecs());
message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + numSegments);
}
anyNonBulkMerges |= merger.getMatchedSubReaderCount() != numSegments;
assert mergedDocCount == totDocCount;

View File

@ -39,6 +39,7 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.NamedThreadFactory;
@ -132,6 +133,13 @@ public class TestNRTThreads extends LuceneTestCase {
if (doc == null) {
break;
}
final String addedField;
if (random.nextBoolean()) {
addedField = "extra" + random.nextInt(10);
doc.add(new Field(addedField, "a random field", Field.Store.NO, Field.Index.ANALYZED));
} else {
addedField = null;
}
if (random.nextBoolean()) {
if (VERBOSE) {
//System.out.println(Thread.currentThread().getName() + ": add doc id:" + doc.get("id"));
@ -166,6 +174,9 @@ public class TestNRTThreads extends LuceneTestCase {
toDeleteIDs.clear();
}
addCount.getAndIncrement();
if (addedField != null) {
doc.removeField(addedField);
}
} catch (Exception exc) {
System.out.println(Thread.currentThread().getName() + ": hit exc");
exc.printStackTrace();
@ -348,6 +359,7 @@ public class TestNRTThreads extends LuceneTestCase {
writer.commit();
assertEquals("index=" + writer.segString() + " addCount=" + addCount + " delCount=" + delCount, addCount.get() - delCount.get(), writer.numDocs());
assertFalse(writer.anyNonBulkMerges);
writer.close(false);
_TestUtil.checkIndex(dir);
dir.close();