LUCENE-1075: fix possible thread hazard with IndexWriter.close(false)

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@601114 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2007-12-04 22:19:08 +00:00
parent 5f73506dd3
commit c23cb457ef
3 changed files with 30 additions and 18 deletions
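For context, a minimal sketch (not part of the commit) of the scenario it hardens, assuming the 2.3-era IndexWriter API; the class name, directory and documents below are illustrative. close(false) tells the writer not to wait for background merges, so any merges still running under ConcurrentMergeScheduler have to be aborted safely.

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class CloseNoWaitExample {
  public static void main(String[] args) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
    writer.setMergeScheduler(new ConcurrentMergeScheduler());
    writer.setMaxBufferedDocs(2);
    writer.setMergeFactor(3);           // small factor so background merges start quickly

    for (int i = 0; i < 200; i++) {
      Document doc = new Document();
      doc.add(new Field("id", Integer.toString(i), Field.Store.YES, Field.Index.UN_TOKENIZED));
      writer.addDocument(doc);
    }

    // false = do not wait for running merges; they are aborted instead.
    // The hazard fixed here is the race between this abort and merge
    // threads still inside commitMerge()/mergeInit().
    writer.close(false);
  }
}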

IndexWriter.java

@@ -2602,11 +2602,8 @@ public class IndexWriter {
     // file that current segments does not reference), we
     // abort this merge
     if (merge.isAborted()) {
-      if (infoStream != null) {
-        if (merge.isAborted())
-          message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
-      }
+      if (infoStream != null)
+        message("commitMerge: skipping merge " + merge.segString(directory) + ": it was aborted");
       assert merge.increfDone;
       decrefMergeSegments(merge);
@@ -2866,9 +2863,8 @@ public class IndexWriter {
    * the synchronized lock on IndexWriter instance. */
   final synchronized void mergeInit(MergePolicy.OneMerge merge) throws IOException {
-    // Bind a new segment name here so even with
-    // ConcurrentMergePolicy we keep deterministic segment
-    // names.
+    if (merge.isAborted())
+      throw new IOException("merge is aborted");
     assert merge.registerDone;
@@ -2982,6 +2978,10 @@ public class IndexWriter {
     merge.increfDone = true;
     merge.mergeDocStores = mergeDocStores;
+    // Bind a new segment name here so even with
+    // ConcurrentMergePolicy we keep deterministic segment
+    // names.
     merge.info = new SegmentInfo(newSegmentName(), 0,
                                  directory, false, true,
                                  docStoreOffset,
@@ -3033,6 +3033,7 @@
try {
int totDocCount = 0;
for (int i = 0; i < numSegments; i++) {
SegmentInfo si = sourceSegmentsClone.info(i);
IndexReader reader = SegmentReader.get(si, MERGE_READ_BUFFER_SIZE, merge.mergeDocStores); // no need to set deleter (yet)
@@ -3043,6 +3044,9 @@
           message("merge: total "+totDocCount+" docs");
         }
+        if (merge.isAborted())
+          throw new IOException("merge is aborted");
         mergedDocCount = merge.info.docCount = merger.merge(merge.mergeDocStores);
         assert mergedDocCount == totDocCount;
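What the IndexWriter changes add is cooperative cancellation: mergeInit() and mergeMiddle() now poll merge.isAborted() at phase boundaries and bail out with an IOException, so close(false) on another thread cannot race a merge past the point of no return. A stripped-down sketch of that protocol, not Lucene's actual OneMerge code (names here are illustrative):

import java.io.IOException;

// Illustrative only: a minimal stand-in for the abort flag that
// MergePolicy.OneMerge exposes via abort()/isAborted().
class MergeTask {
  private boolean aborted;                        // guarded by synchronized(this)

  synchronized void abort()        { aborted = true; }
  synchronized boolean isAborted() { return aborted; }

  void run() throws IOException {
    // Poll at each expensive phase boundary, mirroring the checks the
    // commit adds to mergeInit() and mergeMiddle():
    if (isAborted())
      throw new IOException("merge is aborted");
    // ... open segment readers, merge postings ...
    if (isAborted())
      throw new IOException("merge is aborted");
    // ... write and commit the merged segment ...
  }
}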

TestConcurrentMergeScheduler.java

@@ -19,19 +19,14 @@ package org.apache.lucene.index;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.util._TestUtil;
import org.apache.lucene.util.English;
import org.apache.lucene.util.LuceneTestCase;
import java.io.IOException;
import java.io.File;
public class TestConcurrentMergeScheduler extends LuceneTestCase {
@@ -193,6 +188,7 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
       ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler();
       writer.setMergeScheduler(cms);
       writer.setMaxBufferedDocs(2);
+      writer.setMergeFactor(100);
       for(int j=0;j<201;j++) {
         idField.setValue(Integer.toString(iter*201+j));
@@ -205,10 +201,16 @@
         delID += 5;
       }
+      // Force a bunch of merge threads to kick off so we
+      // stress out aborting them on close:
+      writer.setMergeFactor(3);
+      writer.addDocument(doc);
+      writer.flush();
       writer.close(false);
       IndexReader reader = IndexReader.open(directory);
-      assertEquals((1+iter)*181, reader.numDocs());
+      assertEquals((1+iter)*182, reader.numDocs());
       reader.close();
       // Reopen
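The expected count per iteration moves from (1+iter)*181 to (1+iter)*182, which lines up with the one extra writer.addDocument(doc) now issued before the close: 181 surviving documents per iteration plus 1 = 182, and close(false) must not lose any of them even though it aborts the in-flight merges.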

MockRAMDirectory.java

@@ -146,11 +146,17 @@ public class MockRAMDirectory extends RAMDirectory {
     RAMFile file = new RAMFile(this);
     synchronized (this) {
       RAMFile existing = (RAMFile)fileMap.get(name);
-      if (existing!=null) {
-        sizeInBytes -= existing.sizeInBytes;
-        existing.directory = null;
-      }
-      fileMap.put(name, file);
+      // Enforce write once:
+      if (existing!=null && !name.equals("segments.gen"))
+        throw new IOException("file " + name + " already exists");
+      else {
+        if (existing!=null) {
+          sizeInBytes -= existing.sizeInBytes;
+          existing.directory = null;
+        }
+        fileMap.put(name, file);
+      }
     }
     return new MockRAMOutputStream(this, file);
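Finally, a minimal sketch of what the new MockRAMDirectory check enforces, assuming the test-only class org.apache.lucene.store.MockRAMDirectory is on the classpath; the file names are illustrative. Creating a file that already exists now fails, except for segments.gen, which Lucene legitimately rewrites in place:

import java.io.IOException;
import org.apache.lucene.store.MockRAMDirectory;

public class WriteOnceCheck {
  public static void main(String[] args) throws IOException {
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.createOutput("_0.fnm").close();
    try {
      dir.createOutput("_0.fnm");                 // second create of the same name
      System.out.println("unexpected: overwrite was allowed");
    } catch (IOException ioe) {
      // expected: "file _0.fnm already exists"
    }
    // segments.gen is exempt from the write-once check:
    dir.createOutput("segments.gen").close();
    dir.createOutput("segments.gen").close();
  }
}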