LUCENE-1270: fix intermittent case where IW.close() can hang after IW.addIndexesNoOptimize is called

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@651026 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2008-04-23 19:37:21 +00:00
parent e35d9f6c62
commit 963ec9e522
2 changed files with 62 additions and 6 deletions

View File

@ -1658,6 +1658,11 @@ public class IndexWriter {
// going to wait for merges:
flush(waitForMerges, true, true);
if (waitForMerges)
// Give merge scheduler last chance to run, in case
// any pending merges are waiting:
mergeScheduler.merge(this);
mergePolicy.close();
finishMerges(waitForMerges);
@ -2889,6 +2894,9 @@ public class IndexWriter {
* then copy them over. Currently this is only used by
* addIndexesNoOptimize(). */
private void copyExternalSegments() throws CorruptIndexException, IOException {
boolean any = false;
while(true) {
SegmentInfo info = null;
MergePolicy.OneMerge merge = null;
@ -2907,6 +2915,7 @@ public class IndexWriter {
if (registerMerge(merge)) {
pendingMerges.remove(merge);
runningMerges.add(merge);
any = true;
merge(merge);
} else
// This means there is a bug in the
@ -2923,6 +2932,11 @@ public class IndexWriter {
// No more external segments
break;
}
if (any)
// Sometimes, on copying an external segment over,
// more merges may become necessary:
mergeScheduler.merge(this);
}
/** Merges the provided indexes into this index.

View File

@ -26,6 +26,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MockRAMDirectory;
import org.apache.lucene.search.PhraseQuery;
@ -432,8 +433,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
private void addDocs(IndexWriter writer, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc
.add(new Field("content", "aaa", Field.Store.NO,
doc.add(new Field("content", "aaa", Field.Store.NO,
Field.Index.TOKENIZED));
writer.addDocument(doc);
}
@ -442,8 +442,7 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
private void addDocs2(IndexWriter writer, int numDocs) throws IOException {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc
.add(new Field("content", "bbb", Field.Store.NO,
doc.add(new Field("content", "bbb", Field.Store.NO,
Field.Index.TOKENIZED));
writer.addDocument(doc);
}
@ -495,4 +494,47 @@ public class TestAddIndexesNoOptimize extends LuceneTestCase {
assertEquals(3, writer.getSegmentCount());
writer.close();
}
// LUCENE-1270
// Regression test: per the commit message, IndexWriter.close() could
// intermittently hang after addIndexesNoOptimize(); this reproduces the
// scenario (many small external segments copied into a writer whose merge
// policy immediately wants more merges).
public void testHangOnClose() throws IOException {
// Build a source index with many small segments: flush every 5 docs
// while adding 60 docs, then 10 more stored-only docs in one batch.
Directory dir = new MockRAMDirectory();
IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
writer.setMergePolicy(new LogByteSizeMergePolicy());
writer.setMaxBufferedDocs(5);
writer.setUseCompoundFile(false);
writer.setMergeFactor(100);
Document doc = new Document();
doc.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
for(int i=0;i<60;i++)
writer.addDocument(doc);
// Raise the flush threshold so the next 10 docs land in a single segment.
writer.setMaxBufferedDocs(200);
// Stored-only (unindexed) fields; four copies make the doc larger.
Document doc2 = new Document();
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
doc2.add(new Field("content", "aaa bbb ccc ddd eee fff ggg hhh iii", Field.Store.YES,
Field.Index.NO));
for(int i=0;i<10;i++)
writer.addDocument(doc2);
writer.close();
// Second writer: a tiny minMergeMB plus a small merge factor means the
// copied-in external segments immediately qualify for further merging.
// SerialMergeScheduler runs those merges on the calling thread, which is
// the configuration where close() used to hang (see commit message).
Directory dir2 = new MockRAMDirectory();
writer = new IndexWriter(dir2, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
LogByteSizeMergePolicy lmp = new LogByteSizeMergePolicy();
lmp.setMinMergeMB(0.0001);
writer.setMergePolicy(lmp);
writer.setMergeFactor(4);
writer.setUseCompoundFile(false);
writer.setMergeScheduler(new SerialMergeScheduler());
writer.addIndexesNoOptimize(new Directory[] {dir});
// Must not hang: any merges made necessary by the copied segments should
// have been given a chance to run before close() waits on them.
writer.close();
dir.close();
dir2.close();
}
}