LUCENE-4653: make test more evil; fix leak on exception in IW.getReader

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1428432 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless, 2013-01-03 16:11:31 +00:00
commit 85a3d34674 (parent 325c3678ea)
2 changed files with 66 additions and 40 deletions

org/apache/lucene/index/IndexWriter.java:

@@ -336,7 +336,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
     // obtained during this flush are pooled, the first time
     // this method is called:
     poolReaders = true;
-    final DirectoryReader r;
+    DirectoryReader r = null;
     doBeforeFlush();
     boolean anySegmentFlushed = false;
     /*
@@ -346,46 +346,54 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
      * We release the two stage full flush after we are done opening the
      * directory reader!
      */
-    synchronized (fullFlushLock) {
-      boolean success = false;
-      try {
-        anySegmentFlushed = docWriter.flushAllThreads();
-        if (!anySegmentFlushed) {
-          // prevent double increment since docWriter#doFlush increments the flushcount
-          // if we flushed anything.
-          flushCount.incrementAndGet();
-        }
-        success = true;
-        // Prevent segmentInfos from changing while opening the
-        // reader; in theory we could do similar retry logic,
-        // just like we do when loading segments_N
-        synchronized(this) {
-          maybeApplyDeletes(applyAllDeletes);
-          r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes);
-          if (infoStream.isEnabled("IW")) {
-            infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
-          }
-        }
-      } catch (OutOfMemoryError oom) {
-        handleOOM(oom, "getReader");
-        // never reached but javac disagrees:
-        return null;
-      } finally {
-        if (!success) {
-          if (infoStream.isEnabled("IW")) {
-            infoStream.message("IW", "hit exception during NRT reader");
-          }
-        }
-        // Done: finish the full flush!
-        docWriter.finishFullFlush(success);
-        doAfterFlush();
-      }
-    }
-    if (anySegmentFlushed) {
-      maybeMerge(MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
-    }
-    if (infoStream.isEnabled("IW")) {
-      infoStream.message("IW", "getReader took " + (System.currentTimeMillis() - tStart) + " msec");
-    }
+    boolean success2 = false;
+    try {
+      synchronized (fullFlushLock) {
+        boolean success = false;
+        try {
+          anySegmentFlushed = docWriter.flushAllThreads();
+          if (!anySegmentFlushed) {
+            // prevent double increment since docWriter#doFlush increments the flushcount
+            // if we flushed anything.
+            flushCount.incrementAndGet();
+          }
+          success = true;
+          // Prevent segmentInfos from changing while opening the
+          // reader; in theory we could do similar retry logic,
+          // just like we do when loading segments_N
+          synchronized(this) {
+            maybeApplyDeletes(applyAllDeletes);
+            r = StandardDirectoryReader.open(this, segmentInfos, applyAllDeletes);
+            if (infoStream.isEnabled("IW")) {
+              infoStream.message("IW", "return reader version=" + r.getVersion() + " reader=" + r);
+            }
+          }
+        } catch (OutOfMemoryError oom) {
+          handleOOM(oom, "getReader");
+          // never reached but javac disagrees:
+          return null;
+        } finally {
+          if (!success) {
+            if (infoStream.isEnabled("IW")) {
+              infoStream.message("IW", "hit exception during NRT reader");
+            }
+          }
+          // Done: finish the full flush!
+          docWriter.finishFullFlush(success);
+          doAfterFlush();
+        }
+      }
+      if (anySegmentFlushed) {
+        maybeMerge(MergeTrigger.FULL_FLUSH, UNBOUNDED_MAX_MERGE_SEGMENTS);
+      }
+      if (infoStream.isEnabled("IW")) {
+        infoStream.message("IW", "getReader took " + (System.currentTimeMillis() - tStart) + " msec");
+      }
+      success2 = true;
+    } finally {
+      if (!success2) {
+        IOUtils.closeWhileHandlingException(r);
+      }
+    }
     return r;
   }
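Why the IndexWriter change fixes the leak: before this patch, an exception thrown after StandardDirectoryReader.open() had succeeded (for example out of maybeMerge or the infoStream logging) escaped getReader() without the freshly opened NRT reader ever being closed. The new outer try/finally tracks success2 and, on the failure path, hands the reader to IOUtils.closeWhileHandlingException, which tolerates null and suppresses secondary exceptions so the original one keeps propagating; that is also why r can no longer be final and is initialized to null. A minimal standalone sketch of the pattern, with illustrative helper names that are not from the Lucene sources:

import java.io.Closeable;
import java.io.IOException;

public class NrtCleanupSketch {

  // Stand-in for StandardDirectoryReader.open(): on success the caller
  // owns the returned resource and must close it on every path.
  static Closeable openReader() throws IOException {
    return new Closeable() {
      @Override
      public void close() {
        System.out.println("reader closed");
      }
    };
  }

  // Stand-in for the work that runs after the reader exists
  // (maybeMerge, infoStream logging); it can still throw.
  static void postOpenWork() throws IOException {
    throw new IOException("simulated failure after open");
  }

  static Closeable getReader() throws IOException {
    Closeable r = null;
    boolean success = false;   // plays the role of success2 in the patch
    try {
      r = openReader();
      postOpenWork();          // without the finally below, r would leak here
      success = true;
      return r;
    } finally {
      if (!success && r != null) {
        try {
          r.close();           // what IOUtils.closeWhileHandlingException does
        } catch (Throwable t) {
          // suppressed so the original exception keeps propagating
        }
      }
    }
  }

  public static void main(String[] args) {
    try {
      getReader();
    } catch (IOException e) {
      // prints "reader closed" first, then:
      System.out.println("caught: " + e.getMessage());
    }
  }
}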

org/apache/lucene/index/TestIndexWriter.java:

@@ -17,7 +17,6 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
-import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.Reader;
 import java.io.StringReader;
@@ -1028,14 +1027,33 @@ public class TestIndexWriter extends LuceneTestCase {
       doc.add(newField("field", "some text contents", storedTextType));
       for(int i=0;i<100;i++) {
         idField.setStringValue(Integer.toString(i));
-        if (i%2 == 0) {
+        if (i%30 == 0) {
+          w.deleteAll();
+        } else if (i%2 == 0) {
           w.updateDocument(new Term("id", idField.stringValue()), doc);
         } else {
           w.addDocument(doc);
         }
+        if (i%3 == 0) {
+          IndexReader r = null;
+          boolean success = false;
+          try {
+            r = DirectoryReader.open(w, true);
+            success = true;
+          } finally {
+            if (success) {
+              r.close();
+            } else {
+              IOUtils.closeWhileHandlingException(r);
+            }
+          }
+        }
         if (i%10 == 0) {
           w.commit();
         }
+        if (i%40 == 0) {
+          w.forceMerge(1);
+        }
       }
       w.close();
       w = null;
@@ -1120,7 +1138,7 @@ public class TestIndexWriter extends LuceneTestCase {
     assertTrue(new ThreadInterruptedException(new InterruptedException()).getCause() instanceof InterruptedException);
 
     // issue 300 interrupts to child thread
-    final int numInterrupts = atLeast(300);
+    final int numInterrupts = atLeast(3000);
     int i = 0;
     while(i < numInterrupts) {
       // TODO: would be nice to also sometimes interrupt the
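The test side is what makes this commit "more evil": the indexing loop now also wipes the index with deleteAll() every 30 documents, opens and closes an NRT reader every 3 documents, force-merges down to one segment every 40, and the interrupt test fires atLeast(3000) rather than atLeast(300) interrupts, so exceptions land in the middle of getReader() often enough to catch the leak the IndexWriter change fixes. The open/close idiom the test uses could be factored into a helper; the sketch below targets the Lucene 4.x API shown in the diff (DirectoryReader.open(IndexWriter, boolean) and IOUtils.closeWhileHandlingException), but the helper itself is hypothetical, not part of this commit:

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.util.IOUtils;

public final class NrtReaderUtil {
  private NrtReaderUtil() {}

  // Open an NRT reader from a writer and guarantee it is closed on every
  // path: normally on success, via closeWhileHandlingException (which
  // tolerates null and suppresses secondary exceptions) on failure.
  public static int docCountViaNrtReader(IndexWriter w) throws IOException {
    IndexReader r = null;
    boolean success = false;
    try {
      r = DirectoryReader.open(w, true); // applyAllDeletes=true, as in the test
      int count = r.numDocs();
      success = true;
      return count;
    } finally {
      if (success) {
        r.close();
      } else {
        IOUtils.closeWhileHandlingException(r);
      }
    }
  }
}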