LUCENE-1198: don't increment numDocsInRAM unless DocumentsWriter.ThreadState.init succeeds

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@632871 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2008-03-02 23:24:07 +00:00
parent 00fe257ddf
commit 3efa1c47ca
4 changed files with 71 additions and 15 deletions
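
The gist of the change: DocumentsWriter previously bumped numDocsInRAM (and decided whether to flush) before ThreadState.init ran, so an exception thrown inside init left the buffered-document count, and the flush-by-doc-count trigger derived from it, out of sync with the documents actually buffered. The patch moves both the increment and the flush decision to after init succeeds. A minimal sketch of that pattern follows; it uses hypothetical names, not the actual DocumentsWriter fields:

    // Sketch only: hypothetical names, not the actual Lucene classes or fields.
    final class BufferSketch {
      private int numBuffered;                     // analogous to numDocsInRAM
      private boolean flushPending;
      private static final int MAX_BUFFERED = 10;  // analogous to maxBufferedDocs

      synchronized void add(String doc) {
        boolean success = false;
        try {
          init(doc);                     // may throw, like ThreadState.init
          // Only update shared bookkeeping once init has succeeded:
          numBuffered++;
          if (!flushPending && numBuffered >= MAX_BUFFERED)
            flushPending = true;         // commit to flushing at exactly N docs
          success = true;
        } finally {
          if (!success) {
            // abort path: counters were never bumped, so nothing to roll back
          }
        }
      }

      private void init(String doc) {
        if (doc == null)
          throw new RuntimeException("intentionally failing");
      }
    }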

DocumentsWriter.java

@@ -722,6 +722,7 @@ final class DocumentsWriter {
     void init(Document doc, int docID) throws IOException, AbortException {
       assert !isIdle;
+      assert writer.testPoint("DocumentsWriter.ThreadState.init start");
       this.docID = docID;
       docBoost = doc.getBoost();
@@ -2440,18 +2441,6 @@ final class DocumentsWriter {
     if (segment == null)
       segment = writer.newSegmentName();

-    numDocsInRAM++;
-
-    // We must at this point commit to flushing to ensure we
-    // always get N docs when we flush by doc count, even if
-    // > 1 thread is adding documents:
-    if (!flushPending && maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH
-        && numDocsInRAM >= maxBufferedDocs) {
-      flushPending = true;
-      state.doFlushAfter = true;
-    } else
-      state.doFlushAfter = false;
-
     state.isIdle = false;

     try {
@@ -2460,11 +2449,21 @@ final class DocumentsWriter {
       state.init(doc, nextDocID);
       if (delTerm != null) {
         addDeleteTerm(delTerm, state.docID);
-        if (!state.doFlushAfter)
-          state.doFlushAfter = timeToFlushDeletes();
+        state.doFlushAfter = timeToFlushDeletes();
       }
-      // Only increment nextDocID on successful init
+      // Only increment nextDocID & numDocsInRAM on successful init
       nextDocID++;
+      numDocsInRAM++;
+
+      // We must at this point commit to flushing to ensure we
+      // always get N docs when we flush by doc count, even if
+      // > 1 thread is adding documents:
+      if (!flushPending && maxBufferedDocs != IndexWriter.DISABLE_AUTO_FLUSH
+          && numDocsInRAM >= maxBufferedDocs) {
+        flushPending = true;
+        state.doFlushAfter = true;
+      }
+
       success = true;
     } finally {
       if (!success) {
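
The "commit to flushing" comment matters because the counter increment and the flushPending check happen while holding the same monitor: with flush-by-doc-count, exactly one add observes the threshold and flags the flush, even when several threads add documents concurrently. A rough illustration of that invariant (hypothetical names, not DocumentsWriter's actual locking):

    // Sketch only: hypothetical names; shows that the N-th add, and only the
    // N-th add, triggers the flush when the check shares the lock with the
    // increment.
    import java.util.concurrent.atomic.AtomicInteger;

    class FlushByCountSketch {
      private int numBuffered;
      private boolean flushPending;
      private final int maxBuffered;
      final AtomicInteger flushesTriggered = new AtomicInteger();

      FlushByCountSketch(int maxBuffered) { this.maxBuffered = maxBuffered; }

      synchronized void addOne() {
        numBuffered++;
        if (!flushPending && numBuffered >= maxBuffered) {
          flushPending = true;                 // only the N-th add sets this
          flushesTriggered.incrementAndGet();
        }
      }

      public static void main(String[] args) throws InterruptedException {
        FlushByCountSketch s = new FlushByCountSketch(10);
        Runnable r = () -> { for (int i = 0; i < 5; i++) s.addOne(); };
        Thread t1 = new Thread(r), t2 = new Thread(r);
        t1.start(); t2.start();
        t1.join(); t2.join();
        // 10 docs were added across two threads, so exactly one flush was flagged.
        System.out.println("flushes triggered: " + s.flushesTriggered.get()); // 1
      }
    }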

IndexWriter.java

@@ -4243,4 +4243,10 @@ public class IndexWriter {
     public static final MaxFieldLength LIMITED
        = new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH);
   }
+
+  // Used only by assert for testing.  Current points:
+  //   "DocumentsWriter.ThreadState.init start"
+  boolean testPoint(String name) {
+    return true;
+  }
 }

TestIndexWriter.java

@@ -3123,4 +3123,39 @@ public class TestIndexWriter extends LuceneTestCase
     writer.addDocument(doc);
     writer.close();
   }
+
+  // LUCENE-1198
+  public class MockIndexWriter extends IndexWriter {
+
+    public MockIndexWriter(Directory dir, boolean autoCommit, Analyzer a, boolean create, MaxFieldLength mfl) throws IOException {
+      super(dir, autoCommit, a, create, mfl);
+    }
+
+    boolean doFail;
+
+    boolean testPoint(String name) {
+      if (doFail && name.equals("DocumentsWriter.ThreadState.init start"))
+        throw new RuntimeException("intentionally failing");
+      return true;
+    }
+  }
+
+  public void testExceptionDocumentsWriterInit() throws IOException {
+    MockRAMDirectory dir = new MockRAMDirectory();
+    MockIndexWriter w = new MockIndexWriter(dir, false, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED);
+    Document doc = new Document();
+    doc.add(new Field("field", "a field", Field.Store.YES,
+                      Field.Index.TOKENIZED));
+    w.addDocument(doc);
+    w.doFail = true;
+    try {
+      w.addDocument(doc);
+      fail("did not hit exception");
+    } catch (RuntimeException re) {
+      // expected
+    }
+    w.close();
+    _TestUtil.checkIndex(dir);
+    dir.close();
+  }
 }

_TestUtil.java

@@ -22,6 +22,10 @@ import java.io.IOException;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.MergeScheduler;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.store.Directory;
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;

 public class _TestUtil {
@@ -49,4 +53,16 @@ public class _TestUtil {
     if (ms instanceof ConcurrentMergeScheduler)
       ((ConcurrentMergeScheduler) ms).sync();
   }
+
+  public static boolean checkIndex(Directory dir) throws IOException {
+    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
+    CheckIndex.out = new PrintStream(bos);
+    if (!CheckIndex.check(dir, false, null)) {
+      System.out.println("CheckIndex failed");
+      System.out.println(bos.toString());
+      return false;
+    } else
+      return true;
+  }
 }