LUCENE-4653: add deletes to TIW.testThreadInterruptDeadlock; fix a few places that didn't handle InterruptedException properly

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1428306 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2013-01-03 11:50:56 +00:00
parent e32af2f7ee
commit 176aca3a1e
5 changed files with 105 additions and 34 deletions

SimpleTextFieldsReader.java

@@ -24,13 +24,12 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
-import java.util.TreeSet;
 
 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.Terms;
@@ -40,6 +39,7 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.util.StringHelper;
@@ -67,10 +67,17 @@ class SimpleTextFieldsReader extends FieldsProducer {
   final static BytesRef PAYLOAD = SimpleTextFieldsWriter.PAYLOAD;
 
   public SimpleTextFieldsReader(SegmentReadState state) throws IOException {
-    in = state.dir.openInput(SimpleTextPostingsFormat.getPostingsFileName(state.segmentInfo.name, state.segmentSuffix), state.context);
     fieldInfos = state.fieldInfos;
-    fields = readFields(in.clone());
+    in = state.dir.openInput(SimpleTextPostingsFormat.getPostingsFileName(state.segmentInfo.name, state.segmentSuffix), state.context);
+    boolean success = false;
+    try {
+      fields = readFields(in.clone());
+      success = true;
+    } finally {
+      if (!success) {
+        IOUtils.closeWhileHandlingException(this);
+      }
+    }
   }
 
   private TreeMap<String,Long> readFields(IndexInput in) throws IOException {
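
For reference, a minimal standalone sketch of the idiom this hunk introduces: acquire the input first, then guard the rest of construction with a success flag so the already-open resource is released if anything later throws (for example an interrupt-induced exception inside readFields). The class and helper names below (LeakFreeReader, openRawInput, parseHeader) are illustrative, not Lucene API.

import java.io.Closeable;
import java.io.FileInputStream;
import java.io.IOException;

// Sketch only: open a resource, then protect the rest of the constructor with
// a success flag so the resource is closed if construction fails part-way.
class LeakFreeReader implements Closeable {
  private final Closeable input;
  private Object header;

  LeakFreeReader(String path) throws IOException {
    input = openRawInput(path);     // acquired before the guarded block
    boolean success = false;
    try {
      header = parseHeader(input);  // may throw (e.g. after an interrupt)
      success = true;
    } finally {
      if (!success) {
        try {
          close();                  // release what we opened...
        } catch (Throwable t) {
          // ...but keep the original exception from parseHeader
        }
      }
    }
  }

  @Override
  public void close() throws IOException {
    input.close();
  }

  private static Closeable openRawInput(String path) throws IOException {
    return new FileInputStream(path);
  }

  private static Object parseHeader(Closeable in) throws IOException {
    return new Object();            // placeholder for real header parsing
  }
}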

BitVector.java

@@ -26,6 +26,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.MutableBits;
 
 /** Optimized implementation of a vector of bits.  This is more-or-less like
@@ -238,7 +239,7 @@ final class BitVector implements Cloneable, MutableBits {
       }
       assert verifyCount();
     } finally {
-      output.close();
+      IOUtils.close(output);
     }
   }

ReadersAndLiveDocs.java

@@ -23,6 +23,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.lucene.codecs.LiveDocsFormat;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.MutableBits;
@@ -182,16 +183,28 @@ class ReadersAndLiveDocs {
   // NOTE: removes callers ref
   public synchronized void dropReaders() throws IOException {
-    if (reader != null) {
-      //System.out.println(" pool.drop info=" + info + " rc=" + reader.getRefCount());
-      reader.decRef();
-      reader = null;
-    }
-    if (mergeReader != null) {
-      //System.out.println(" pool.drop info=" + info + " merge rc=" + mergeReader.getRefCount());
-      mergeReader.decRef();
-      mergeReader = null;
+    // TODO: can we somehow use IOUtils here...?  problem is
+    // we are calling .decRef not .close)...
+    try {
+      if (reader != null) {
+        //System.out.println(" pool.drop info=" + info + " rc=" + reader.getRefCount());
+        try {
+          reader.decRef();
+        } finally {
+          reader = null;
+        }
+      }
+    } finally {
+      if (mergeReader != null) {
+        //System.out.println(" pool.drop info=" + info + " merge rc=" + mergeReader.getRefCount());
+        try {
+          mergeReader.decRef();
+        } finally {
+          mergeReader = null;
+        }
+      }
     }
 
     decRef();
   }
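
For reference, a standalone sketch of the release pattern dropReaders() now uses: two independent decRef calls, each attempted even if the other throws, and each field cleared even if its own decRef throws; the first exception wins and later ones are lost, the same trade-off noted in the TODO above. RefCounted and the field names here are hypothetical stand-ins, not the Lucene reader classes.

import java.io.IOException;

// Sketch only: release two ref-counted resources so that a failure in one
// release never prevents the other, and the fields are always cleared.
class TwoRefHolder {
  interface RefCounted { void decRef() throws IOException; }

  private RefCounted reader;
  private RefCounted mergeReader;

  synchronized void dropAll() throws IOException {
    try {
      if (reader != null) {
        try {
          reader.decRef();
        } finally {
          reader = null;          // cleared even if decRef throws
        }
      }
    } finally {
      if (mergeReader != null) {  // always attempted, even if reader.decRef threw
        try {
          mergeReader.decRef();
        } finally {
          mergeReader = null;
        }
      }
    }
  }
}
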
@@ -272,13 +285,37 @@ class ReadersAndLiveDocs {
     // We have new deletes
     assert liveDocs.length() == info.info.getDocCount();
 
+    // Do this so we can delete any created files on
+    // exception; this saves all codecs from having to do
+    // it:
+    TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
+
     // We can write directly to the actual name (vs to a
     // .tmp & renaming it) because the file is not live
     // until segments file is written:
-    info.info.getCodec().liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, dir, info, pendingDeleteCount, IOContext.DEFAULT);
+    boolean success = false;
+    try {
+      info.info.getCodec().liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
+      success = true;
+    } finally {
+      if (!success) {
+        // Advance only the nextWriteDelGen so that a 2nd
+        // attempt to write will write to a new file
+        info.advanceNextWriteDelGen();
+
+        // Delete any partially created file(s):
+        for(String fileName : trackingDir.getCreatedFiles()) {
+          try {
+            dir.deleteFile(fileName);
+          } catch (Throwable t) {
+            // Ignore so we throw only the first exc
+          }
+        }
+      }
+    }
 
     // If we hit an exc in the line above (eg disk full)
-    // then info remains pointing to the previous
+    // then info's delGen remains pointing to the previous
     // (successfully written) del docs:
     info.advanceDelGen();
     info.setDelCount(info.getDelCount() + pendingDeleteCount);
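
For reference, a standalone sketch of the cleanup idiom above: record every file the write creates, and on failure delete the partial files while letting the original exception propagate. The method and file names are illustrative; the real code does this through TrackingDirectoryWrapper and the codec's LiveDocsFormat, and additionally bumps the next write generation (see the next file).

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Set;

// Sketch only: track created files during a write and roll them back on failure.
class RollbackOnFailure {
  static void writeOrRollBack(Path dir, byte[] payload) throws IOException {
    Set<Path> created = new HashSet<>();
    boolean success = false;
    try {
      Path file = dir.resolve("partial-output.bin");
      created.add(file);              // record what we are about to create
      Files.write(file, payload);     // the "real" write; may fail mid-way
      success = true;
    } finally {
      if (!success) {
        for (Path p : created) {
          try {
            Files.deleteIfExists(p);  // best-effort cleanup of partial files
          } catch (Throwable t) {
            // ignore: only the first exception should propagate
          }
        }
      }
    }
  }
}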

SegmentInfoPerCommit.java

@@ -40,6 +40,10 @@ public class SegmentInfoPerCommit {
   // are no deletes yet):
   private long delGen;
 
+  // Normally 1+delGen, unless an exception was hit on last
+  // attempt to write:
+  private long nextWriteDelGen;
+
   private volatile long sizeInBytes = -1;
 
   /** Sole constructor.
/** Sole constructor. /** Sole constructor.
@@ -52,17 +56,27 @@ public class SegmentInfoPerCommit {
     this.info = info;
     this.delCount = delCount;
     this.delGen = delGen;
+    if (delGen == -1) {
+      nextWriteDelGen = 1;
+    } else {
+      nextWriteDelGen = delGen+1;
+    }
   }
 
+  /** Called when we succeed in writing deletes */
   void advanceDelGen() {
-    if (delGen == -1) {
-      delGen = 1;
-    } else {
-      delGen++;
-    }
+    delGen = nextWriteDelGen;
+    nextWriteDelGen = delGen+1;
     sizeInBytes = -1;
   }
 
+  /** Called if there was an exception while writing
+   *  deletes, so that we don't try to write to the same
+   *  file more than once. */
+  void advanceNextWriteDelGen() {
+    nextWriteDelGen++;
+  }
+
   /** Returns total size in bytes of all files for this
    *  segment. */
   public long sizeInBytes() throws IOException {
@@ -126,11 +140,7 @@ public class SegmentInfoPerCommit {
    * of the live docs file.
    */
   public long getNextDelGen() {
-    if (delGen == -1) {
-      return 1;
-    } else {
-      return delGen + 1;
-    }
+    return nextWriteDelGen;
   }
 
   /**
@@ -169,6 +179,12 @@
 
   @Override
   public SegmentInfoPerCommit clone() {
-    return new SegmentInfoPerCommit(info, delCount, delGen);
+    SegmentInfoPerCommit other = new SegmentInfoPerCommit(info, delCount, delGen);
+    // Not clear that we need to carry over nextWriteDelGen
+    // (i.e. do we ever clone after a failed write and
+    // before the next successful write?), but just do it to
+    // be safe:
+    other.nextWriteDelGen = nextWriteDelGen;
+    return other;
   }
 }
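
For reference, a standalone sketch of the delGen / nextWriteDelGen bookkeeping added here, with a short trace showing why a failed write must never reuse its generation. DelGenTracker is an illustrative class, not SegmentInfoPerCommit itself; the actual live-docs file naming is omitted.

// Sketch only: the next write always targets a fresh generation, even after a
// failed attempt, so a retry can never collide with a partially written file.
class DelGenTracker {
  private long delGen;          // -1 means no deletes written yet
  private long nextWriteDelGen; // generation the next write attempt targets

  DelGenTracker(long delGen) {
    this.delGen = delGen;
    nextWriteDelGen = (delGen == -1) ? 1 : delGen + 1;
  }

  // Called when a write succeeds.
  void advanceDelGen() {
    delGen = nextWriteDelGen;
    nextWriteDelGen = delGen + 1;
  }

  // Called when a write fails, so the retry goes to a new generation.
  void advanceNextWriteDelGen() {
    nextWriteDelGen++;
  }

  long getNextDelGen() { return nextWriteDelGen; }

  public static void main(String[] args) {
    DelGenTracker t = new DelGenTracker(-1);
    System.out.println(t.getNextDelGen()); // 1: first attempt targets generation 1
    t.advanceNextWriteDelGen();            // attempt failed (e.g. disk full)
    System.out.println(t.getNextDelGen()); // 2: retry targets a fresh generation
    t.advanceDelGen();                     // retry succeeded; delGen is now 2
    System.out.println(t.getNextDelGen()); // 3
  }
}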

TestIndexWriter.java

@@ -1023,9 +1023,16 @@ public class TestIndexWriter extends LuceneTestCase {
         w = new IndexWriter(dir, conf);
         Document doc = new Document();
+        Field idField = newStringField("id", "", Field.Store.NO);
+        doc.add(idField);
         doc.add(newField("field", "some text contents", storedTextType));
         for(int i=0;i<100;i++) {
+          idField.setStringValue(Integer.toString(i));
-          w.addDocument(doc);
+          if (i%2 == 0) {
+            w.updateDocument(new Term("id", idField.stringValue()), doc);
+          } else {
+            w.addDocument(doc);
+          }
           if (i%10 == 0) {
             w.commit();
           }
@@ -1046,10 +1053,12 @@ public class TestIndexWriter extends LuceneTestCase {
             allowInterrupt = true;
           }
         } catch (ThreadInterruptedException re) {
-          if (true || VERBOSE) {
-            System.out.println("TEST: got interrupt");
-            re.printStackTrace(System.out);
-          }
+          // NOTE: important to leave this verbosity/noise
+          // on!!  This test doesn't repro easily so when
+          // Jenkins hits a fail we need to study where the
+          // interrupts struck!
+          System.out.println("TEST: got interrupt");
+          re.printStackTrace(System.out);
           Throwable e = re.getCause();
           assertTrue(e instanceof InterruptedException);
           if (finish) {
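
For reference, a generic sketch of the convention this catch block relies on: a blocking call that is interrupted rethrows the checked InterruptedException wrapped in an unchecked exception that keeps it as the cause, so the test can assert re.getCause() instanceof InterruptedException. UncheckedInterrupt is a hypothetical name, not Lucene's ThreadInterruptedException, and restoring the interrupt flag is shown here as one reasonable option, not necessarily what Lucene does.

// Sketch only: wrap InterruptedException in an unchecked exception,
// preserving it as the cause for callers that inspect getCause().
class UncheckedInterrupt extends RuntimeException {
  UncheckedInterrupt(InterruptedException cause) {
    super(cause);
  }
}

class SleepUtil {
  static void sleepQuietly(long ms) {
    try {
      Thread.sleep(ms);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt(); // keep the thread's interrupt status visible
      throw new UncheckedInterrupt(ie);   // cause stays available via getCause()
    }
  }
}
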
@@ -1114,6 +1123,8 @@ public class TestIndexWriter extends LuceneTestCase {
     final int numInterrupts = atLeast(300);
     int i = 0;
     while(i < numInterrupts) {
+      // TODO: would be nice to also sometimes interrupt the
+      // CMS merge threads too ...
       Thread.sleep(10);
       if (t.allowInterrupt) {
         i++;
@@ -1238,7 +1249,6 @@ public class TestIndexWriter extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())));
-    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
     writer.addDocument(new Document());
     writer.close();