LUCENE-4653: add deletes to TIW.testThreadInterruptDeadlock; fix a few places that didn't handle InterruptedException properly

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1428306 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2013-01-03 11:50:56 +00:00
parent e32af2f7ee
commit 176aca3a1e
5 changed files with 105 additions and 34 deletions

SimpleTextFieldsReader.java

@@ -24,13 +24,12 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.TreeSet;

 import org.apache.lucene.codecs.FieldsProducer;
 import org.apache.lucene.index.DocsAndPositionsEnum;
 import org.apache.lucene.index.DocsEnum;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.Terms;
@@ -40,6 +39,7 @@ import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.OpenBitSet;
 import org.apache.lucene.util.StringHelper;
@@ -67,10 +67,17 @@ class SimpleTextFieldsReader extends FieldsProducer {
   final static BytesRef PAYLOAD = SimpleTextFieldsWriter.PAYLOAD;

   public SimpleTextFieldsReader(SegmentReadState state) throws IOException {
-    in = state.dir.openInput(SimpleTextPostingsFormat.getPostingsFileName(state.segmentInfo.name, state.segmentSuffix), state.context);
     fieldInfos = state.fieldInfos;
-    fields = readFields(in.clone());
+    in = state.dir.openInput(SimpleTextPostingsFormat.getPostingsFileName(state.segmentInfo.name, state.segmentSuffix), state.context);
+    boolean success = false;
+    try {
+      fields = readFields(in.clone());
+      success = true;
+    } finally {
+      if (!success) {
+        IOUtils.closeWhileHandlingException(this);
+      }
+    }
   }

   private TreeMap<String,Long> readFields(IndexInput in) throws IOException {
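
The constructor change above is a common Lucene idiom: open the resources, flip a success flag as the last statement of the try, and close "this" in the finally when the flag is still false, so a reader that fails halfway through construction never leaks its open file. A minimal self-contained sketch of the same idiom (SketchReader and parseHeader are hypothetical; the inline close-and-suppress stands in for what IOUtils.closeWhileHandlingException(this) does):

import java.io.Closeable;
import java.io.IOException;

class SketchReader implements Closeable {
  private final Closeable input;

  SketchReader(Closeable input) throws IOException {
    this.input = input;
    boolean success = false;
    try {
      parseHeader();      // may throw, e.g. on a corrupt file
      success = true;
    } finally {
      if (!success) {
        // Close everything we opened, but suppress any secondary
        // exception so the original one from parseHeader() propagates:
        try {
          close();
        } catch (Throwable t) {
          // suppressed
        }
      }
    }
  }

  private void parseHeader() throws IOException {
    // stand-in for readFields(in.clone())
  }

  @Override
  public void close() throws IOException {
    input.close();
  }
}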

BitVector.java

@@ -26,6 +26,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.MutableBits;

 /** Optimized implementation of a vector of bits. This is more-or-less like
@@ -238,7 +239,7 @@ final class BitVector implements Cloneable, MutableBits {
       }
       assert verifyCount();
     } finally {
-      output.close();
+      IOUtils.close(output);
     }
   }
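
For context: IOUtils.close(...) closes each non-null argument and rethrows the first exception only after every close has been attempted, so one failing close cannot leak the remaining resources. A rough sketch of those semantics (CloseUtil is a hypothetical stand-in, not the Lucene implementation):

import java.io.Closeable;
import java.io.IOException;

final class CloseUtil {
  private CloseUtil() {}

  // Close all arguments (null-safe), remember the first failure,
  // and rethrow it once every close() has run:
  static void closeAll(Closeable... resources) throws IOException {
    Throwable first = null;
    for (Closeable c : resources) {
      try {
        if (c != null) {
          c.close();
        }
      } catch (Throwable t) {
        if (first == null) {
          first = t;
        }
      }
    }
    if (first instanceof IOException) {
      throw (IOException) first;
    } else if (first != null) {
      throw new RuntimeException(first);
    }
  }
}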

ReadersAndLiveDocs.java

@@ -23,6 +23,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.lucene.codecs.LiveDocsFormat;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.TrackingDirectoryWrapper;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.MutableBits;
@@ -182,16 +183,28 @@ class ReadersAndLiveDocs {
   // NOTE: removes callers ref
   public synchronized void dropReaders() throws IOException {
-    if (reader != null) {
-      //System.out.println(" pool.drop info=" + info + " rc=" + reader.getRefCount());
-      reader.decRef();
-      reader = null;
-    }
-    if (mergeReader != null) {
-      //System.out.println(" pool.drop info=" + info + " merge rc=" + mergeReader.getRefCount());
-      mergeReader.decRef();
-      mergeReader = null;
+    // TODO: can we somehow use IOUtils here...? problem is
+    // we are calling .decRef not .close)...
+    try {
+      if (reader != null) {
+        //System.out.println(" pool.drop info=" + info + " rc=" + reader.getRefCount());
+        try {
+          reader.decRef();
+        } finally {
+          reader = null;
+        }
+      }
+    } finally {
+      if (mergeReader != null) {
+        //System.out.println(" pool.drop info=" + info + " merge rc=" + mergeReader.getRefCount());
+        try {
+          mergeReader.decRef();
+        } finally {
+          mergeReader = null;
+        }
+      }
     }
     decRef();
   }
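
The TODO above explains why IOUtils is not used here: the cleanup calls decRef(), not close(). The nested try/finally gives the same guarantee by hand: the second decRef() runs even if the first one throws, and each field is nulled out even if its decRef() throws. A stripped-down sketch of that shape, with illustrative names (RefCounted, TwoRefHolder):

import java.io.IOException;

interface RefCounted {
  void decRef() throws IOException;
}

class TwoRefHolder {
  private RefCounted first;
  private RefCounted second;

  synchronized void dropBoth() throws IOException {
    try {
      if (first != null) {
        try {
          first.decRef();
        } finally {
          first = null;        // cleared even if decRef() threw
        }
      }
    } finally {
      if (second != null) {    // always reached
        try {
          second.decRef();
        } finally {
          second = null;
        }
      }
    }
  }
}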
@@ -272,13 +285,37 @@ class ReadersAndLiveDocs {
       // We have new deletes
       assert liveDocs.length() == info.info.getDocCount();

+      // Do this so we can delete any created files on
+      // exception; this saves all codecs from having to do
+      // it:
+      TrackingDirectoryWrapper trackingDir = new TrackingDirectoryWrapper(dir);
+
       // We can write directly to the actual name (vs to a
       // .tmp & renaming it) because the file is not live
       // until segments file is written:
-      info.info.getCodec().liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, dir, info, pendingDeleteCount, IOContext.DEFAULT);
+      boolean success = false;
+      try {
+        info.info.getCodec().liveDocsFormat().writeLiveDocs((MutableBits)liveDocs, trackingDir, info, pendingDeleteCount, IOContext.DEFAULT);
+        success = true;
+      } finally {
+        if (!success) {
+          // Advance only the nextWriteDelGen so that a 2nd
+          // attempt to write will write to a new file
+          info.advanceNextWriteDelGen();
+
+          // Delete any partially created file(s):
+          for(String fileName : trackingDir.getCreatedFiles()) {
+            try {
+              dir.deleteFile(fileName);
+            } catch (Throwable t) {
+              // Ignore so we throw only the first exc
+            }
+          }
+        }
+      }

       // If we hit an exc in the line above (eg disk full)
-      // then info remains pointing to the previous
+      // then info's delGen remains pointing to the previous
       // (successfully written) del docs:
       info.advanceDelGen();
       info.setDelCount(info.getDelCount() + pendingDeleteCount);
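
The rollback idiom above: wrap the directory so every file the codec creates is recorded (TrackingDirectoryWrapper.getCreatedFiles()), then on failure delete the partial files while the original exception (disk full, an interrupt, etc.) still propagates. Here is a sketch of the same pattern using plain java.nio instead of Lucene's Directory API; all names are illustrative:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashSet;
import java.util.Set;

class TrackingWriter {
  private final Set<Path> created = new HashSet<>();

  // Record every file we create so it can be rolled back:
  private Path createFile(Path dir, String name) throws IOException {
    Path p = Files.createFile(dir.resolve(name));
    created.add(p);
    return p;
  }

  void trackedWrite(Path dir, byte[] payload) throws IOException {
    boolean success = false;
    try {
      Path p = createFile(dir, "_1_2.liv");   // file name for illustration only
      Files.write(p, payload);                // may fail half-way (disk full, ...)
      success = true;
    } finally {
      if (!success) {
        // Delete any partially created file(s), ignoring secondary
        // errors so only the first exception surfaces:
        for (Path p : created) {
          try {
            Files.deleteIfExists(p);
          } catch (Throwable t) {
            // ignore
          }
        }
      }
    }
  }
}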

SegmentInfoPerCommit.java

@@ -40,6 +40,10 @@ public class SegmentInfoPerCommit {
   // are no deletes yet):
   private long delGen;

+  // Normally 1+delGen, unless an exception was hit on last
+  // attempt to write:
+  private long nextWriteDelGen;
+
   private volatile long sizeInBytes = -1;

   /** Sole constructor.
@@ -52,17 +56,27 @@ public class SegmentInfoPerCommit {
     this.info = info;
     this.delCount = delCount;
     this.delGen = delGen;
+    if (delGen == -1) {
+      nextWriteDelGen = 1;
+    } else {
+      nextWriteDelGen = delGen+1;
+    }
   }

   /** Called when we succeed in writing deletes */
   void advanceDelGen() {
-    if (delGen == -1) {
-      delGen = 1;
-    } else {
-      delGen++;
-    }
+    delGen = nextWriteDelGen;
+    nextWriteDelGen = delGen+1;
     sizeInBytes = -1;
   }

+  /** Called if there was an exception while writing
+   *  deletes, so that we don't try to write to the same
+   *  file more than once. */
+  void advanceNextWriteDelGen() {
+    nextWriteDelGen++;
+  }
+
   /** Returns total size in bytes of all files for this
    *  segment. */
   public long sizeInBytes() throws IOException {
@@ -126,11 +140,7 @@ public class SegmentInfoPerCommit {
    *  of the live docs file.
    */
   public long getNextDelGen() {
-    if (delGen == -1) {
-      return 1;
-    } else {
-      return delGen + 1;
-    }
+    return nextWriteDelGen;
   }
/**
@@ -169,6 +179,12 @@ public class SegmentInfoPerCommit {
   @Override
   public SegmentInfoPerCommit clone() {
-    return new SegmentInfoPerCommit(info, delCount, delGen);
+    SegmentInfoPerCommit other = new SegmentInfoPerCommit(info, delCount, delGen);
+    // Not clear that we need to carry over nextWriteDelGen
+    // (i.e. do we ever clone after a failed write and
+    // before the next successful write?), but just do it to
+    // be safe:
+    other.nextWriteDelGen = nextWriteDelGen;
+    return other;
   }
 }
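
Taken together, delGen and nextWriteDelGen form a small write-ahead generation counter: getNextDelGen() always answers "the generation the next write attempt will use", a successful write publishes that generation, and a failed write burns it so a retry never reuses the half-written file's name. A standalone sketch of just that bookkeeping (GenTracker and its method names are made up; the transitions mirror the methods above):

class GenTracker {
  long gen = -1;           // generation of the last successful write; -1: none yet
  long nextWriteGen = 1;   // generation the next write attempt will use

  void onWriteSuccess() {
    gen = nextWriteGen;
    nextWriteGen = gen + 1;
  }

  void onWriteFailure() {
    nextWriteGen++;        // skip the possibly partial file
  }

  public static void main(String[] args) {
    GenTracker t = new GenTracker();
    t.onWriteSuccess();    // wrote gen 1: gen=1, next=2
    t.onWriteFailure();    // gen 2 failed: gen=1, next=3
    t.onWriteSuccess();    // retry wrote gen 3: gen=3, next=4
    System.out.println(t.gen + " " + t.nextWriteGen);   // prints: 3 4
  }
}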

TestIndexWriter.java

@@ -1023,9 +1023,16 @@ public class TestIndexWriter extends LuceneTestCase {
     w = new IndexWriter(dir, conf);
     Document doc = new Document();
+    Field idField = newStringField("id", "", Field.Store.NO);
+    doc.add(idField);
     doc.add(newField("field", "some text contents", storedTextType));
     for(int i=0;i<100;i++) {
-      w.addDocument(doc);
+      idField.setStringValue(Integer.toString(i));
+      if (i%2 == 0) {
+        w.updateDocument(new Term("id", idField.stringValue()), doc);
+      } else {
+        w.addDocument(doc);
+      }
       if (i%10 == 0) {
         w.commit();
       }
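
The point of the alternation above: updateDocument(term, doc) deletes any documents matching the term and then adds the new document, so half the iterations now exercise the delete path (and the live-docs writing this commit hardens) while interrupts are flying. A compile-level sketch of that loop body, assuming Lucene 4.x on the classpath:

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;

class AddOrUpdate {
  // Even ids go through delete-then-add, odd ids are plain adds:
  static void index(IndexWriter w, int i) throws IOException {
    Document doc = new Document();
    doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
    if (i % 2 == 0) {
      w.updateDocument(new Term("id", Integer.toString(i)), doc);
    } else {
      w.addDocument(doc);
    }
  }
}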
@@ -1046,10 +1053,12 @@ public class TestIndexWriter extends LuceneTestCase {
             allowInterrupt = true;
           }
         } catch (ThreadInterruptedException re) {
-          if (true || VERBOSE) {
-            System.out.println("TEST: got interrupt");
-            re.printStackTrace(System.out);
-          }
+          // NOTE: important to leave this verbosity/noise
+          // on!! This test doesn't repro easily so when
+          // Jenkins hits a fail we need to study where the
+          // interrupts struck!
+          System.out.println("TEST: got interrupt");
+          re.printStackTrace(System.out);
           Throwable e = re.getCause();
           assertTrue(e instanceof InterruptedException);
           if (finish) {
@@ -1114,6 +1123,8 @@ public class TestIndexWriter extends LuceneTestCase {
     final int numInterrupts = atLeast(300);
     int i = 0;
     while(i < numInterrupts) {
+      // TODO: would be nice to also sometimes interrupt the
+      // CMS merge threads too ...
       Thread.sleep(10);
       if (t.allowInterrupt) {
         i++;
@@ -1238,7 +1249,6 @@ public class TestIndexWriter extends LuceneTestCase {
     Directory dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
         TEST_VERSION_CURRENT, new MockAnalyzer(random())));
-    ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
     writer.addDocument(new Document());
     writer.close();
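
For reference, the overall shape of the interrupt stress test: a worker thread indexes in a loop while the main thread fires interrupts at it, and the worker treats Lucene's ThreadInterruptedException (an unchecked wrapper whose getCause() is the original InterruptedException) as an expected, recoverable event. A self-contained sketch, with a hypothetical WrappedInterrupt class standing in for the Lucene one:

class WrappedInterrupt extends RuntimeException {
  WrappedInterrupt(InterruptedException cause) {
    super(cause);
  }
}

public class InterruptStress {
  public static void main(String[] args) throws Exception {
    Thread worker = new Thread(() -> {
      for (int i = 0; i < 1000; i++) {
        try {
          try {
            Thread.sleep(1);    // stand-in for addDocument/updateDocument/commit
          } catch (InterruptedException ie) {
            throw new WrappedInterrupt(ie);
          }
        } catch (WrappedInterrupt e) {
          // expected: the cause must be the original InterruptedException
          assert e.getCause() instanceof InterruptedException;
        }
      }
    });
    worker.start();
    for (int i = 0; i < 50; i++) {   // fire interrupts like the test does
      Thread.sleep(10);
      worker.interrupt();
    }
    worker.join();
    System.out.println("worker exited cleanly");
  }
}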