LUCENE-2918: prune 100% del segments the moment they are created (in applyDeletes) instead of on commit, so that NRT usage doesn't waste time on fully deleted segs

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1071569 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2011-02-17 11:00:45 +00:00
parent 3f8c9b5cfc
commit 07aca888ba
19 changed files with 314 additions and 109 deletions
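
The substance of the change, condensed: BufferedDeletesStream.applyDeletes now reports, via ApplyDeletesResult.allDeleted, which segments it found to be 100% deleted, and IndexWriter drops those segments from segmentInfos and the reader pool right away instead of waiting for the next commit, so a freshly opened NRT reader never visits them. A minimal sketch of that caller-side pattern, simplified from the IndexWriter hunks below (the real code also leaves a segment alone if a merge has already registered it):

    // Illustrative sketch only -- condensed from the IndexWriter changes in this commit.
    final BufferedDeletesStream.ApplyDeletesResult result =
        bufferedDeletesStream.applyDeletes(readerPool, segmentInfos);
    if (result.anyDeletes) {
      checkpoint();
    }
    if (!keepFullyDeletedSegments && result.allDeleted != null) {
      for (SegmentInfo info : result.allDeleted) {
        segmentInfos.remove(info);   // segment disappears from the live index immediately
        readerPool.drop(info);       // close and evict the pooled SegmentReader
      }
      checkpoint();
    }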

View File

@@ -21,6 +21,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.AfterClass;
import org.junit.Assume;
import org.junit.BeforeClass;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
@@ -186,6 +187,7 @@ public class TestParser extends LuceneTestCase {
}
public void testDuplicateFilterQueryXML() throws ParserException, IOException
{
Assume.assumeTrue(searcher.getIndexReader().getSequentialSubReaders().length == 1);
Query q=parse("DuplicateFilterQuery.xml");
int h = searcher.search(q, null, 1000).totalHits;
assertEquals("DuplicateFilterQuery should produce 1 result ", 1,h);

View File

@@ -121,9 +121,13 @@ class BufferedDeletesStream {
// Current gen, for the merged segment:
public final long gen;
ApplyDeletesResult(boolean anyDeletes, long gen) {
// If non-null, contains segments that are 100% deleted
public final SegmentInfos allDeleted;
ApplyDeletesResult(boolean anyDeletes, long gen, SegmentInfos allDeleted) {
this.anyDeletes = anyDeletes;
this.gen = gen;
this.allDeleted = allDeleted;
}
}
@@ -154,14 +158,14 @@ class BufferedDeletesStream {
final long t0 = System.currentTimeMillis();
if (infos.size() == 0) {
return new ApplyDeletesResult(false, nextGen++);
return new ApplyDeletesResult(false, nextGen++, null);
}
assert checkDeleteStats();
if (!any()) {
message("applyDeletes: no deletes; skipping");
return new ApplyDeletesResult(false, nextGen++);
return new ApplyDeletesResult(false, nextGen++, null);
}
if (infoStream != null) {
@@ -178,6 +182,8 @@ class BufferedDeletesStream {
int infosIDX = infos2.size()-1;
int delIDX = deletes.size()-1;
SegmentInfos allDeleted = null;
while (infosIDX >= 0) {
//System.out.println("BD: cycle delIDX=" + delIDX + " infoIDX=" + infosIDX);
@@ -199,6 +205,7 @@ class BufferedDeletesStream {
assert readerPool.infoIsLive(info);
SegmentReader reader = readerPool.get(info, false);
int delCount = 0;
final boolean segAllDeletes;
try {
if (coalescedDeletes != null) {
//System.out.println(" del coalesced");
@@ -209,13 +216,21 @@ class BufferedDeletesStream {
// Don't delete by Term here; DocumentsWriter
// already did that on flush:
delCount += applyQueryDeletes(packet.queriesIterable(), reader);
segAllDeletes = reader.numDocs() == 0;
} finally {
readerPool.release(reader);
}
anyNewDeletes |= delCount > 0;
if (segAllDeletes) {
if (allDeleted == null) {
allDeleted = new SegmentInfos();
}
allDeleted.add(info);
}
if (infoStream != null) {
message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
message("seg=" + info + " segGen=" + segGen + " segDeletes=[" + packet + "]; coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? " 100% deleted" : ""));
}
if (coalescedDeletes == null) {
@@ -234,16 +249,25 @@ class BufferedDeletesStream {
assert readerPool.infoIsLive(info);
SegmentReader reader = readerPool.get(info, false);
int delCount = 0;
final boolean segAllDeletes;
try {
delCount += applyTermDeletes(coalescedDeletes.termsIterable(), reader);
delCount += applyQueryDeletes(coalescedDeletes.queriesIterable(), reader);
segAllDeletes = reader.numDocs() == 0;
} finally {
readerPool.release(reader);
}
anyNewDeletes |= delCount > 0;
if (segAllDeletes) {
if (allDeleted == null) {
allDeleted = new SegmentInfos();
}
allDeleted.add(info);
}
if (infoStream != null) {
message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount);
message("seg=" + info + " segGen=" + segGen + " coalesced deletes=[" + (coalescedDeletes == null ? "null" : coalescedDeletes) + "] delCount=" + delCount + (segAllDeletes ? " 100% deleted" : ""));
}
}
info.setBufferedDeletesGen(nextGen);
@@ -258,7 +282,7 @@ class BufferedDeletesStream {
}
// assert infos != segmentInfos || !any() : "infos=" + infos + " segmentInfos=" + segmentInfos + " any=" + any;
return new ApplyDeletesResult(anyNewDeletes, nextGen++);
return new ApplyDeletesResult(anyNewDeletes, nextGen++, allDeleted);
}
public synchronized long getNextGen() {

View File

@@ -146,7 +146,6 @@ class DirectoryReader extends IndexReader implements Cloneable {
this.readOnly = true;
this.applyAllDeletes = applyAllDeletes; // saved for reopen
segmentInfos = (SegmentInfos) infos.clone();// make sure we clone otherwise we share mutable state with IW
this.termInfosIndexDivisor = termInfosIndexDivisor;
if (codecs == null) {
this.codecs = CodecProvider.getDefault();
@@ -159,23 +158,33 @@ class DirectoryReader extends IndexReader implements Cloneable {
// us, which ensures infos will not change; so there's
// no need to process segments in reverse order
final int numSegments = infos.size();
SegmentReader[] readers = new SegmentReader[numSegments];
List<SegmentReader> readers = new ArrayList<SegmentReader>();
final Directory dir = writer.getDirectory();
segmentInfos = (SegmentInfos) infos.clone();
int infosUpto = 0;
for (int i=0;i<numSegments;i++) {
boolean success = false;
try {
final SegmentInfo info = infos.info(i);
assert info.dir == dir;
readers[i] = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor);
readers[i].readerFinishedListeners = readerFinishedListeners;
final SegmentReader reader = writer.readerPool.getReadOnlyClone(info, true, termInfosIndexDivisor);
if (reader.numDocs() > 0 || writer.getKeepFullyDeletedSegments()) {
reader.readerFinishedListeners = readerFinishedListeners;
readers.add(reader);
infosUpto++;
} else {
reader.close();
segmentInfos.remove(infosUpto);
}
success = true;
} finally {
if (!success) {
// Close all readers we had opened:
for(i--;i>=0;i--) {
for(SegmentReader reader : readers) {
try {
readers[i].close();
reader.close();
} catch (Throwable ignore) {
// keep going - we want to clean up as much as possible
}
@@ -186,7 +195,7 @@ class DirectoryReader extends IndexReader implements Cloneable {
this.writer = writer;
initialize(readers);
initialize(readers.toArray(new SegmentReader[readers.size()]));
}
/** This constructor is only used for {@link #reopen()} */

View File

@@ -648,8 +648,16 @@ final class DocumentsWriter {
newSegment.setDelCount(delCount);
newSegment.advanceDelGen();
final String delFileName = newSegment.getDelFileName();
if (infoStream != null) {
message("flush: write " + delCount + " deletes to " + delFileName);
}
boolean success2 = false;
try {
// TODO: in the NRT case it'd be better to hand
// this del vector over to the
// shortly-to-be-opened SegmentReader and let it
// carry the changes; there's no reason to use
// filesystem as intermediary here.
flushState.deletedDocs.write(directory, delFileName);
success2 = true;
} finally {

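The TODO in the hunk above describes a possible follow-up for the NRT path: hand the in-memory deleted-docs bit vector straight to the SegmentReader that is about to be opened, rather than round-tripping it through a _X_N.del file on disk. A purely hypothetical sketch of that idea -- the hand-off method and flag shown here do not exist in this commit:

    // Hypothetical illustration of the TODO; setDeletedDocs() is not a real Lucene API.
    if (openingNRTReaderSoon) {
      pooledSegmentReader.setDeletedDocs(flushState.deletedDocs);  // hand over the BitVector directly
    } else {
      flushState.deletedDocs.write(directory, delFileName);        // current behavior: write the .del file
    }
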
View File

@@ -388,8 +388,7 @@ public class IndexWriter implements Closeable {
private final Map<SegmentInfo,SegmentReader> readerMap = new HashMap<SegmentInfo,SegmentReader>();
/** Forcefully clear changes for the specified segments,
* and remove from the pool. This is called on successful merge. */
/** Forcefully clear changes for the specified segments. This is called on successful merge. */
synchronized void clear(SegmentInfos infos) throws IOException {
if (infos == null) {
for (Map.Entry<SegmentInfo,SegmentReader> ent: readerMap.entrySet()) {
@@ -397,8 +396,9 @@ public class IndexWriter implements Closeable {
}
} else {
for (final SegmentInfo info: infos) {
if (readerMap.containsKey(info)) {
readerMap.get(info).hasChanges = false;
final SegmentReader r = readerMap.get(info);
if (r != null) {
r.hasChanges = false;
}
}
}
@@ -407,8 +407,8 @@ public class IndexWriter implements Closeable {
// used only by asserts
public synchronized boolean infoIsLive(SegmentInfo info) {
int idx = segmentInfos.indexOf(info);
assert idx != -1;
assert segmentInfos.get(idx) == info;
assert idx != -1: "info=" + info + " isn't in pool";
assert segmentInfos.get(idx) == info: "info=" + info + " doesn't match live info in segmentInfos";
return true;
}
@@ -478,6 +478,21 @@ public class IndexWriter implements Closeable {
return false;
}
public synchronized void drop(SegmentInfos infos) throws IOException {
for(SegmentInfo info : infos) {
drop(info);
}
}
public synchronized void drop(SegmentInfo info) throws IOException {
final SegmentReader sr = readerMap.get(info);
if (sr != null) {
sr.hasChanges = false;
readerMap.remove(info);
sr.close();
}
}
/** Remove all our references to readers, and commits
* any pending changes. */
@@ -516,19 +531,18 @@ public class IndexWriter implements Closeable {
* Commit all segment reader in the pool.
* @throws IOException
*/
synchronized void commit() throws IOException {
synchronized void commit(SegmentInfos infos) throws IOException {
// We invoke deleter.checkpoint below, so we must be
// sync'd on IW:
assert Thread.holdsLock(IndexWriter.this);
for (Map.Entry<SegmentInfo,SegmentReader> ent : readerMap.entrySet()) {
for (SegmentInfo info : infos) {
SegmentReader sr = ent.getValue();
if (sr.hasChanges) {
assert infoIsLive(sr.getSegmentInfo());
final SegmentReader sr = readerMap.get(info);
if (sr != null && sr.hasChanges) {
assert infoIsLive(info);
sr.doCommit(null);
// Must checkpoint w/ deleter, because this
// segment reader will have created new _X_N.del
// file.
@@ -2558,6 +2572,24 @@ public class IndexWriter implements Closeable {
if (result.anyDeletes) {
checkpoint();
}
if (!keepFullyDeletedSegments && result.allDeleted != null) {
if (infoStream != null) {
message("drop 100% deleted segments: " + result.allDeleted);
}
for(SegmentInfo info : result.allDeleted) {
// If a merge has already registered for this
// segment, we leave it in the readerPool; the
// merge will skip merging it and will then drop
// it once it's done:
if (!mergingSegments.contains(info)) {
segmentInfos.remove(info);
if (readerPool != null) {
readerPool.drop(info);
}
}
}
checkpoint();
}
bufferedDeletesStream.prune(segmentInfos);
assert !bufferedDeletesStream.any();
flushControl.clearDeletes();
@@ -2634,9 +2666,13 @@ public class IndexWriter implements Closeable {
SegmentInfo info = sourceSegments.info(i);
minGen = Math.min(info.getBufferedDeletesGen(), minGen);
int docCount = info.docCount;
SegmentReader previousReader = merge.readersClone[i];
final SegmentReader previousReader = merge.readerClones.get(i);
if (previousReader == null) {
// Reader was skipped because it was 100% deletions
continue;
}
final Bits prevDelDocs = previousReader.getDeletedDocs();
SegmentReader currentReader = merge.readers[i];
final SegmentReader currentReader = merge.readers.get(i);
final Bits currentDelDocs = currentReader.getDeletedDocs();
if (previousReader.hasDeletions()) {
@@ -2719,18 +2755,21 @@ public class IndexWriter implements Closeable {
return false;
}
ensureValidMerge(merge);
commitMergedDeletes(merge, mergedReader);
// If the doc store we are using has been closed and
// is in now compound format (but wasn't when we
// started), then we will switch to the compound
// format as well:
setMergeDocStoreIsCompoundFile(merge);
assert !segmentInfos.contains(merge.info);
final boolean allDeleted = mergedReader.numDocs() == 0;
if (infoStream != null && allDeleted) {
message("merged segment " + merge.info + " is 100% deleted" + (keepFullyDeletedSegments ? "" : "; skipping insert"));
}
final Set<SegmentInfo> mergedAway = new HashSet<SegmentInfo>(merge.segments);
int segIdx = 0;
int newSegIdx = 0;
@@ -2739,7 +2778,7 @@ public class IndexWriter implements Closeable {
while(segIdx < curSegCount) {
final SegmentInfo info = segmentInfos.info(segIdx++);
if (mergedAway.contains(info)) {
if (!inserted) {
if (!inserted && (!allDeleted || keepFullyDeletedSegments)) {
segmentInfos.set(segIdx-1, merge.info);
inserted = true;
newSegIdx++;
@@ -2748,7 +2787,20 @@ public class IndexWriter implements Closeable {
segmentInfos.set(newSegIdx++, info);
}
}
assert newSegIdx == curSegCount - merge.segments.size() + 1;
// Either we found place to insert segment, or, we did
// not, but only because all segments we merged became
// deleted while we are merging, in which case it should
// be the case that the new segment is also all deleted:
if (!inserted) {
assert allDeleted;
if (keepFullyDeletedSegments) {
segmentInfos.add(0, merge.info);
} else {
readerPool.drop(merge.info);
}
}
segmentInfos.subList(newSegIdx, segmentInfos.size()).clear();
if (infoStream != null) {
@@ -2770,7 +2822,6 @@ public class IndexWriter implements Closeable {
// cascade the optimize:
segmentsToOptimize.add(merge.info);
}
return true;
}
@@ -2913,8 +2964,9 @@ public class IndexWriter implements Closeable {
// is running (while synchronized) to avoid race
// condition where two conflicting merges from different
// threads, start
for(int i=0;i<count;i++)
for(int i=0;i<count;i++) {
mergingSegments.add(merge.segments.info(i));
}
// Merge is now registered
merge.registerDone = true;
@@ -2966,10 +3018,28 @@ public class IndexWriter implements Closeable {
// Lock order: IW -> BD
final BufferedDeletesStream.ApplyDeletesResult result = bufferedDeletesStream.applyDeletes(readerPool, merge.segments);
if (result.anyDeletes) {
checkpoint();
}
if (!keepFullyDeletedSegments && result.allDeleted != null) {
if (infoStream != null) {
message("drop 100% deleted segments: " + result.allDeleted);
}
for(SegmentInfo info : result.allDeleted) {
segmentInfos.remove(info);
if (merge.segments.contains(info)) {
mergingSegments.remove(info);
merge.segments.remove(info);
}
}
if (readerPool != null) {
readerPool.drop(result.allDeleted);
}
checkpoint();
}
merge.info.setBufferedDeletesGen(result.gen);
// Lock order: IW -> BD
@@ -3023,8 +3093,9 @@ public class IndexWriter implements Closeable {
if (merge.registerDone) {
final SegmentInfos sourceSegments = merge.segments;
final int end = sourceSegments.size();
for(int i=0;i<end;i++)
for(int i=0;i<end;i++) {
mergingSegments.remove(sourceSegments.info(i));
}
mergingSegments.remove(merge.info);
merge.registerDone = false;
}
@@ -3032,47 +3103,30 @@ public class IndexWriter implements Closeable {
runningMerges.remove(merge);
}
private synchronized void setMergeDocStoreIsCompoundFile(MergePolicy.OneMerge merge) {
final String mergeDocStoreSegment = merge.info.getDocStoreSegment();
if (mergeDocStoreSegment != null && !merge.info.getDocStoreIsCompoundFile()) {
final int size = segmentInfos.size();
for(int i=0;i<size;i++) {
final SegmentInfo info = segmentInfos.info(i);
final String docStoreSegment = info.getDocStoreSegment();
if (docStoreSegment != null &&
docStoreSegment.equals(mergeDocStoreSegment) &&
info.getDocStoreIsCompoundFile()) {
merge.info.setDocStoreIsCompoundFile(true);
break;
}
}
}
}
private synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
final int numSegments = merge.segments.size();
final int numSegments = merge.readers.size();
if (suppressExceptions) {
// Suppress any new exceptions so we throw the
// original cause
boolean anyChanges = false;
for (int i=0;i<numSegments;i++) {
if (merge.readers[i] != null) {
if (merge.readers.get(i) != null) {
try {
anyChanges |= readerPool.release(merge.readers[i], false);
anyChanges |= readerPool.release(merge.readers.get(i), false);
} catch (Throwable t) {
}
merge.readers[i] = null;
merge.readers.set(i, null);
}
if (merge.readersClone[i] != null) {
if (i < merge.readerClones.size() && merge.readerClones.get(i) != null) {
try {
merge.readersClone[i].close();
merge.readerClones.get(i).close();
} catch (Throwable t) {
}
// This was a private clone and we had the
// only reference
assert merge.readersClone[i].getRefCount() == 0: "refCount should be 0 but is " + merge.readersClone[i].getRefCount();
merge.readersClone[i] = null;
assert merge.readerClones.get(i).getRefCount() == 0: "refCount should be 0 but is " + merge.readerClones.get(i).getRefCount();
merge.readerClones.set(i, null);
}
}
if (anyChanges) {
@@ -3080,16 +3134,16 @@ public class IndexWriter implements Closeable {
}
} else {
for (int i=0;i<numSegments;i++) {
if (merge.readers[i] != null) {
readerPool.release(merge.readers[i], true);
merge.readers[i] = null;
if (merge.readers.get(i) != null) {
readerPool.release(merge.readers.get(i), true);
merge.readers.set(i, null);
}
if (merge.readersClone[i] != null) {
merge.readersClone[i].close();
if (i < merge.readerClones.size() && merge.readerClones.get(i) != null) {
merge.readerClones.get(i).close();
// This was a private clone and we had the only reference
assert merge.readersClone[i].getRefCount() == 0;
merge.readersClone[i] = null;
assert merge.readerClones.get(i).getRefCount() == 0;
merge.readerClones.set(i, null);
}
}
}
@@ -3108,7 +3162,6 @@ public class IndexWriter implements Closeable {
int mergedDocCount = 0;
SegmentInfos sourceSegments = merge.segments;
final int numSegments = sourceSegments.size();
SegmentMerger merger = new SegmentMerger(directory, termIndexInterval, mergedName, merge,
codecs, payloadProcessorProvider,
@@ -3118,36 +3171,43 @@ public class IndexWriter implements Closeable {
message("merging " + merge.segString(directory) + " mergeVectors=" + merger.fieldInfos().hasVectors());
}
merge.readers = new ArrayList<SegmentReader>();
merge.readerClones = new ArrayList<SegmentReader>();
merge.info.setHasVectors(merger.fieldInfos().hasVectors());
merge.readers = new SegmentReader[numSegments];
merge.readersClone = new SegmentReader[numSegments];
// This is try/finally to make sure merger's readers are
// closed:
boolean success = false;
try {
int totDocCount = 0;
int segUpto = 0;
while(segUpto < sourceSegments.size()) {
for (int i = 0; i < numSegments; i++) {
final SegmentInfo info = sourceSegments.info(i);
final SegmentInfo info = sourceSegments.info(segUpto);
// Hold onto the "live" reader; we will use this to
// commit merged deletes
SegmentReader reader = merge.readers[i] = readerPool.get(info, true,
MERGE_READ_BUFFER_SIZE,
-config.getReaderTermsIndexDivisor());
final SegmentReader reader = readerPool.get(info, true,
MERGE_READ_BUFFER_SIZE,
-config.getReaderTermsIndexDivisor());
merge.readers.add(reader);
// We clone the segment readers because other
// deletes may come in while we're merging so we
// need readers that will not change
SegmentReader clone = merge.readersClone[i] = (SegmentReader) reader.clone(true);
merger.add(clone);
final SegmentReader clone = (SegmentReader) reader.clone(true);
merge.readerClones.add(clone);
if (reader.numDocs() > 0) {
merger.add(clone);
}
totDocCount += clone.numDocs();
segUpto++;
}
if (infoStream != null) {
message("merge: total "+totDocCount+" docs");
message("merge: total " + totDocCount + " docs");
}
merge.checkAborted(directory);
@@ -3160,11 +3220,11 @@ public class IndexWriter implements Closeable {
if (infoStream != null) {
message("merge segmentCodecs=" + merger.getSegmentCodecs());
message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + numSegments);
message("merge store matchedCount=" + merger.getMatchedSubReaderCount() + " vs " + merge.readers.size());
}
anyNonBulkMerges |= merger.getMatchedSubReaderCount() != numSegments;
anyNonBulkMerges |= merger.getMatchedSubReaderCount() != merge.readers.size();
assert mergedDocCount == totDocCount;
assert mergedDocCount == totDocCount: "mergedDocCount=" + mergedDocCount + " vs " + totDocCount;
// Very important to do this before opening the reader
// because codec must know if prox was written for
@@ -3347,6 +3407,10 @@ public class IndexWriter implements Closeable {
keepFullyDeletedSegments = true;
}
boolean getKeepFullyDeletedSegments() {
return keepFullyDeletedSegments;
}
// called only from assert
private boolean filesExist(SegmentInfos toSync) throws IOException {
Collection<String> files = toSync.files(directory, false);
@@ -3402,12 +3466,8 @@ public class IndexWriter implements Closeable {
if (infoStream != null)
message("startCommit index=" + segString(segmentInfos) + " changeCount=" + changeCount);
readerPool.commit();
readerPool.commit(segmentInfos);
toSync = (SegmentInfos) segmentInfos.clone();
if (!keepFullyDeletedSegments) {
toSync.pruneDeletedSegments();
}
assert filesExist(toSync);

View File

@@ -72,8 +72,8 @@ public abstract class MergePolicy implements java.io.Closeable {
long mergeGen; // used by IndexWriter
boolean isExternal; // used by IndexWriter
int maxNumSegmentsOptimize; // used by IndexWriter
SegmentReader[] readers; // used by IndexWriter
SegmentReader[] readersClone; // used by IndexWriter
List<SegmentReader> readers; // used by IndexWriter
List<SegmentReader> readerClones; // used by IndexWriter
public final SegmentInfos segments;
boolean aborted;
Throwable error;

View File

@@ -102,6 +102,17 @@ public class RandomIndexWriter implements Closeable {
}
}
public void updateDocument(Term t, Document doc) throws IOException {
w.updateDocument(t, doc);
if (docCount++ == flushAt) {
if (LuceneTestCase.VERBOSE) {
System.out.println("RIW.updateDocument: now doing a commit");
}
w.commit();
flushAt += _TestUtil.nextInt(r, 10, 1000);
}
}
public void addIndexes(Directory... dirs) throws CorruptIndexException, IOException {
w.addIndexes(dirs);
}
@@ -127,17 +138,21 @@ }
}
public IndexReader getReader() throws IOException {
return getReader(true);
}
public IndexReader getReader(boolean applyDeletions) throws IOException {
getReaderCalled = true;
if (r.nextInt(4) == 2)
w.optimize();
// If we are writing with PreFlexRW, force a full
// IndexReader.open so terms are sorted in codepoint
// order during searching:
if (!w.codecs.getDefaultFieldCodec().equals("PreFlex") && r.nextBoolean()) {
if (!applyDeletions || !w.codecs.getDefaultFieldCodec().equals("PreFlex") && r.nextBoolean()) {
if (LuceneTestCase.VERBOSE) {
System.out.println("RIW.getReader: use NRT reader");
}
return w.getReader();
return w.getReader(applyDeletions);
} else {
if (LuceneTestCase.VERBOSE) {
System.out.println("RIW.getReader: open new reader");

View File

@@ -2,14 +2,13 @@ package org.apache.lucene.search;
import java.io.IOException;
import java.util.Random;
import java.lang.reflect.Method;
import junit.framework.Assert;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiReader;
@@ -19,6 +18,7 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.ReaderUtil;
import org.apache.lucene.util._TestUtil;
import static org.apache.lucene.util.LuceneTestCase.TEST_VERSION_CURRENT;
@@ -172,16 +172,7 @@ public class QueryUtils {
}
w.commit();
w.deleteDocuments( new MatchAllDocsQuery() );
try {
// Carefully invoke what is a package-private (test
// only, internal) method on IndexWriter:
Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments");
m.setAccessible(true);
m.invoke(w);
} catch (Exception e) {
// Should not happen?
throw new RuntimeException(e);
}
_TestUtil.keepFullyDeletedSegments(w);
w.commit();
if (0 < numDeletedDocs)

View File

@@ -1243,7 +1243,7 @@ public abstract class LuceneTestCase extends Assert {
}
@Override
public String toString() {
public synchronized String toString() {
return "RandomCodecProvider: " + previousMappings.toString();
}
}

View File

@@ -25,6 +25,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.lang.reflect.Method;
import java.util.Enumeration;
import java.util.Random;
import java.util.Map;
@@ -305,4 +306,17 @@ public class _TestUtil {
});
Assert.assertEquals("Reflection does not produce same map", reflectedValues, map);
}
public static void keepFullyDeletedSegments(IndexWriter w) {
try {
// Carefully invoke what is a package-private (test
// only, internal) method on IndexWriter:
Method m = IndexWriter.class.getDeclaredMethod("keepFullyDeletedSegments");
m.setAccessible(true);
m.invoke(w);
} catch (Exception e) {
// Should not happen?
throw new RuntimeException(e);
}
}
}

View File

@@ -81,7 +81,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {
IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)).setMaxBufferedDocs(2)
.setMaxBufferedDeleteTerms(2));
modifier.setInfoStream(VERBOSE ? System.out : null);
int id = 0;
int value = 100;

View File

@@ -464,11 +464,11 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
setReaderPooling(true).
setMergePolicy(newLogMergePolicy(2))
);
_TestUtil.keepFullyDeletedSegments(w);
Document doc = new Document();
doc.add(newField("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
w.addDocument(doc);
w.commit();
w.deleteDocuments(new Term("f", "who"));

View File

@@ -68,7 +68,7 @@ public class TestIsCurrent extends LuceneTestCase {
// assert index has a document and reader is up2date
assertEquals("One document should be in the index", 1, writer.numDocs());
assertTrue("Document added, reader should be stale ", reader.isCurrent());
assertTrue("One document added, reader should be current", reader.isCurrent());
// remove document
Term idTerm = new Term("UUID", "1");

View File

@@ -32,6 +32,7 @@ public class TestMultiFields extends LuceneTestCase {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMergePolicy(NoMergePolicy.COMPOUND_FILES));
_TestUtil.keepFullyDeletedSegments(w);
Map<BytesRef,List<Integer>> docs = new HashMap<BytesRef,List<Integer>>();
Set<Integer> deleted = new HashSet<Integer>();

View File

@@ -36,6 +36,7 @@ public class TestNRTReaderWithThreads extends LuceneTestCase {
setMaxBufferedDocs(10).
setMergePolicy(newLogMergePolicy(false,2))
);
writer.setInfoStream(VERBOSE ? System.out : null);
IndexReader reader = writer.getReader(); // start pooling readers
reader.close();
RunThread[] indexThreads = new RunThread[4];

View File

@@ -0,0 +1,75 @@
package org.apache.lucene.index;
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
import org.junit.Test;
public class TestRollingUpdates extends LuceneTestCase {
// Just updates the same set of N docs over and over, to
// stress out deletions
@Test
public void testRollingUpdates() throws Exception {
final Directory dir = newDirectory();
final LineFileDocs docs = new LineFileDocs(random);
final IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
final int SIZE = 200 * RANDOM_MULTIPLIER;
int id = 0;
IndexReader r = null;
final int numUpdates = (int) (SIZE * (2+random.nextDouble()));
for(int docIter=0;docIter<numUpdates;docIter++) {
final Document doc = docs.nextDoc();
final String myID = ""+id;
if (id == SIZE-1) {
id = 0;
} else {
id++;
}
doc.getField("id").setValue(myID);
w.updateDocument(new Term("id", myID), doc);
if (docIter >= SIZE && random.nextInt(50) == 17) {
if (r != null) {
r.close();
}
final boolean applyDeletions = random.nextBoolean();
r = w.getReader(applyDeletions);
assertTrue("applyDeletions=" + applyDeletions + " r.numDocs()=" + r.numDocs() + " vs SIZE=" + SIZE, !applyDeletions || r.numDocs() == SIZE);
}
}
if (r != null) {
r.close();
}
w.commit();
assertEquals(SIZE, w.numDocs());
w.close();
docs.close();
dir.close();
}
}

View File

@@ -71,7 +71,7 @@ public class TestThreadedOptimize extends LuceneTestCase {
}
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
//writer.setInfoStream(System.out);
writer.setInfoStream(VERBOSE ? System.out : null);
Thread[] threads = new Thread[NUM_THREADS];

View File

@@ -29,6 +29,7 @@ import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util._TestUtil;
public class TestCachingSpanFilter extends LuceneTestCase {
@@ -73,7 +74,9 @@ public class TestCachingSpanFilter extends LuceneTestCase {
docs = searcher.search(constantScore, 1);
assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
// now delete the doc, refresh the reader, and see that it's not there
// now delete the doc, refresh the reader, and see that
// it's not there
_TestUtil.keepFullyDeletedSegments(writer.w);
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);

View File

@@ -22,8 +22,8 @@ import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SerialMergeScheduler;
import org.apache.lucene.index.SlowMultiReaderWrapper;
@@ -32,6 +32,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.OpenBitSet;
import org.apache.lucene.util.OpenBitSetDISI;
import org.apache.lucene.util._TestUtil;
public class TestCachingWrapperFilter extends LuceneTestCase {
@@ -196,6 +197,7 @@ public class TestCachingWrapperFilter extends LuceneTestCase {
assertEquals("[just filter] Should find a hit...", 1, docs.totalHits);
// now delete the doc, refresh the reader, and see that it's not there
_TestUtil.keepFullyDeletedSegments(writer.w);
writer.deleteDocuments(new Term("id", "1"));
reader = refreshReader(reader);