Mirror of https://github.com/apache/lucene.git

LUCENE-4529: fix test bug, null out the first buffer in Int/ByteBlockPool on reset

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1405425 13f79535-47bb-0310-9956-ffa450edef68

Commit c870977ba2 (parent fb9f59e9ea)
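The change is easiest to see in miniature. Below is a hypothetical, self-contained sketch (not Lucene's actual code; all names are illustrative) of the hazard the commit fixes: a reset that clears the pool's cursor state but leaves the first block reachable through buffers[0], so stale bytes survive the reset.

    // Hypothetical mini-pool; illustrates the fix, not the real ByteBlockPool.
    final class MiniBytePool {
      static final int BLOCK_SIZE = 8;        // tiny, for illustration
      byte[][] buffers = new byte[10][];      // allocated blocks
      byte[] buffer;                          // current block; null when empty
      int bufferUpto = -1;                    // index of the current block

      void nextBuffer() {
        buffers[++bufferUpto] = new byte[BLOCK_SIZE];
        buffer = buffers[bufferUpto];
      }

      // Before the fix: cursor state is cleared, but buffers[0] still
      // references the old block, so stale contents remain reachable.
      void resetBuggy() {
        bufferUpto = -1;
        buffer = null;
      }

      // After the fix: the first block is dropped as well.
      void reset() {
        bufferUpto = -1;
        buffer = null;
        buffers[0] = null;
      }
    }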
@@ -582,6 +582,9 @@ class DocumentsWriterPerThread {
         infoStream.message("DWPT", "flush: write " + delCount + " deletes gen=" + flushedSegment.segmentInfo.getDelGen());
       }
 
+      // TODO: we should prune the segment if it's 100%
+      // deleted... but merge will also catch it.
+
       // TODO: in the NRT case it'd be better to hand
       // this del vector over to the
       // shortly-to-be-opened SegmentReader and let it
@@ -90,8 +90,8 @@ final class TermsHash extends InvertedDocConsumer {
   // Clear all state
   void reset() {
     // we don't reuse so we drop everything and don't fill with 0
-    intPool.reset();
-    bytePool.reset();
+    intPool.reset(false, false);
+    bytePool.reset(false, false);
   }
 
   @Override
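TermsHash does not reuse its pools between flushes, so it now calls the two-argument reset. Judging from the comment in the hunk, the two flags mean (zeroFillBuffers, reuseFirst); that naming is an assumption, not confirmed by this diff. A usage sketch against the public pool API:

    import org.apache.lucene.util.ByteBlockPool;

    // Sketch, assuming the flags are (zeroFillBuffers, reuseFirst).
    ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
    pool.nextBuffer();         // allocate and switch to the first block
    pool.buffer[0] = 42;       // write into the pool
    pool.reset(false, false);  // drop all blocks: no zero-fill, no reuse
    // pool.buffer is now null; callers must invoke nextBuffer() before writing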
@@ -21,8 +21,6 @@ import java.util.Arrays;
 import java.util.List;
 
 import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.util.IntBlockPool.SliceReader;
-import org.apache.lucene.util.IntBlockPool.SliceWriter;
 
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
 
@@ -183,6 +181,7 @@ public final class ByteBlockPool {
       buffer = buffers[0];
     } else {
       bufferUpto = -1;
+      buffers[0] = null;
       byteUpto = BYTE_BLOCK_SIZE;
       byteOffset = -BYTE_BLOCK_SIZE;
       buffer = null;
@@ -133,6 +133,7 @@ public final class IntBlockPool {
       buffer = buffers[0];
     } else {
       bufferUpto = -1;
+      buffers[0] = null;
       intUpto = INT_BLOCK_SIZE;
       intOffset = -INT_BLOCK_SIZE;
       buffer = null;
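Both pools get the same one-line fix: when reset is told not to reuse the first buffer, buffers[0] is now nulled out, so an empty pool holds no reachable blocks and the old block can be garbage collected. A sketch of the invariant this restores (field names are taken from the hunks above; the assertions are illustrative, not from the commit):

    pool.reset(false, false);
    assert pool.buffer == null;       // was already true before the fix
    assert pool.buffers[0] == null;   // new: the first block is dropped too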
@@ -114,7 +114,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
     Directory dir1 = newDirectory();
     Directory dir2 = newDirectory();
     if (VERBOSE) {
-      System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
+      System.out.println("  nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor + " maxBufferedDocs=" + maxBufferedDocs);
     }
     Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
     if (VERBOSE) {
@@ -334,9 +334,10 @@ public class TestStressIndexing2 extends LuceneTestCase {
     if (fields == null) {
       // make sure r1 is in fact empty (eg has only all
      // deleted docs):
+      Bits liveDocs = MultiFields.getLiveDocs(r1);
       DocsEnum docs = null;
       while(termsEnum.next() != null) {
-        docs = _TestUtil.docs(random(), termsEnum, null, docs, 0);
+        docs = _TestUtil.docs(random(), termsEnum, liveDocs, docs, 0);
         while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
           fail("r1 is not empty but r2 is");
         }
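The test bug itself: the old code passed null for the live-docs bits, so the enum also visited deleted documents, and a reader whose remaining documents were all deleted would spuriously fail the "r1 is empty" check. The fix passes the reader's live docs instead. The general pattern in this API vintage, assuming a reader and termsEnum are in scope:

    import org.apache.lucene.index.DocsEnum;
    import org.apache.lucene.index.MultiFields;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.Bits;

    Bits liveDocs = MultiFields.getLiveDocs(reader);  // null when nothing is deleted
    DocsEnum docsEnum = termsEnum.docs(liveDocs, null, 0);
    while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      // deleted documents are filtered out here
    }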