LUCENE-4529: fix test bug, null out the first buffer in Int/ByteBlockPool on reset

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1405425 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2012-11-03 21:40:59 +00:00
parent fb9f59e9ea
commit c870977ba2
5 changed files with 26 additions and 22 deletions
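
For context: both pools recycle their blocks on reset, and with reuseFirst=false the first block is handed back to the allocator too — but before this commit the pools kept a stale reference to that recycled block in buffers[0]. A minimal sketch of the repaired contract, assuming the two-argument reset shown in the TermsHash hunk below (the name zeroFillBuffers for the first flag is an assumption; reuseFirst appears in the IntBlockPool hunk):

    import org.apache.lucene.util.ByteBlockPool;

    public class ResetSketch {
      public static void main(String[] args) {
        ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
        pool.nextBuffer();        // allocate the first block
        pool.reset(false, false); // don't zero-fill, don't reuse the first block
        // Before this fix buffers[0] still pointed at the recycled block;
        // after it the slot is nulled, so the pool no longer aliases a
        // block the allocator may hand out elsewhere.
      }
    }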

lucene/core/src/java/org/apache/lucene/index/DocumentsWriterPerThread.java

@@ -582,6 +582,9 @@ class DocumentsWriterPerThread {
+        infoStream.message("DWPT", "flush: write " + delCount + " deletes gen=" + flushedSegment.segmentInfo.getDelGen());
+      }
       // TODO: we should prune the segment if it's 100%
       // deleted... but merge will also catch it.
       // TODO: in the NRT case it'd be better to hand
       // this del vector over to the
       // shortly-to-be-opened SegmentReader and let it

lucene/core/src/java/org/apache/lucene/index/TermsHash.java

@@ -90,8 +90,8 @@ final class TermsHash extends InvertedDocConsumer {
   // Clear all state
   void reset() {
     // we don't reuse so we drop everything and don't fill with 0
-    intPool.reset();
-    bytePool.reset();
+    intPool.reset(false, false);
+    bytePool.reset(false, false);
   }

   @Override
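
The comment explains the choice: TermsHash never reuses pool contents across flushes, so zero-filling blocks and pinning the first one would be wasted work and retained memory. If the no-arg reset keeps its usual 4.x meaning of reset(true, true) (an assumption worth checking against the pool sources), the change reads as:

    // old: zero-fill all used blocks and keep buffers[0] for the next round
    intPool.reset();              // presumably equivalent to reset(true, true)
    // new: drop everything; nothing is zeroed, nothing is retained
    intPool.reset(false, false);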

lucene/core/src/java/org/apache/lucene/util/ByteBlockPool.java

@@ -21,8 +21,6 @@ import java.util.Arrays;
 import java.util.List;
 import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.util.IntBlockPool.SliceReader;
-import org.apache.lucene.util.IntBlockPool.SliceWriter;
 import static org.apache.lucene.util.RamUsageEstimator.NUM_BYTES_OBJECT_REF;
@@ -183,6 +181,7 @@ public final class ByteBlockPool {
       buffer = buffers[0];
     } else {
       bufferUpto = -1;
+      buffers[0] = null;
       byteUpto = BYTE_BLOCK_SIZE;
       byteOffset = -BYTE_BLOCK_SIZE;
       buffer = null;
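
A quick sanity sketch of the else-branch invariants above, using ByteBlockPool's public fields and its BYTE_BLOCK_SIZE constant (public in 4.x; the field names match the diff):

    ByteBlockPool pool = new ByteBlockPool(new ByteBlockPool.DirectAllocator());
    pool.nextBuffer();
    pool.reset(false, false);
    // byteUpto == BYTE_BLOCK_SIZE forces nextBuffer() before the next write
    assert pool.byteUpto == ByteBlockPool.BYTE_BLOCK_SIZE;
    assert pool.byteOffset == -ByteBlockPool.BYTE_BLOCK_SIZE;
    assert pool.buffer == null; // and buffers[0] is now null as well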

lucene/core/src/java/org/apache/lucene/util/IntBlockPool.java

@@ -121,22 +121,23 @@ public final class IntBlockPool {
     if (bufferUpto > 0 || !reuseFirst) {
       final int offset = reuseFirst ? 1 : 0;
-      // Recycle all but the first buffer
-      allocator.recycleIntBlocks(buffers, offset, 1+bufferUpto);
-      Arrays.fill(buffers, offset, bufferUpto+1, null);
-    }
-    if (reuseFirst) {
-      // Re-use the first buffer
-      bufferUpto = 0;
-      intUpto = 0;
-      intOffset = 0;
-      buffer = buffers[0];
-    } else {
-      bufferUpto = -1;
-      intUpto = INT_BLOCK_SIZE;
-      intOffset = -INT_BLOCK_SIZE;
-      buffer = null;
-    }
+      // Recycle all but the first buffer
+      allocator.recycleIntBlocks(buffers, offset, 1+bufferUpto);
+      Arrays.fill(buffers, offset, bufferUpto+1, null);
+    }
+    if (reuseFirst) {
+      // Re-use the first buffer
+      bufferUpto = 0;
+      intUpto = 0;
+      intOffset = 0;
+      buffer = buffers[0];
+    } else {
+      bufferUpto = -1;
+      buffers[0] = null;
+      intUpto = INT_BLOCK_SIZE;
+      intOffset = -INT_BLOCK_SIZE;
+      buffer = null;
+    }
   }
 }
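
When reuseFirst is false, the recycle branch above returns buffers[0..bufferUpto] to the allocator, so the old code left the pool and the allocator both holding slot 0's block. A sketch of why the nulling matters (the no-arg IntBlockPool constructor with a direct allocator is an assumption):

    IntBlockPool pool = new IntBlockPool();
    pool.nextBuffer();        // buffers[0] allocated
    pool.reset(false, false); // buffers[0] recycled back to the allocator
    // Without buffers[0] = null, later code that trusted the slot (a
    // subsequent reuseFirst reset, or a test assertion) could read a block
    // the allocator has since handed to someone else.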

lucene/core/src/test/org/apache/lucene/index/TestStressIndexing2.java

@@ -114,7 +114,7 @@ public class TestStressIndexing2 extends LuceneTestCase {
     Directory dir1 = newDirectory();
     Directory dir2 = newDirectory();
     if (VERBOSE) {
-      System.out.println(" nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor);
+      System.out.println(" nThreads=" + nThreads + " iter=" + iter + " range=" + range + " doPooling=" + doReaderPooling + " maxThreadStates=" + maxThreadStates + " sameFieldOrder=" + sameFieldOrder + " mergeFactor=" + mergeFactor + " maxBufferedDocs=" + maxBufferedDocs);
     }
     Map<String,Document> docs = indexRandom(nThreads, iter, range, dir1, maxThreadStates, doReaderPooling);
     if (VERBOSE) {
@@ -334,9 +334,10 @@ public class TestStressIndexing2 extends LuceneTestCase {
       if (fields == null) {
         // make sure r1 is in fact empty (eg has only all
        // deleted docs):
+        Bits liveDocs = MultiFields.getLiveDocs(r1);
         DocsEnum docs = null;
         while(termsEnum.next() != null) {
-          docs = _TestUtil.docs(random(), termsEnum, null, docs, 0);
+          docs = _TestUtil.docs(random(), termsEnum, liveDocs, docs, 0);
           while(docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
             fail("r1 is not empty but r2 is");