mirror of https://github.com/apache/lucene.git
LUCENE-2329: fix bug in mem tracking caused in merging back from trunk; clean up some dead code
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@943142 13f79535-47bb-0310-9956-ffa450edef68
parent 26b9faddb2
commit 351474f2b5
ByteBlockPool.java

@@ -44,7 +44,7 @@ final class ByteBlockPool {
   abstract static class Allocator {
     abstract void recycleByteBlocks(byte[][] blocks, int start, int end);
     abstract void recycleByteBlocks(List<byte[]> blocks);
-    abstract byte[] getByteBlock(boolean trackAllocations);
+    abstract byte[] getByteBlock();
   }
 
   public byte[][] buffers = new byte[10][];
@@ -55,12 +55,10 @@ final class ByteBlockPool {
   public byte[] buffer;                                     // Current head buffer
   public int byteOffset = -DocumentsWriter.BYTE_BLOCK_SIZE; // Current head offset
 
-  private final boolean trackAllocations;
   private final Allocator allocator;
 
-  public ByteBlockPool(Allocator allocator, boolean trackAllocations) {
+  public ByteBlockPool(Allocator allocator) {
     this.allocator = allocator;
-    this.trackAllocations = trackAllocations;
   }
 
   public void reset() {
@@ -93,7 +91,7 @@ final class ByteBlockPool {
       System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
       buffers = newBuffers;
     }
-    buffer = buffers[1+bufferUpto] = allocator.getByteBlock(trackAllocations);
+    buffer = buffers[1+bufferUpto] = allocator.getByteBlock();
    bufferUpto++;
 
    byteUpto = 0;
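With this commit the Allocator API drops the trackAllocations flag entirely: whether an allocation is charged is decided inside the allocator itself rather than threaded through every getByteBlock call. As a rough sketch of the simplified contract, a trivial non-pooling allocator might now look like the following (DirectAllocator is an illustrative name, not part of this commit):

    import java.util.List;

    // Sketch only: a minimal Allocator against the new flag-free API.
    class DirectAllocator extends ByteBlockPool.Allocator {
      @Override
      byte[] getByteBlock() {
        // Always allocate fresh; a real allocator may pool and account.
        return new byte[DocumentsWriter.BYTE_BLOCK_SIZE];
      }
      @Override
      void recycleByteBlocks(byte[][] blocks, int start, int end) {}
      @Override
      void recycleByteBlocks(List<byte[]> blocks) {}
    }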
DocumentsWriter.java

@@ -196,7 +196,7 @@ final class DocumentsWriter {
      */
     protected byte[] newBuffer(int size) {
       assert size == PER_DOC_BLOCK_SIZE;
-      return perDocAllocator.getByteBlock(false);
+      return perDocAllocator.getByteBlock();
     }
 
     /**
@@ -1300,18 +1300,12 @@ final class DocumentsWriter {
 
     /* Allocate another byte[] from the shared pool */
     @Override
-    byte[] getByteBlock(boolean trackAllocations) {
+    byte[] getByteBlock() {
       synchronized(DocumentsWriter.this) {
         final int size = freeByteBlocks.size();
         final byte[] b;
         if (0 == size) {
           b = new byte[blockSize];
-          // Always record a block allocated, even if
-          // trackAllocations is false. This is necessary
-          // because this block will be shared between
-          // things that don't track allocations (term
-          // vectors) and things that do (freq/prox
-          // postings).
           numBytesUsed += blockSize;
         } else
           b = freeByteBlocks.remove(size-1);
@@ -1347,17 +1341,11 @@ final class DocumentsWriter {
   private ArrayList<int[]> freeIntBlocks = new ArrayList<int[]>();
 
   /* Allocate another int[] from the shared pool */
-  synchronized int[] getIntBlock(boolean trackAllocations) {
+  synchronized int[] getIntBlock() {
     final int size = freeIntBlocks.size();
     final int[] b;
     if (0 == size) {
       b = new int[INT_BLOCK_SIZE];
-      // Always record a block allocated, even if
-      // trackAllocations is false. This is necessary
-      // because this block will be shared between
-      // things that don't track allocations (term
-      // vectors) and things that do (freq/prox
-      // postings).
       numBytesUsed += INT_BLOCK_SIZE*INT_NUM_BYTE;
     } else
       b = freeIntBlocks.remove(size-1);
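getByteBlock() and getIntBlock() share the same free-list pattern: hand back a recycled block if one exists, otherwise allocate a fresh one and charge it to numBytesUsed (recycled blocks were already charged when first allocated, which is why the now-removed comments about trackAllocations were dead). A self-contained sketch of the pattern, with illustrative names:

    import java.util.ArrayList;

    // Illustrative free-list block pool: reuse a recycled block when
    // possible; only a fresh allocation grows the tracked byte count.
    class BlockPoolSketch {
      private final ArrayList<byte[]> freeBlocks = new ArrayList<byte[]>();
      private final int blockSize;
      long numBytesUsed; // memory charged for newly allocated blocks

      BlockPoolSketch(int blockSize) { this.blockSize = blockSize; }

      synchronized byte[] getByteBlock() {
        final int size = freeBlocks.size();
        if (size == 0) {
          numBytesUsed += blockSize;        // only new allocations count
          return new byte[blockSize];
        }
        return freeBlocks.remove(size - 1); // reuse: no new memory charged
      }

      synchronized void recycleByteBlock(byte[] block) {
        freeBlocks.add(block);
      }
    }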
IntBlockPool.java

@@ -28,11 +28,9 @@ final class IntBlockPool {
   public int intOffset = -DocumentsWriter.INT_BLOCK_SIZE; // Current head offset
 
   final private DocumentsWriter docWriter;
-  final boolean trackAllocations;
 
-  public IntBlockPool(DocumentsWriter docWriter, boolean trackAllocations) {
+  public IntBlockPool(DocumentsWriter docWriter) {
     this.docWriter = docWriter;
-    this.trackAllocations = trackAllocations;
   }
 
   public void reset() {
@@ -55,7 +53,7 @@ final class IntBlockPool {
       System.arraycopy(buffers, 0, newBuffers, 0, buffers.length);
       buffers = newBuffers;
     }
-    buffer = buffers[1+bufferUpto] = docWriter.getIntBlock(trackAllocations);
+    buffer = buffers[1+bufferUpto] = docWriter.getIntBlock();
     bufferUpto++;
 
     intUpto = 0;
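IntBlockPool mirrors ByteBlockPool, with one wrinkle visible in DocumentsWriter above: int blocks are charged in bytes, hence the INT_BLOCK_SIZE*INT_NUM_BYTE product. A one-line illustration of that arithmetic (the constant values are assumptions for illustration, not taken from this diff):

    final int INT_BLOCK_SIZE = 8192;  // ints per block (assumed value)
    final int INT_NUM_BYTE = 4;       // bytes per int: Integer.SIZE / 8
    long bytesCharged = (long) INT_BLOCK_SIZE * INT_NUM_BYTE; // 32768 bytes per block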
ParallelPostingsArray.java

@@ -50,19 +50,6 @@ class ParallelPostingsArray {
     return newArray;
   }
 
-  final ParallelPostingsArray shrink(int targetSize, boolean doCopy) {
-    int shrinkSize = ArrayUtil.getShrinkSize(size, targetSize, bytesPerPosting());
-    if (shrinkSize != size) {
-      ParallelPostingsArray newArray = newInstance(targetSize);
-      if (doCopy) {
-        copyTo(newArray, targetSize);
-      }
-      return newArray;
-    } else {
-      return this;
-    }
-  }
-
   void copyTo(ParallelPostingsArray toArray, int numToCopy) {
     System.arraycopy(textStarts, 0, toArray.textStarts, 0, numToCopy);
     System.arraycopy(intStarts, 0, toArray.intStarts, 0, numToCopy);
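The removed shrink(...) is presumably the dead code the commit message mentions: nothing in the indexing chain called it any longer. The surviving copyTo is still what growth-style resizing relies on to carry existing postings into a bigger array. A hedged sketch of that use (the doubling policy here is illustrative, not Lucene's actual growth logic):

    // Sketch: resizing a ParallelPostingsArray via newInstance + copyTo.
    static ParallelPostingsArray growSketch(ParallelPostingsArray current) {
      final int newSize = current.size * 2;      // illustrative policy
      ParallelPostingsArray bigger = current.newInstance(newSize);
      current.copyTo(bigger, current.size);      // preserve existing postings
      return bigger;
    }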
TermsHashPerField.java

@@ -72,7 +72,6 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
     fieldState = docInverterPerField.fieldState;
     this.consumer = perThread.consumer.addField(this, fieldInfo);
     initPostingsArray();
-    bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
 
     streamCount = consumer.getStreamCount();
     numPostingInt = 2*streamCount;
@@ -86,6 +85,7 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
 
   private void initPostingsArray() {
     postingsArray = consumer.createPostingsArray(2);
+    bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
   }
 
   // sugar: just forwards to DW
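These two hunks are the mem-tracking fix named in the commit message, as read from the diff: the bytesUsed charge moves from the constructor into initPostingsArray(), so any later re-creation of the postings array is also counted instead of only the one made at construction time. A schematic with hypothetical names:

    // Schematic only: why charging inside init() is the correct spot.
    class MemTrackSketch {
      long bytesUsed;
      int[] postings;

      MemTrackSketch() {
        init();                            // the charge now happens in here
      }

      void init() {
        postings = new int[2];
        bytesUsed += postings.length * 4;  // charged on every (re)allocation
      }

      void reinit() {
        init();                            // before the fix, this path went uncounted
      }
    }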
TermsHashPerThread.java

@@ -66,8 +66,8 @@ final class TermsHashPerThread extends InvertedDocConsumerPerThread {
     this.termsHash = termsHash;
     this.consumer = termsHash.consumer.addThread(this);
 
-    intPool = new IntBlockPool(termsHash.docWriter, termsHash.trackAllocations);
-    bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator, termsHash.trackAllocations);
+    intPool = new IntBlockPool(termsHash.docWriter);
+    bytePool = new ByteBlockPool(termsHash.docWriter.byteBlockAllocator);
 
     if (nextTermsHash != null) {
       // We are primary
TestByteSlices.java

@@ -27,7 +27,7 @@ public class TestByteSlices extends LuceneTestCase {
 
     /* Allocate another byte[] from the shared pool */
     @Override
-    synchronized byte[] getByteBlock(boolean trackAllocations) {
+    synchronized byte[] getByteBlock() {
      final int size = freeByteBlocks.size();
      final byte[] b;
      if (0 == size)
@@ -53,7 +53,7 @@ public class TestByteSlices extends LuceneTestCase {
   }
 
   public void testBasic() throws Throwable {
-    ByteBlockPool pool = new ByteBlockPool(new ByteBlockAllocator(), false);
+    ByteBlockPool pool = new ByteBlockPool(new ByteBlockAllocator());
 
     final int NUM_STREAM = 25;
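The test change shows the caller-side effect: pool construction no longer takes the boolean. A minimal usage sketch under the assumption that ByteBlockPool's nextBuffer() method pulls the first block (ByteBlockAllocator here stands for the test's own allocator subclass):

    // Usage after this commit: no trackAllocations flag at the call site.
    ByteBlockPool pool = new ByteBlockPool(new ByteBlockAllocator());
    pool.nextBuffer(); // fetch the first block via the allocator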