mirror of https://github.com/apache/lucene.git
LUCENE-3892: Backport of Mike's last changes + removal of unnecessary casts.
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/pforcodec_3892@1370850 13f79535-47bb-0310-9956-ffa450edef68
parent c8fb5b38d5
commit 207c0c6d13
@@ -328,6 +328,10 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
     // no skip data for this term):
     private int skipOffset;
 
+    // docID for next skip point, we won't use skipper if
+    // target docID is not larger than this
+    private int nextSkipDoc;
+
     private Bits liveDocs;
 
     public BlockDocsEnum(FieldInfo fieldInfo) throws IOException {
@@ -363,6 +367,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
       }
       accum = 0;
       docUpto = 0;
+      nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block
       docBufferUpto = BLOCK_SIZE;
       skipped = false;
       return this;
@@ -434,7 +439,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
 
         if (liveDocs == null || liveDocs.get(accum)) {
           doc = accum;
-          freq = (int) freqBuffer[docBufferUpto];
+          freq = freqBuffer[docBufferUpto];
           docBufferUpto++;
           if (DEBUG) {
             System.out.println(" return doc=" + doc + " freq=" + freq);
@@ -451,10 +456,13 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
     @Override
     public int advance(int target) throws IOException {
       // nocommit make frq block load lazy/skippable
       if (DEBUG) {
         System.out.println(" FPR.advance target=" + target);
       }
 
-      // nocommit use skipper!!! it has next last doc id!!
-
-      if (docFreq > BLOCK_SIZE && target - accum > BLOCK_SIZE) {
+      // current skip docID < docIDs generated from current buffer <= next skip docID
+      // we don't need to skip if target is buffered already
+      if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {
+
+        if (DEBUG) {
+          System.out.println("load skipper");
@@ -493,6 +501,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
           accum = skipper.getDoc(); // actually, this is just lastSkipEntry
           docIn.seek(skipper.getDocPointer()); // now point to the block we want to search
         }
+        nextSkipDoc = skipper.getNextSkipDoc();
       }
 
       // Now scan... this is an inlined/pared down version
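
The two hunks above are the heart of the backported change for BlockDocsEnum: advance() now consults the skip list only when the target docID lies beyond the cached next skip point, instead of the old target - accum > BLOCK_SIZE heuristic. The following is a minimal sketch of that guard; the class, the SkipReader interface and the elided steps are simplified stand-ins, not the branch's actual code.

    // Minimal sketch of the nextSkipDoc guard; names and plumbing are simplified stand-ins.
    final class SkipGuardSketch {
      static final int BLOCK_SIZE = 128;

      interface SkipReader {               // stand-in for BlockPackedSkipReader
        void skipTo(int target);
        int getDoc();                      // doc of the last skip entry read
        int getNextSkipDoc();              // doc of the next skip entry (skipDoc[0])
      }

      int docFreq;                         // total docs for the term
      int accum;                           // last decoded docID
      int nextSkipDoc = BLOCK_SIZE - 1;    // we won't skip if target is found in first block

      int advance(int target, SkipReader skipper) {
        // Only touch the skip list when the target lies past the cached next skip point;
        // otherwise the target is already within the range covered by the current buffer.
        if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {
          skipper.skipTo(target);
          accum = skipper.getDoc();        // jump the enum to the last skip point
          // ... seek the doc stream to skipper's doc pointer and refill the block buffer ...
          nextSkipDoc = skipper.getNextSkipDoc();
        }
        // ... linear scan of the buffered deltas from accum until accum >= target ...
        return accum;
      }
    }
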
@@ -526,7 +535,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
           if (DEBUG) {
             System.out.println(" return doc=" + accum);
           }
-          freq = (int) freqBuffer[docBufferUpto];
+          freq = freqBuffer[docBufferUpto];
           docBufferUpto++;
           return doc = accum;
         } else {
@@ -597,6 +606,8 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
     // no skip data for this term):
     private int skipOffset;
 
+    private int nextSkipDoc;
+
     private Bits liveDocs;
 
     public BlockDocsAndPositionsEnum(FieldInfo fieldInfo) throws IOException {
@@ -638,6 +649,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
       doc = -1;
       accum = 0;
       docUpto = 0;
+      nextSkipDoc = BLOCK_SIZE - 1;
       docBufferUpto = BLOCK_SIZE;
       skipped = false;
       return this;
@@ -730,8 +742,8 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
         if (DEBUG) {
           System.out.println(" accum=" + accum + " docDeltaBuffer[" + docBufferUpto + "]=" + docDeltaBuffer[docBufferUpto]);
         }
-        accum += (int) docDeltaBuffer[docBufferUpto];
-        freq = (int) freqBuffer[docBufferUpto];
+        accum += docDeltaBuffer[docBufferUpto];
+        freq = freqBuffer[docBufferUpto];
         posPendingCount += freq;
         docBufferUpto++;
         docUpto++;
@@ -757,11 +769,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
         System.out.println(" FPR.advance target=" + target);
       }
 
-      // nocommit 2 is heuristic guess!!
-      // nocommit put cheating back! does it help?
-      // nocommit use skipper!!! it has next last doc id!!
-      //if (docFreq > blockSize && target - (blockSize - docBufferUpto) - 2*blockSize > accum) {
-      if (docFreq > BLOCK_SIZE && target - accum > BLOCK_SIZE) {
+      if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {
         if (DEBUG) {
           System.out.println(" try skipper");
         }
@@ -807,6 +815,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
           posPendingFP = skipper.getPosPointer();
           posPendingCount = skipper.getPosBufferUpto();
         }
+        nextSkipDoc = skipper.getNextSkipDoc();
       }
 
       // Now scan... this is an inlined/pared down version
@@ -829,7 +838,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
           refillDocs();
         }
         accum += docDeltaBuffer[docBufferUpto];
-        freq = (int) freqBuffer[docBufferUpto];
+        freq = freqBuffer[docBufferUpto];
         posPendingCount += freq;
         docBufferUpto++;
         docUpto++;
@@ -915,7 +924,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
         refillPositions();
         posBufferUpto = 0;
       }
-      position += (int) posDeltaBuffer[posBufferUpto++];
+      position += posDeltaBuffer[posBufferUpto++];
       posPendingCount--;
       if (DEBUG) {
         System.out.println(" return pos=" + position);
@@ -1020,6 +1029,8 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
     // no skip data for this term):
     private int skipOffset;
 
+    private int nextSkipDoc;
+
     private Bits liveDocs;
 
     public EverythingEnum(FieldInfo fieldInfo) throws IOException {
@@ -1082,6 +1093,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
       doc = -1;
       accum = 0;
       docUpto = 0;
+      nextSkipDoc = BLOCK_SIZE - 1;
       docBufferUpto = BLOCK_SIZE;
       skipped = false;
       return this;
@@ -1222,7 +1234,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
           System.out.println(" accum=" + accum + " docDeltaBuffer[" + docBufferUpto + "]=" + docDeltaBuffer[docBufferUpto]);
         }
         accum += docDeltaBuffer[docBufferUpto];
-        freq = (int) freqBuffer[docBufferUpto];
+        freq = freqBuffer[docBufferUpto];
         posPendingCount += freq;
         docBufferUpto++;
         docUpto++;
@@ -1251,11 +1263,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
         System.out.println(" FPR.advance target=" + target);
       }
 
-      // nocommit 2 is heuristic guess!!
-      // nocommit put cheating back! does it help?
-      // nocommit use skipper!!! it has next last doc id!!
-      //if (docFreq > blockSize && target - (blockSize - docBufferUpto) - 2*blockSize > accum) {
-      if (docFreq > BLOCK_SIZE && target - accum > BLOCK_SIZE) {
+      if (docFreq > BLOCK_SIZE && target > nextSkipDoc) {
 
         if (DEBUG) {
           System.out.println(" try skipper");
@@ -1305,6 +1313,7 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
           lastStartOffset = skipper.getStartOffset();
           payloadByteUpto = skipper.getPayloadByteUpto();
         }
+        nextSkipDoc = skipper.getNextSkipDoc();
       }
 
       // nocommit inline nextDoc here
@@ -1451,12 +1460,12 @@ public final class BlockPackedPostingsReader extends PostingsReaderBase {
       position += posDeltaBuffer[posBufferUpto];
 
       if (indexHasPayloads) {
-        payloadLength = (int) payloadLengthBuffer[posBufferUpto];
+        payloadLength = payloadLengthBuffer[posBufferUpto];
       }
 
       if (indexHasOffsets) {
-        startOffset = lastStartOffset + (int) offsetStartDeltaBuffer[posBufferUpto];
-        endOffset = startOffset + (int) offsetLengthBuffer[posBufferUpto];
+        startOffset = lastStartOffset + offsetStartDeltaBuffer[posBufferUpto];
+        endOffset = startOffset + offsetLengthBuffer[posBufferUpto];
         lastStartOffset = startOffset;
       }
 

@@ -170,6 +170,7 @@ public final class BlockPackedPostingsWriter extends PostingsWriterBase {
     docDeltaBuffer = new int[MIN_DATA_SIZE];
     freqBuffer = new int[MIN_DATA_SIZE];
 
+    // nocommit should we try skipping every 2/4 blocks...?
     skipWriter = new BlockPackedSkipWriter(maxSkipLevels,
                                            BlockPackedPostingsFormat.BLOCK_SIZE,
                                            state.segmentInfo.getDocCount(),
@@ -387,8 +388,8 @@ public final class BlockPackedPostingsWriter extends PostingsWriterBase {
 
       // vInt encode the remaining doc deltas and freqs:
       for(int i=0;i<docBufferUpto;i++) {
-        final int docDelta = (int) docDeltaBuffer[i];
-        final int freq = (int) freqBuffer[i];
+        final int docDelta = docDeltaBuffer[i];
+        final int freq = freqBuffer[i];
         if (!fieldHasFreqs) {
           docOut.writeVInt(docDelta);
         } else if (freqBuffer[i] == 1) {
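
For context on the hunk above: when fewer than a full block of docs remains for a term, the deltas and freqs are written as vInts rather than a packed block. The sketch below fills in the branches that are truncated in the hunk, under the assumption that the low bit of the shifted delta flags freq == 1 (the convention the format ended up with); treat the exact layout as an assumption rather than a quote of this file.

    import java.io.IOException;
    import org.apache.lucene.store.DataOutput;

    // Sketch of the vInt tail encoding for doc deltas and freqs (assumed layout:
    // the branch bodies after "freqBuffer[i] == 1" are truncated in the hunk above).
    final class DocTailSketch {
      static void writeVIntTail(DataOutput docOut, int[] docDeltaBuffer, int[] freqBuffer,
                                int docBufferUpto, boolean fieldHasFreqs) throws IOException {
        for (int i = 0; i < docBufferUpto; i++) {
          final int docDelta = docDeltaBuffer[i];
          final int freq = freqBuffer[i];
          if (!fieldHasFreqs) {
            docOut.writeVInt(docDelta);               // freqs not indexed: bare delta
          } else if (freq == 1) {
            docOut.writeVInt((docDelta << 1) | 1);    // low bit set: freq == 1 is implied
          } else {
            docOut.writeVInt(docDelta << 1);          // low bit clear: explicit freq follows
            docOut.writeVInt(freq);
          }
        }
      }
    }
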
@@ -426,9 +427,9 @@ public final class BlockPackedPostingsWriter extends PostingsWriterBase {
       int lastPayloadLength = -1;
       int payloadBytesReadUpto = 0;
       for(int i=0;i<posBufferUpto;i++) {
-        final int posDelta = (int) posDeltaBuffer[i];
+        final int posDelta = posDeltaBuffer[i];
         if (fieldHasPayloads) {
-          final int payloadLength = (int) payloadLengthBuffer[i];
+          final int payloadLength = payloadLengthBuffer[i];
           if (payloadLength != lastPayloadLength) {
             lastPayloadLength = payloadLength;
             posOut.writeVInt((posDelta<<1)|1);
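
Positions get the same treatment: the tail is vInt-encoded, and the low bit of the shifted position delta signals that a new payload length follows (visible in the writeVInt((posDelta<<1)|1) line above). A self-contained sketch of that scheme, with simplified names and the payload bytes themselves elided:

    import java.io.IOException;
    import org.apache.lucene.store.DataOutput;

    // Sketch of the vInt tail encoding for positions with payloads; payload bytes and
    // offsets are elided, and the names are simplified stand-ins.
    final class PosTailSketch {
      static void writeVIntPosTail(DataOutput posOut, int[] posDeltaBuffer, int[] payloadLengthBuffer,
                                   int posBufferUpto, boolean fieldHasPayloads) throws IOException {
        int lastPayloadLength = -1;
        for (int i = 0; i < posBufferUpto; i++) {
          final int posDelta = posDeltaBuffer[i];
          if (fieldHasPayloads) {
            final int payloadLength = payloadLengthBuffer[i];
            if (payloadLength != lastPayloadLength) {
              lastPayloadLength = payloadLength;
              posOut.writeVInt((posDelta << 1) | 1);  // low bit set: a new payload length follows
              posOut.writeVInt(payloadLength);
            } else {
              posOut.writeVInt(posDelta << 1);        // low bit clear: payload length unchanged
            }
            // ... the payload bytes themselves are written after the length bookkeeping ...
          } else {
            posOut.writeVInt(posDelta);
          }
        }
      }
    }
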
@@ -456,8 +457,8 @@ public final class BlockPackedPostingsWriter extends PostingsWriterBase {
             if (DEBUG) {
               System.out.println(" write offset @ pos.fp=" + posOut.getFilePointer());
             }
-            posOut.writeVInt((int) offsetStartDeltaBuffer[i]);
-            posOut.writeVInt((int) offsetLengthBuffer[i]);
+            posOut.writeVInt(offsetStartDeltaBuffer[i]);
+            posOut.writeVInt(offsetLengthBuffer[i]);
           }
         }
 
@@ -475,7 +476,7 @@ public final class BlockPackedPostingsWriter extends PostingsWriterBase {
 
     int skipOffset;
     if (docCount > BLOCK_SIZE) {
-      skipOffset = (int) (skipWriter.writeSkip(docOut)-docTermStartFP);
+      skipOffset = (int) (skipWriter.writeSkip(docOut) - docTermStartFP);
 
       if (DEBUG) {
         System.out.println("skip packet " + (docOut.getFilePointer() - (docTermStartFP + skipOffset)) + " bytes");
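
The skipOffset computed here is stored relative to the term's starting file pointer in the doc stream, so the per-term metadata stays small while the reader can rebuild the absolute position of the skip data. A small sketch of both sides of that bookkeeping; SkipWriterLike is a hypothetical stand-in for BlockPackedSkipWriter, used only to make the example self-contained.

    import java.io.IOException;
    import org.apache.lucene.store.IndexOutput;

    // Sketch of the skipOffset bookkeeping; SkipWriterLike is a hypothetical stand-in.
    final class SkipOffsetSketch {
      interface SkipWriterLike {
        long writeSkip(IndexOutput out) throws IOException;   // returns the FP where skip data starts
      }

      static int writeSkipOffset(SkipWriterLike skipWriter, IndexOutput docOut, long docTermStartFP)
          throws IOException {
        final long skipFP = skipWriter.writeSkip(docOut);      // skip data appended to the doc stream
        return (int) (skipFP - docTermStartFP);                // only the relative offset is stored per term
      }

      static long skipDataStart(long docTermStartFP, int skipOffset) {
        return docTermStartFP + skipOffset;                    // reader-side reconstruction of the absolute FP
      }
    }
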
@@ -151,6 +151,10 @@ final class BlockPackedSkipReader extends MultiLevelSkipListReader {
     return lastPayloadByteUpto;
   }
 
+  public int getNextSkipDoc() {
+    return skipDoc[0];
+  }
+
   @Override
   protected void seekChild(int level) throws IOException {
     super.seekChild(level);
@@ -147,7 +147,7 @@ final class ForUtil {
   void writeBlock(int[] data, byte[] encoded, IndexOutput out) throws IOException {
     if (isAllEqual(data)) {
       out.writeVInt(ALL_VALUES_EQUAL);
-      out.writeInt((int) data[0]);
+      out.writeInt(data[0]);
       return;
     }
 
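
This last hunk touches ForUtil's special case for degenerate blocks: when every value in a block is identical (all freqs equal to 1, for example), only a marker and the single value are written instead of a packed block. The sketch below shows the write and read halves of that idea; ALL_VALUES_EQUAL, BLOCK_SIZE and the packed fallback are simplified assumptions rather than the real ForUtil internals.

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.lucene.store.DataInput;
    import org.apache.lucene.store.DataOutput;

    // Sketch of the all-values-equal block special case (simplified, not the real ForUtil).
    final class AllEqualBlockSketch {
      static final int ALL_VALUES_EQUAL = 0;   // assumed sentinel written in place of a bits-per-value header
      static final int BLOCK_SIZE = 128;

      static void writeBlock(int[] data, DataOutput out) throws IOException {
        if (isAllEqual(data)) {
          out.writeVInt(ALL_VALUES_EQUAL);     // header says "no packed data follows"
          out.writeInt(data[0]);               // the single repeated value
          return;
        }
        // ... otherwise write a bits-per-value header followed by the packed block ...
      }

      static void readBlock(DataInput in, int[] decoded) throws IOException {
        final int header = in.readVInt();
        if (header == ALL_VALUES_EQUAL) {
          Arrays.fill(decoded, 0, BLOCK_SIZE, in.readInt());
          return;
        }
        // ... otherwise decode BLOCK_SIZE values packed at `header` bits per value ...
      }

      static boolean isAllEqual(int[] data) {
        for (int i = 1; i < data.length; i++) {
          if (data[i] != data[0]) {
            return false;
          }
        }
        return true;
      }
    }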