mirror of https://github.com/apache/lucene.git
symmetric change on PostingsReaderBase
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3069@1515387 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent cb0d85ba03
commit c23230e640
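
In short: TempPostingsReaderBase.decodeTerm gains a boolean absolute parameter, and the terms dictionaries stop accumulating metadata deltas themselves (longs[i] = bytesReader.readVLong() replaces longs[i] += bytesReader.readVLong(), and the Arrays.fill(longs, 0) reset goes away). The postings reader now owns the accumulation, resetting its file pointers when absolute is true; the commit title suggests this mirrors an earlier, symmetric change on the writer side. A worked example with illustrative numbers (not taken from the commit):

    // A block of three terms whose doc file pointers are 100, 140, and 160
    // is stored as the vLong deltas {100, 40, 20}. The reader accumulates:
    //
    //   decodeTerm(longs={100}, ..., absolute=true)   // docStartFP = 0 + 100 = 100
    //   decodeTerm(longs={40},  ..., absolute=false)  // docStartFP = 100 + 40 = 140
    //   decodeTerm(longs={20},  ..., absolute=false)  // docStartFP = 140 + 20 = 160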
@@ -831,12 +831,11 @@ public class TempBlockTermsReader extends FieldsProducer {
           // lazily catch up on metadata decode:
           final int limit = state.termBlockOrd;
+          boolean absolute = metaDataUpto == 0;
           // We must set/incr state.termCount because
           // postings impl can look at this
           state.termBlockOrd = metaDataUpto;
-          if (metaDataUpto == 0) {
-            Arrays.fill(longs, 0);
-          }
 
           // TODO: better API would be "jump straight to term=N"???
           while (metaDataUpto < limit) {
             //System.out.println("  decode mdUpto=" + metaDataUpto);
@@ -858,11 +857,12 @@ public class TempBlockTermsReader extends FieldsProducer {
             }
             // metadata
             for (int i = 0; i < longs.length; i++) {
-              longs[i] += bytesReader.readVLong();
+              longs[i] = bytesReader.readVLong();
             }
-            postingsReader.decodeTerm(longs, bytesReader, fieldInfo, state);
+            postingsReader.decodeTerm(longs, bytesReader, fieldInfo, state, absolute);
             metaDataUpto++;
             state.termBlockOrd++;
+            absolute = false;
           }
         } else {
           //System.out.println("  skip! seekPending");
@@ -56,7 +56,7 @@ public abstract class TempPostingsReaderBase implements Closeable {
   public abstract BlockTermState newTermState() throws IOException;
 
   /** Actually decode metadata for next term */
-  public abstract void decodeTerm(long[] longs, DataInput in, FieldInfo fieldInfo, BlockTermState state) throws IOException;
+  public abstract void decodeTerm(long[] longs, DataInput in, FieldInfo fieldInfo, BlockTermState state, boolean absolute) throws IOException;
 
   /** Must fully consume state, since after this call that
    *  TermState may be reused. */
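
For orientation, a minimal free-standing sketch of the accumulate-or-reset contract the new absolute parameter implies; the class name TermPointers and method apply() are hypothetical, not Lucene API:

    // Illustrative sketch only: models what a decodeTerm implementation does
    // with the incoming longs under the new contract.
    final class TermPointers {
      long docStartFP, posStartFP, payStartFP;

      void apply(long[] longs, boolean absolute) {
        if (absolute) {
          // first term decoded in a block: the incoming longs land as absolute values
          docStartFP = 0;
          posStartFP = 0;
          payStartFP = 0;
        }
        docStartFP += longs[0];                        // delta (or absolute after a reset)
        if (longs.length > 1) posStartFP += longs[1];
        if (longs.length > 2) payStartFP += longs[2];
      }
    }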
@@ -799,13 +799,9 @@ public class TempBlockTreeTermsReader extends FieldsProducer {
 
         // lazily catch up on metadata decode:
         final int limit = getTermBlockOrd();
+        boolean absolute = metaDataUpto == 0;
         assert limit > 0;
 
-        if (metaDataUpto == 0) {
-          Arrays.fill(longs, 0);
-        }
-        final int longSize = longs.length;
-
         // TODO: better API would be "jump straight to term=N"???
         while (metaDataUpto < limit) {
@@ -824,12 +820,13 @@ public class TempBlockTreeTermsReader extends FieldsProducer {
             termState.totalTermFreq = termState.docFreq + statsReader.readVLong();
           }
           // metadata
-          for (int i = 0; i < longSize; i++) {
-            longs[i] += bytesReader.readVLong();
+          for (int i = 0; i < longsSize; i++) {
+            longs[i] = bytesReader.readVLong();
           }
-          postingsReader.decodeTerm(longs, bytesReader, fieldInfo, termState);
+          postingsReader.decodeTerm(longs, bytesReader, fieldInfo, termState, absolute);
 
           metaDataUpto++;
+          absolute = false;
         }
         termState.termBlockOrd = metaDataUpto;
       }
@@ -2623,13 +2620,9 @@ public class TempBlockTreeTermsReader extends FieldsProducer {
 
         // lazily catch up on metadata decode:
         final int limit = getTermBlockOrd();
+        boolean absolute = metaDataUpto == 0;
         assert limit > 0;
 
-        if (metaDataUpto == 0) {
-          Arrays.fill(longs, 0);
-        }
-        final int longSize = longs.length;
-
         // TODO: better API would be "jump straight to term=N"???
         while (metaDataUpto < limit) {
@@ -2648,12 +2641,13 @@ public class TempBlockTreeTermsReader extends FieldsProducer {
             state.totalTermFreq = state.docFreq + statsReader.readVLong();
           }
           // metadata
-          for (int i = 0; i < longSize; i++) {
-            longs[i] += bytesReader.readVLong();
+          for (int i = 0; i < longsSize; i++) {
+            longs[i] = bytesReader.readVLong();
           }
-          postingsReader.decodeTerm(longs, bytesReader, fieldInfo, state);
+          postingsReader.decodeTerm(longs, bytesReader, fieldInfo, state, absolute);
 
           metaDataUpto++;
+          absolute = false;
         }
         state.termBlockOrd = metaDataUpto;
       }
@@ -324,7 +324,7 @@ public class TempFSTOrdTermsReader extends FieldsProducer {
           refillMetadata();
         }
         metaBytesReader.reset(metaBytesBlock, bytesStart[upto], bytesLength[upto]);
-        postingsReader.decodeTerm(longs[upto], metaBytesReader, fieldInfo, state);
+        postingsReader.decodeTerm(longs[upto], metaBytesReader, fieldInfo, state, true);
       }
 
       /** Load current stats shard */
@@ -326,7 +326,7 @@ public class TempFSTTermsReader extends FieldsProducer {
           if (meta.bytes != null) {
             bytesReader.reset(meta.bytes, 0, meta.bytes.length);
           }
-          postingsReader.decodeTerm(meta.longs, bytesReader, fieldInfo, state);
+          postingsReader.decodeTerm(meta.longs, bytesReader, fieldInfo, state, true);
           decoded = true;
         }
       }
@@ -473,7 +473,7 @@ public class TempFSTTermsReader extends FieldsProducer {
           if (meta.bytes != null) {
             bytesReader.reset(meta.bytes, 0, meta.bytes.length);
           }
-          postingsReader.decodeTerm(meta.longs, bytesReader, fieldInfo, state);
+          postingsReader.decodeTerm(meta.longs, bytesReader, fieldInfo, state, true);
           decoded = true;
         }
       }
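
Both FST-based readers pass true unconditionally: they decode each term's metadata on its own rather than sequentially through a block, so the longs are treated as absolute values (an inference from the call sites; the diff only shows the flag). In terms of the hypothetical TermPointers sketch above:

    // Hypothetical: a lone term decoded with absolute=true; the reset makes
    // longs[0] land as-is instead of as a delta from a previous term.
    TermPointers ptrs = new TermPointers();
    ptrs.apply(new long[] { 160 }, true);   // ptrs.docStartFP == 160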
@@ -191,18 +191,23 @@ public final class TempPostingsReader extends TempPostingsReaderBase {
   }
 
   @Override
-  public void decodeTerm(long[] longs, DataInput in, FieldInfo fieldInfo, BlockTermState _termState)
+  public void decodeTerm(long[] longs, DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute)
     throws IOException {
     final IntBlockTermState termState = (IntBlockTermState) _termState;
     final boolean fieldHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
     final boolean fieldHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
     final boolean fieldHasPayloads = fieldInfo.hasPayloads();
 
-    termState.docStartFP = longs[0];
-
+    if (absolute) {
+      termState.docStartFP = 0;
+      termState.posStartFP = 0;
+      termState.payStartFP = 0;
+    }
+    termState.docStartFP += longs[0];
     if (fieldHasPositions) {
-      termState.posStartFP = longs[1];
+      termState.posStartFP += longs[1];
       if (fieldHasOffsets || fieldHasPayloads) {
-        termState.payStartFP = longs[2];
+        termState.payStartFP += longs[2];
       }
     }
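
A hypothetical end-to-end run of the TermPointers sketch, mirroring the loop the terms readers drive (absolute for the first term decoded in a block, deltas afterwards):

    public class DecodeTermDemo {
      public static void main(String[] args) {
        long[][] encoded = { {100}, {40}, {20} };  // deltas for doc FPs 100, 140, 160
        TermPointers ptrs = new TermPointers();
        boolean absolute = true;                   // corresponds to metaDataUpto == 0
        for (long[] longs : encoded) {
          ptrs.apply(longs, absolute);
          absolute = false;                        // later terms in the block are deltas
        }
        System.out.println(ptrs.docStartFP);       // prints 160
      }
    }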