LUCENE-3069: remove impersonation code, fix typos

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene3069@1520592 13f79535-47bb-0310-9956-ffa450edef68
Author: Han Jiang
Date: 2013-09-06 15:12:24 +00:00
Parent: 0a8fc36864
Commit: 6cfd9d7b79
5 changed files with 13 additions and 50 deletions

View File

@ -94,7 +94,7 @@ import org.apache.lucene.codecs.CodecUtil;
* per-field data like number of documents in current field). For each field, there are four blocks:
* <ul>
* <li>statistics bytes block: contains term statistics; </li>
* <li>metadata longs block: delta-encodes monotonical part of metadata; </li>
* <li>metadata longs block: delta-encodes monotonic part of metadata; </li>
* <li>metadata bytes block: encodes other parts of metadata; </li>
* <li>skip block: contains skip data, to speed up metadata seeking and decoding</li>
* </ul>
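The "delta-encodes monotonic part of metadata" item above can be illustrated with a small self-contained sketch (the class and method names are made up, not the branch's actual writer): because values such as file pointers only grow from one term to the next, each one is stored as a VLong delta against its predecessor and restored by a running sum.

    import java.io.IOException;
    import org.apache.lucene.store.DataInput;
    import org.apache.lucene.store.DataOutput;

    // Hypothetical helper for the "metadata longs block": delta-encodes a block
    // of monotonically increasing values.
    final class MonotonicBlock {

      // Each value is written as a VLong delta against the previous one;
      // deltas are never negative because the input is monotonic.
      static void write(DataOutput out, long[] values) throws IOException {
        long last = 0;
        for (long v : values) {
          out.writeVLong(v - last);
          last = v;
        }
      }

      // The original values are restored by accumulating the deltas.
      static void read(DataInput in, long[] values) throws IOException {
        long last = 0;
        for (int i = 0; i < values.length; i++) {
          last += in.readVLong();
          values[i] = last;
        }
      }
    }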
@ -126,7 +126,7 @@ import org.apache.lucene.codecs.CodecUtil;
* <li>
* The format of PostingsHeader and MetaBytes are customized by the specific postings implementation:
* they contain arbitrary per-file data (such as parameters or versioning information), and per-term data
* (non-monotonical ones like pulsed postings data).
* (non-monotonic ones like pulsed postings data).
* </li>
* <li>
* During initialization the reader will load all the blocks into memory. SkipBlock will be decoded, so that during seek
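A rough sketch of the seek path being described (all names and the interval are assumptions, not the branch's actual constants): every Nth term the writer records where that term's metadata starts in the longs and bytes blocks, so a seek can jump to the nearest recorded point and decode forward from there.

    // Hypothetical in-memory skip structure over the metadata blocks.
    final class MetaSkipData {
      static final int SKIP_INTERVAL = 128; // assumed interval

      long[] longsBlockFP; // start of every SKIP_INTERVAL-th term in the "metadata longs" block
      long[] bytesBlockFP; // start of the same term in the "metadata bytes" block

      // Index of the last skip point at or before the target term ordinal; the caller
      // positions both blocks there and decodes at most SKIP_INTERVAL - 1 terms forward.
      int seek(long targetTermOrd) {
        return (int) Math.min(targetTermOrd / SKIP_INTERVAL, longsBlockFP.length - 1);
      }
    }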

View File

@ -78,7 +78,7 @@ import org.apache.lucene.codecs.CodecUtil;
* with the corresponding term. This part is used by FST to share outputs between arcs.
* </li>
* <li>
* Generic byte array: Used to store non-monotonical metadata.
* Generic byte array: Used to store non-monotonic metadata.
* </li>
* </ul>
* </p>
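The "share outputs between arcs" remark is easiest to see with the classic example from the org.apache.lucene.util.fst package javadoc (4.x-era API; exact signatures may differ between releases): terms that share a prefix also share the common portion of their outputs along the prefix arcs.

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.IntsRef;
    import org.apache.lucene.util.fst.Builder;
    import org.apache.lucene.util.fst.FST;
    import org.apache.lucene.util.fst.PositiveIntOutputs;
    import org.apache.lucene.util.fst.Util;

    // Maps three (sorted) terms to long outputs; "dog" and "dogs" share prefix arcs,
    // and the FST stores the shared part of their outputs on those common arcs.
    String[] inputValues = {"cat", "dog", "dogs"};
    long[] outputValues  = {5, 7, 12};

    PositiveIntOutputs outputs = PositiveIntOutputs.getSingleton();
    Builder<Long> builder = new Builder<Long>(FST.INPUT_TYPE.BYTE1, outputs);
    IntsRef scratchInts = new IntsRef();
    for (int i = 0; i < inputValues.length; i++) {
      builder.add(Util.toIntsRef(new BytesRef(inputValues[i]), scratchInts), outputValues[i]);
    }
    FST<Long> fst = builder.finish();

    Long value = Util.get(fst, new BytesRef("dogs")); // 12, reassembled along the shared arcs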
@ -103,14 +103,14 @@ import org.apache.lucene.codecs.CodecUtil;
* <li>
* The format of PostingsHeader and generic meta bytes are customized by the specific postings implementation:
* they contain arbitrary per-file data (such as parameters or versioning information), and per-term data
* (non-monotonical ones like pulsed postings data).
* (non-monotonic ones like pulsed postings data).
* </li>
* <li>
* The format of TermData is determined by FST, typically monotonical metadata will be dense around shallow arcs,
* The format of TermData is determined by FST, typically monotonic metadata will be dense around shallow arcs,
* while in deeper arcs only generic bytes and term statistics exist.
* </li>
* <li>
* The byte Flag is used to indicate which part of metadata exists on current arc. Specially the monotonical part
* The byte Flag is used to indicate which part of metadata exists on current arc. Specially the monotonic part
* is omitted when it is an array of 0s.
* </li>
* <li>
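A minimal sketch of how such a flag byte could look (bit names and layout are illustrative; the branch defines its own constants): each bit records whether a metadata part follows on the current arc, and the monotonic longs are skipped entirely when they are all zero.

    import java.io.IOException;
    import org.apache.lucene.store.DataOutput;

    // Hypothetical flag layout; the real format defines its own bits and ordering.
    final class ArcMetadataWriter {
      static final int HAS_LONGS = 1; // monotonic long[] part follows
      static final int HAS_BYTES = 2; // generic byte[] part follows
      static final int HAS_STATS = 4; // term statistics follow

      static void write(DataOutput out, long[] longs, byte[] bytes, boolean hasStats) throws IOException {
        boolean allZero = true;
        for (long l : longs) {
          if (l != 0) { allZero = false; break; }
        }
        int flag = (allZero ? 0 : HAS_LONGS)
                 | (bytes.length > 0 ? HAS_BYTES : 0)
                 | (hasStats ? HAS_STATS : 0);
        out.writeByte((byte) flag);
        if (!allZero) {               // the monotonic part is omitted when it is all zeros
          for (long l : longs) {
            out.writeVLong(l);
          }
        }
        if (bytes.length > 0) {       // generic, non-monotonic part
          out.writeVInt(bytes.length);
          out.writeBytes(bytes, 0, bytes.length);
        }
        // term statistics (docFreq, totalTermFreq) would be written here when hasStats is set
      }
    }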

View File

@ -616,7 +616,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
final BlockTermState termState;
// metadata buffer, holding monotonical values
// metadata buffer, holding monotonic values
public long[] longs;
// metadata buffer, holding general values
public byte[] bytes;
@ -2314,7 +2314,7 @@ public class BlockTreeTermsReader extends FieldsProducer {
final BlockTermState state;
// metadata buffer, holding monotonical values
// metadata buffer, holding monotonic values
public long[] longs;
// metadata buffer, holding general values
public byte[] bytes;

View File

@ -195,7 +195,7 @@ public final class Lucene41PostingsReader extends PostingsReaderBase {
termState.posStartFP = 0;
termState.payStartFP = 0;
}
if (version < Lucene41PostingsWriter.VERSION_META_ARRAY) { // impersonation
if (version < Lucene41PostingsWriter.VERSION_META_ARRAY) { // backward compatibility
_decodeTerm(in, fieldInfo, termState);
return;
}
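The version gate above, as an isolated sketch (decodeTermFromMetaArray and the version field are stand-in names for the rest of the method, not the actual reader code): segments written before VERSION_META_ARRAY keep using the legacy decode routine, while newer segments get their monotonic values pre-decoded into longs[] by the term dictionary.

    // Simplified stand-in for the backward-compatibility gate:
    void decodeTerm(long[] longs, DataInput in, FieldInfo fieldInfo,
                    BlockTermState termState, boolean absolute) throws IOException {
      if (version < Lucene41PostingsWriter.VERSION_META_ARRAY) {
        // segment written by an older writer: metadata is laid out the old way,
        // so delegate to the legacy routine and keep old indexes readable
        _decodeTerm(in, fieldInfo, termState);
        return;
      }
      // newer segments: monotonic values arrive pre-decoded in longs[],
      // the remaining per-term data is read from the metadata bytes stream `in`
      decodeTermFromMetaArray(longs, in, fieldInfo, termState, absolute);
    }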

View File

@ -67,13 +67,13 @@ public final class Lucene41PostingsWriter extends PostingsWriterBase {
// Increment version to change it
final static int VERSION_START = 0;
final static int VERSION_META_ARRAY = 1;
//final static int VERSION_CURRENT = VERSION_START;
final static int VERSION_CURRENT = VERSION_META_ARRAY;
final IndexOutput docOut;
final IndexOutput posOut;
final IndexOutput payOut;
final static IntBlockTermState emptyState = new IntBlockTermState();
IntBlockTermState lastState;
// How current field indexes postings:
@ -223,10 +223,7 @@ public final class Lucene41PostingsWriter extends PostingsWriterBase {
fieldHasOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
fieldHasPayloads = fieldInfo.hasPayloads();
skipWriter.setField(fieldHasPositions, fieldHasOffsets, fieldHasPayloads);
lastState = newTermState();
if (VERSION_CURRENT < VERSION_META_ARRAY) {
return 0;
}
lastState = emptyState;
if (fieldHasPositions) {
if (fieldHasPayloads || fieldHasOffsets) {
return 3; // doc + pos + pay FP
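The return value in this hunk is the writer's metadata-longs count. A hedged sketch of the contract it implies, with the surrounding term-dictionary plumbing assumed rather than shown in this diff:

    int longsSize = postingsWriter.setField(fieldInfo); // e.g. 3 -> doc + pos + pay file pointers
    long[] longs = new long[longsSize];                 // reused for every term of this field

    // once per term, after its postings have been flushed:
    postingsWriter.encodeTerm(longs, metaBytesOut, fieldInfo, termState, absolute);
    // longs[] now carries the monotonic part (delta-encoded file pointers);
    // metaBytesOut received the generic, non-monotonic per-term data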
@ -519,16 +516,12 @@ public final class Lucene41PostingsWriter extends PostingsWriterBase {
// System.out.println(" no skip: docCount=" + docCount);
// }
}
if (VERSION_CURRENT >= VERSION_META_ARRAY || state.totalTermFreq >= BLOCK_SIZE) {
state.payTermStartFP = payTermStartFP;
} else {
state.payTermStartFP = -1;
}
// if (DEBUG) {
// System.out.println(" payStartFP=" + payStartFP);
// }
state.docTermStartFP = docTermStartFP;
state.posTermStartFP = posTermStartFP;
state.payTermStartFP = payTermStartFP;
state.singletonDocID = singletonDocID;
state.skipOffset = skipOffset;
state.lastPosBlockOffset = lastPosBlockOffset;
@ -542,11 +535,7 @@ public final class Lucene41PostingsWriter extends PostingsWriterBase {
public void encodeTerm(long[] longs, DataOutput out, FieldInfo fieldInfo, BlockTermState _state, boolean absolute) throws IOException {
IntBlockTermState state = (IntBlockTermState)_state;
if (absolute) {
lastState = newTermState();
}
if (VERSION_CURRENT < VERSION_META_ARRAY) { // impersonation
_encodeTerm(out, fieldInfo, state);
return;
lastState = emptyState;
}
longs[0] = state.docTermStartFP - lastState.docTermStartFP;
if (fieldHasPositions) {
@ -566,35 +555,9 @@ public final class Lucene41PostingsWriter extends PostingsWriterBase {
if (state.skipOffset != -1) {
out.writeVLong(state.skipOffset);
}
if (state.payTermStartFP == -1) {
state.payTermStartFP = lastState.payTermStartFP;
}
lastState = state;
}
private void _encodeTerm(DataOutput out, FieldInfo fieldInfo, IntBlockTermState state) throws IOException {
if (state.singletonDocID == -1) {
out.writeVLong(state.docTermStartFP - lastState.docTermStartFP);
lastState.docTermStartFP = state.docTermStartFP;
} else {
out.writeVInt(state.singletonDocID);
}
if (fieldHasPositions) {
out.writeVLong(state.posTermStartFP - lastState.posTermStartFP);
lastState.posTermStartFP = state.posTermStartFP;
if (state.lastPosBlockOffset != -1) {
out.writeVLong(state.lastPosBlockOffset);
}
if ((fieldHasPayloads || fieldHasOffsets) && state.payTermStartFP != -1) {
out.writeVLong(state.payTermStartFP - lastState.payTermStartFP);
lastState.payTermStartFP = state.payTermStartFP;
}
}
if (state.skipOffset != -1) {
out.writeVLong(state.skipOffset);
}
}
@Override
public void close() throws IOException {
IOUtils.close(docOut, posOut, payOut);
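Taken together, the encodeTerm changes in this file reduce to one idea, shown here as a stripped-down sketch (only the doc file pointer is handled; the real method also covers positions, payloads, skip data and the singleton-doc case): deltas are always taken against lastState, and an absolute encoding falls out of resetting lastState to the shared all-zero emptyState instead of allocating a fresh term state.

    // Reduced sketch of the delta-vs-absolute scheme after this commit:
    final static IntBlockTermState emptyState = new IntBlockTermState(); // every file pointer is 0
    IntBlockTermState lastState = emptyState;

    public void encodeTerm(long[] longs, DataOutput out, FieldInfo fieldInfo,
                           BlockTermState _state, boolean absolute) throws IOException {
      IntBlockTermState state = (IntBlockTermState) _state;
      if (absolute) {
        lastState = emptyState;  // delta against zero == absolute value
      }
      longs[0] = state.docTermStartFP - lastState.docTermStartFP;
      lastState = state;         // the next term is encoded relative to this one
    }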