mirror of https://github.com/apache/lucene.git

commit 8b8c24d8e7 (parent 7f0e46b7f2)

LUCENE-3613: move codecs.DefaultXXXX into lucene40, TODO: split out 3.x stuff

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1209625 13f79535-47bb-0310-9956-ffa450edef68
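Note: the change is almost entirely a mechanical rename: each codecs.DefaultXxx component becomes codecs.lucene40.Lucene40Xxx, and the segment-format version constants move from DefaultSegmentInfosWriter into SegmentInfos. As a rough sketch of the result (field declarations taken from the Lucene40Codec hunk below; surrounding class and imports elided):

    // After this commit, Lucene40Codec composes the renamed lucene40 formats:
    private final StoredFieldsFormat fieldsFormat = new Lucene40StoredFieldsFormat();
    private final TermVectorsFormat vectorsFormat = new Lucene40TermVectorsFormat();
    private final FieldInfosFormat fieldInfosFormat = new Lucene40FieldInfosFormat();
    private final DocValuesFormat docValuesFormat = new Lucene40DocValuesFormat();
    private final SegmentInfosFormat infosFormat = new Lucene40SegmentInfosFormat();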
@@ -18,10 +18,6 @@ package org.apache.lucene.index.codecs.appending;
  */
 
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultDocValuesFormat;
-import org.apache.lucene.index.codecs.DefaultFieldInfosFormat;
-import org.apache.lucene.index.codecs.DefaultStoredFieldsFormat;
-import org.apache.lucene.index.codecs.DefaultTermVectorsFormat;
 import org.apache.lucene.index.codecs.DocValuesFormat;
 import org.apache.lucene.index.codecs.FieldInfosFormat;
 import org.apache.lucene.index.codecs.StoredFieldsFormat;
@@ -29,6 +25,10 @@ import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.SegmentInfosFormat;
 import org.apache.lucene.index.codecs.TermVectorsFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
+import org.apache.lucene.index.codecs.lucene40.Lucene40FieldInfosFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40DocValuesFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsFormat;
 
 /**
  * This codec extends {@link Lucene40Codec} to work on append-only outputs, such
@@ -43,10 +43,10 @@ public class AppendingCodec extends Codec {
 
   private final PostingsFormat postings = new AppendingPostingsFormat();
   private final SegmentInfosFormat infos = new AppendingSegmentInfosFormat();
-  private final StoredFieldsFormat fields = new DefaultStoredFieldsFormat();
-  private final FieldInfosFormat fieldInfos = new DefaultFieldInfosFormat();
-  private final TermVectorsFormat vectors = new DefaultTermVectorsFormat();
-  private final DocValuesFormat docValues = new DefaultDocValuesFormat();
+  private final StoredFieldsFormat fields = new Lucene40StoredFieldsFormat();
+  private final FieldInfosFormat fieldInfos = new Lucene40FieldInfosFormat();
+  private final TermVectorsFormat vectors = new Lucene40TermVectorsFormat();
+  private final DocValuesFormat docValues = new Lucene40DocValuesFormat();
 
   @Override
   public PostingsFormat postingsFormat() {
@@ -17,10 +17,10 @@ package org.apache.lucene.index.codecs.appending;
  * limitations under the License.
  */
 
-import org.apache.lucene.index.codecs.DefaultSegmentInfosFormat;
 import org.apache.lucene.index.codecs.SegmentInfosWriter;
+import org.apache.lucene.index.codecs.lucene40.Lucene40SegmentInfosFormat;
 
-public class AppendingSegmentInfosFormat extends DefaultSegmentInfosFormat {
+public class AppendingSegmentInfosFormat extends Lucene40SegmentInfosFormat {
   private final SegmentInfosWriter writer = new AppendingSegmentInfosWriter();
 
   @Override
@@ -19,10 +19,10 @@ package org.apache.lucene.index.codecs.appending;
 
 import java.io.IOException;
 
-import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
+import org.apache.lucene.index.codecs.lucene40.Lucene40SegmentInfosWriter;
 import org.apache.lucene.store.IndexOutput;
 
-public class AppendingSegmentInfosWriter extends DefaultSegmentInfosWriter {
+public class AppendingSegmentInfosWriter extends Lucene40SegmentInfosWriter {
 
   @Override
   public void prepareCommit(IndexOutput segmentOutput) throws IOException {
@@ -26,7 +26,6 @@ import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -401,20 +400,20 @@ public class CheckIndex {
     String sFormat = "";
     boolean skip = false;
 
-    if (format == DefaultSegmentInfosWriter.FORMAT_DIAGNOSTICS) {
+    if (format == SegmentInfos.FORMAT_DIAGNOSTICS) {
       sFormat = "FORMAT_DIAGNOSTICS [Lucene 2.9]";
-    } else if (format == DefaultSegmentInfosWriter.FORMAT_HAS_VECTORS) {
+    } else if (format == SegmentInfos.FORMAT_HAS_VECTORS) {
       sFormat = "FORMAT_HAS_VECTORS [Lucene 3.1]";
-    } else if (format == DefaultSegmentInfosWriter.FORMAT_3_1) {
+    } else if (format == SegmentInfos.FORMAT_3_1) {
      sFormat = "FORMAT_3_1 [Lucene 3.1+]";
-    } else if (format == DefaultSegmentInfosWriter.FORMAT_4_0) {
+    } else if (format == SegmentInfos.FORMAT_4_0) {
       sFormat = "FORMAT_4_0 [Lucene 4.0]";
-    } else if (format == DefaultSegmentInfosWriter.FORMAT_CURRENT) {
+    } else if (format == SegmentInfos.FORMAT_CURRENT) {
       throw new RuntimeException("BUG: You should update this tool!");
-    } else if (format < DefaultSegmentInfosWriter.FORMAT_CURRENT) {
+    } else if (format < SegmentInfos.FORMAT_CURRENT) {
       sFormat = "int=" + format + " [newer version of Lucene than this tool supports]";
       skip = true;
-    } else if (format > DefaultSegmentInfosWriter.FORMAT_MINIMUM) {
+    } else if (format > SegmentInfos.FORMAT_MINIMUM) {
       sFormat = "int=" + format + " [older version of Lucene than this tool supports]";
       skip = true;
     }
@@ -33,7 +33,6 @@ import java.util.Set;
 
 import org.apache.lucene.index.FieldInfos.FieldNumberBiMap;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
 import org.apache.lucene.index.codecs.SegmentInfosReader;
 import org.apache.lucene.index.codecs.SegmentInfosWriter;
 import org.apache.lucene.store.ChecksumIndexInput;
@@ -61,6 +60,36 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfo> {
    * be removed, however the numbers should continue to decrease.
    */
 
+  // TODO: i don't think we need *all* these version numbers here?
+  // most codecs only need FORMAT_CURRENT? and we should rename it
+  // to FORMAT_FLEX? because the 'preamble' is just FORMAT_CURRENT + codecname
+  // after that the codec takes over.
+
+  // also i think this class should write this, somehow we let
+  // preflexrw hackishly override this (like seek backwards and overwrite it)
+
+  /** This format adds optional per-segment String
+   *  diagnostics storage, and switches userData to Map */
+  public static final int FORMAT_DIAGNOSTICS = -9;
+
+  /** Each segment records whether it has term vectors */
+  public static final int FORMAT_HAS_VECTORS = -10;
+
+  /** Each segment records the Lucene version that created it. */
+  public static final int FORMAT_3_1 = -11;
+
+  /** Each segment records whether its postings are written
+   *  in the new flex format */
+  public static final int FORMAT_4_0 = -12;
+
+  /** This must always point to the most recent file format.
+   *  Whenever you add a new format, make it 1 smaller (negative version logic)! */
+  // TODO: move this, as its currently part of required preamble
+  public static final int FORMAT_CURRENT = FORMAT_4_0;
+
+  /** This must always point to the first supported file format. */
+  public static final int FORMAT_MINIMUM = FORMAT_DIAGNOSTICS;
+
   /** Used for the segments.gen file only!
    * Whenever you add a new format, make it 1 smaller (negative version logic)! */
   public static final int FORMAT_SEGMENTS_GEN_CURRENT = -2;
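These constants use "negative version logic": every newer format gets a smaller (more negative) number, so the range checks later in this file read inverted relative to the usual ordering. A minimal sketch of the check, mirroring the hunk below (FORMAT_MINIMUM = -9 is the oldest supported format, FORMAT_CURRENT = -12 the newest):

    // format is the int read from the segments file preamble
    if (format > SegmentInfos.FORMAT_MINIMUM)    // e.g. -8: predates -9, too old
      throw new IndexFormatTooOldException(input, format, FORMAT_MINIMUM, FORMAT_CURRENT);
    if (format < SegmentInfos.FORMAT_CURRENT)    // e.g. -13: newer than this code supports
      throw new IndexFormatTooNewException(input, format, FORMAT_MINIMUM, FORMAT_CURRENT);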
@@ -240,14 +269,14 @@ public final class SegmentInfos implements Cloneable, Iterable<SegmentInfo> {
       setFormat(format);
 
       // check that it is a format we can understand
-      if (format > DefaultSegmentInfosWriter.FORMAT_MINIMUM)
+      if (format > FORMAT_MINIMUM)
         throw new IndexFormatTooOldException(input, format,
-          DefaultSegmentInfosWriter.FORMAT_MINIMUM, DefaultSegmentInfosWriter.FORMAT_CURRENT);
-      if (format < DefaultSegmentInfosWriter.FORMAT_CURRENT)
+          FORMAT_MINIMUM, FORMAT_CURRENT);
+      if (format < FORMAT_CURRENT)
         throw new IndexFormatTooNewException(input, format,
-          DefaultSegmentInfosWriter.FORMAT_MINIMUM, DefaultSegmentInfosWriter.FORMAT_CURRENT);
+          FORMAT_MINIMUM, FORMAT_CURRENT);
 
-      if (format <= DefaultSegmentInfosWriter.FORMAT_4_0) {
+      if (format <= FORMAT_4_0) {
         codecFormat = Codec.forName(input.readString());
       } else {
         codecFormat = Codec.forName("Lucene3x");
@@ -76,7 +76,7 @@ public abstract class DocValuesReaderBase extends PerDocValues {
       final String field = fieldInfo.name;
       // TODO can we have a compound file per segment and codec for
       // docvalues?
-      final String id = DefaultDocValuesConsumer.docValuesId(segment,
+      final String id = DocValuesWriterBase.docValuesId(segment,
           fieldInfo.number);
       values.put(field,
           loadDocValues(docCount, dir, id, fieldInfo.getDocValues(), context));
@@ -24,10 +24,6 @@ import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentReadState;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultFieldInfosFormat;
-import org.apache.lucene.index.codecs.DefaultStoredFieldsFormat;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosFormat;
-import org.apache.lucene.index.codecs.DefaultTermVectorsFormat;
 import org.apache.lucene.index.codecs.DocValuesFormat;
 import org.apache.lucene.index.codecs.FieldInfosFormat;
 import org.apache.lucene.index.codecs.StoredFieldsFormat;
@@ -36,6 +32,10 @@ import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.SegmentInfosFormat;
 import org.apache.lucene.index.codecs.TermVectorsFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40FieldInfosFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40SegmentInfosFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsFormat;
 import org.apache.lucene.store.Directory;
 
 /**
@@ -49,18 +49,18 @@ public class Lucene3xCodec extends Codec {
   private final PostingsFormat postingsFormat = new Lucene3xPostingsFormat();
 
   // TODO: this should really be a different impl
-  private final StoredFieldsFormat fieldsFormat = new DefaultStoredFieldsFormat();
+  private final StoredFieldsFormat fieldsFormat = new Lucene40StoredFieldsFormat();
 
   // TODO: this should really be a different impl
-  private final TermVectorsFormat vectorsFormat = new DefaultTermVectorsFormat();
+  private final TermVectorsFormat vectorsFormat = new Lucene40TermVectorsFormat();
 
   // TODO: this should really be a different impl
-  private final FieldInfosFormat fieldInfosFormat = new DefaultFieldInfosFormat();
+  private final FieldInfosFormat fieldInfosFormat = new Lucene40FieldInfosFormat();
 
   // TODO: this should really be a different impl
   // also if we want preflex to *really* be read-only it should throw exception for the writer?
   // this way IR.commit fails on delete/undelete/setNorm/etc ?
-  private final SegmentInfosFormat infosFormat = new DefaultSegmentInfosFormat();
+  private final SegmentInfosFormat infosFormat = new Lucene40SegmentInfosFormat();
 
   // 3.x doesn't support docvalues
   private final DocValuesFormat docValuesFormat = new DocValuesFormat() {
@@ -23,7 +23,7 @@ import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.index.codecs.lucene40.DefaultSkipListReader;
+import org.apache.lucene.index.codecs.lucene40.Lucene40SkipListReader;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.util.Bits;
 
@@ -43,7 +43,7 @@ public class SegmentTermDocs {
 
   private int skipInterval;
   private int maxSkipLevels;
-  private DefaultSkipListReader skipListReader;
+  private Lucene40SkipListReader skipListReader;
 
   private long freqBasePointer;
   private long proxBasePointer;
@@ -201,7 +201,7 @@ public class SegmentTermDocs {
     // don't skip if the target is close (within skipInterval docs away)
     if ((target - skipInterval) >= doc && df >= skipInterval) {  // optimized case
       if (skipListReader == null)
-        skipListReader = new DefaultSkipListReader((IndexInput) freqStream.clone(), maxSkipLevels, skipInterval); // lazily clone
+        skipListReader = new Lucene40SkipListReader((IndexInput) freqStream.clone(), maxSkipLevels, skipInterval); // lazily clone
 
       if (!haveSkipped) {                          // lazily initialize skip stream
         skipListReader.init(skipPointer, freqBasePointer, proxBasePointer, df, currentFieldStoresPayloads);
@@ -18,11 +18,6 @@ package org.apache.lucene.index.codecs.lucene40;
  */
 
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultDocValuesFormat;
-import org.apache.lucene.index.codecs.DefaultFieldInfosFormat;
-import org.apache.lucene.index.codecs.DefaultStoredFieldsFormat;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosFormat;
-import org.apache.lucene.index.codecs.DefaultTermVectorsFormat;
 import org.apache.lucene.index.codecs.DocValuesFormat;
 import org.apache.lucene.index.codecs.FieldInfosFormat;
 import org.apache.lucene.index.codecs.StoredFieldsFormat;
@@ -42,11 +37,11 @@ import org.apache.lucene.index.codecs.perfield.PerFieldPostingsFormat;
 // if they are backwards compatible or smallish we can probably do the backwards in the postingsreader
 // (it writes a minor version, etc).
 public class Lucene40Codec extends Codec {
-  private final StoredFieldsFormat fieldsFormat = new DefaultStoredFieldsFormat();
-  private final TermVectorsFormat vectorsFormat = new DefaultTermVectorsFormat();
-  private final DocValuesFormat docValuesFormat = new DefaultDocValuesFormat();
-  private final FieldInfosFormat fieldInfosFormat = new DefaultFieldInfosFormat();
-  private final SegmentInfosFormat infosFormat = new DefaultSegmentInfosFormat();
+  private final StoredFieldsFormat fieldsFormat = new Lucene40StoredFieldsFormat();
+  private final TermVectorsFormat vectorsFormat = new Lucene40TermVectorsFormat();
+  private final FieldInfosFormat fieldInfosFormat = new Lucene40FieldInfosFormat();
+  private final DocValuesFormat docValuesFormat = new Lucene40DocValuesFormat();
+  private final SegmentInfosFormat infosFormat = new Lucene40SegmentInfosFormat();
   private final PostingsFormat postingsFormat = new PerFieldPostingsFormat() {
     @Override
     public PostingsFormat getPostingsFormatForField(String field) {
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -25,6 +25,7 @@ import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.codecs.DocValuesWriterBase;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
 
@@ -32,13 +33,13 @@ import org.apache.lucene.store.Directory;
  * Default PerDocConsumer implementation that uses compound file.
  * @lucene.experimental
  */
-public class DefaultDocValuesConsumer extends DocValuesWriterBase {
+public class Lucene40DocValuesConsumer extends DocValuesWriterBase {
   private final Directory mainDirectory;
   private Directory directory;
 
   final static String DOC_VALUES_SEGMENT_SUFFIX = "dv";
 
-  public DefaultDocValuesConsumer(PerDocWriteState state) throws IOException {
+  public Lucene40DocValuesConsumer(PerDocWriteState state) throws IOException {
     super(state);
     mainDirectory = state.directory;
     //TODO maybe we should enable a global CFS that all codecs can pull on demand to further reduce the number of files?
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -23,22 +23,25 @@ import java.util.Set;
 import org.apache.lucene.index.PerDocWriteState;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.codecs.DocValuesFormat;
+import org.apache.lucene.index.codecs.PerDocConsumer;
+import org.apache.lucene.index.codecs.PerDocValues;
 import org.apache.lucene.store.Directory;
 
-public class DefaultDocValuesFormat extends DocValuesFormat {
+public class Lucene40DocValuesFormat extends DocValuesFormat {
 
   @Override
   public PerDocConsumer docsConsumer(PerDocWriteState state) throws IOException {
-    return new DefaultDocValuesConsumer(state);
+    return new Lucene40DocValuesConsumer(state);
   }
 
   @Override
   public PerDocValues docsProducer(SegmentReadState state) throws IOException {
-    return new DefaultDocValuesProducer(state);
+    return new Lucene40DocValuesProducer(state);
   }
 
   @Override
   public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    DefaultDocValuesConsumer.files(dir, info, files);
+    Lucene40DocValuesConsumer.files(dir, info, files);
   }
 }
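The renamed docvalues format keeps the same factory symmetry as before: docsConsumer() serves the write path, docsProducer() the read path, and files() enumerates the per-segment files. A hedged usage sketch (writeState, readState, dir and segmentInfo are hypothetical placeholders, not names from this commit):

    DocValuesFormat dv = new Lucene40DocValuesFormat();
    PerDocConsumer consumer = dv.docsConsumer(writeState);  // write path
    PerDocValues producer = dv.docsProducer(readState);     // read path
    Set<String> files = new HashSet<String>();
    dv.files(dir, segmentInfo, files);                      // per-segment file set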
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -26,6 +26,7 @@ import java.util.TreeMap;
 
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.SegmentReadState;
+import org.apache.lucene.index.codecs.DocValuesReaderBase;
 import org.apache.lucene.index.values.IndexDocValues;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
@@ -35,19 +36,19 @@ import org.apache.lucene.util.IOUtils;
  * Default PerDocValues implementation that uses compound file.
  * @lucene.experimental
  */
-public class DefaultDocValuesProducer extends DocValuesReaderBase {
+public class Lucene40DocValuesProducer extends DocValuesReaderBase {
   protected final TreeMap<String,IndexDocValues> docValues;
   private final Directory cfs;
 
   /**
-   * Creates a new {@link DefaultDocValuesProducer} instance and loads all
+   * Creates a new {@link Lucene40DocValuesProducer} instance and loads all
    * {@link IndexDocValues} instances for this segment and codec.
    */
-  public DefaultDocValuesProducer(SegmentReadState state) throws IOException {
+  public Lucene40DocValuesProducer(SegmentReadState state) throws IOException {
     if (state.fieldInfos.anyDocValuesFields()) {
       cfs = new CompoundFileDirectory(state.dir,
           IndexFileNames.segmentFileName(state.segmentInfo.name,
-              DefaultDocValuesConsumer.DOC_VALUES_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_EXTENSION),
+              Lucene40DocValuesConsumer.DOC_VALUES_SEGMENT_SUFFIX, IndexFileNames.COMPOUND_FILE_EXTENSION),
           state.context, false);
       docValues = load(state.fieldInfos, state.segmentInfo.name, state.segmentInfo.docCount, cfs, state.context);
     } else {
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -21,14 +21,17 @@ import java.io.IOException;
 import java.util.Set;
 
 import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.codecs.FieldInfosFormat;
+import org.apache.lucene.index.codecs.FieldInfosReader;
+import org.apache.lucene.index.codecs.FieldInfosWriter;
 import org.apache.lucene.store.Directory;
 
 /**
  * @lucene.experimental
  */
-public class DefaultFieldInfosFormat extends FieldInfosFormat {
-  private final FieldInfosReader reader = new DefaultFieldInfosReader();
-  private final FieldInfosWriter writer = new DefaultFieldInfosWriter();
+public class Lucene40FieldInfosFormat extends FieldInfosFormat {
+  private final FieldInfosReader reader = new Lucene40FieldInfosReader();
+  private final FieldInfosWriter writer = new Lucene40FieldInfosWriter();
 
   @Override
   public FieldInfosReader getFieldInfosReader() throws IOException {
@@ -42,6 +45,6 @@ public class DefaultFieldInfosFormat extends FieldInfosFormat {
 
   @Override
   public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    DefaultFieldInfosReader.files(dir, info, files);
+    Lucene40FieldInfosReader.files(dir, info, files);
   }
 }
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 import java.io.IOException;
 import java.util.Set;
@@ -11,6 +11,7 @@ import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.codecs.FieldInfosReader;
 import org.apache.lucene.index.values.ValueType;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -36,13 +37,13 @@ import org.apache.lucene.store.IndexInput;
 /**
  * @lucene.experimental
  */
-public class DefaultFieldInfosReader extends FieldInfosReader {
+public class Lucene40FieldInfosReader extends FieldInfosReader {
 
-  static final int FORMAT_MINIMUM = DefaultFieldInfosWriter.FORMAT_START;
+  static final int FORMAT_MINIMUM = Lucene40FieldInfosWriter.FORMAT_START;
 
   @Override
   public FieldInfos read(Directory directory, String segmentName, IOContext iocontext) throws IOException {
-    final String fileName = IndexFileNames.segmentFileName(segmentName, "", DefaultFieldInfosWriter.FIELD_INFOS_EXTENSION);
+    final String fileName = IndexFileNames.segmentFileName(segmentName, "", Lucene40FieldInfosWriter.FIELD_INFOS_EXTENSION);
     IndexInput input = directory.openInput(fileName, iocontext);
 
     boolean hasVectors = false;
@@ -53,10 +54,10 @@ public class DefaultFieldInfosReader extends FieldInfosReader {
     final int format = input.readVInt();
 
     if (format > FORMAT_MINIMUM) {
-      throw new IndexFormatTooOldException(input, format, FORMAT_MINIMUM, DefaultFieldInfosWriter.FORMAT_CURRENT);
+      throw new IndexFormatTooOldException(input, format, FORMAT_MINIMUM, Lucene40FieldInfosWriter.FORMAT_CURRENT);
     }
-    if (format < DefaultFieldInfosWriter.FORMAT_CURRENT) {
-      throw new IndexFormatTooNewException(input, format, FORMAT_MINIMUM, DefaultFieldInfosWriter.FORMAT_CURRENT);
+    if (format < Lucene40FieldInfosWriter.FORMAT_CURRENT) {
+      throw new IndexFormatTooNewException(input, format, FORMAT_MINIMUM, Lucene40FieldInfosWriter.FORMAT_CURRENT);
     }
 
     final int size = input.readVInt(); //read in the size
@@ -64,19 +65,19 @@ public class DefaultFieldInfosReader extends FieldInfosReader {
 
     for (int i = 0; i < size; i++) {
       String name = input.readString();
-      final int fieldNumber = format <= DefaultFieldInfosWriter.FORMAT_FLEX? input.readInt():i;
+      final int fieldNumber = format <= Lucene40FieldInfosWriter.FORMAT_FLEX? input.readInt():i;
       byte bits = input.readByte();
-      boolean isIndexed = (bits & DefaultFieldInfosWriter.IS_INDEXED) != 0;
-      boolean storeTermVector = (bits & DefaultFieldInfosWriter.STORE_TERMVECTOR) != 0;
-      boolean storePositionsWithTermVector = (bits & DefaultFieldInfosWriter.STORE_POSITIONS_WITH_TERMVECTOR) != 0;
-      boolean storeOffsetWithTermVector = (bits & DefaultFieldInfosWriter.STORE_OFFSET_WITH_TERMVECTOR) != 0;
-      boolean omitNorms = (bits & DefaultFieldInfosWriter.OMIT_NORMS) != 0;
-      boolean storePayloads = (bits & DefaultFieldInfosWriter.STORE_PAYLOADS) != 0;
+      boolean isIndexed = (bits & Lucene40FieldInfosWriter.IS_INDEXED) != 0;
+      boolean storeTermVector = (bits & Lucene40FieldInfosWriter.STORE_TERMVECTOR) != 0;
+      boolean storePositionsWithTermVector = (bits & Lucene40FieldInfosWriter.STORE_POSITIONS_WITH_TERMVECTOR) != 0;
+      boolean storeOffsetWithTermVector = (bits & Lucene40FieldInfosWriter.STORE_OFFSET_WITH_TERMVECTOR) != 0;
+      boolean omitNorms = (bits & Lucene40FieldInfosWriter.OMIT_NORMS) != 0;
+      boolean storePayloads = (bits & Lucene40FieldInfosWriter.STORE_PAYLOADS) != 0;
       final IndexOptions indexOptions;
-      if ((bits & DefaultFieldInfosWriter.OMIT_TERM_FREQ_AND_POSITIONS) != 0) {
+      if ((bits & Lucene40FieldInfosWriter.OMIT_TERM_FREQ_AND_POSITIONS) != 0) {
         indexOptions = IndexOptions.DOCS_ONLY;
-      } else if ((bits & DefaultFieldInfosWriter.OMIT_POSITIONS) != 0) {
-        if (format <= DefaultFieldInfosWriter.FORMAT_OMIT_POSITIONS) {
+      } else if ((bits & Lucene40FieldInfosWriter.OMIT_POSITIONS) != 0) {
+        if (format <= Lucene40FieldInfosWriter.FORMAT_OMIT_POSITIONS) {
           indexOptions = IndexOptions.DOCS_AND_FREQS;
         } else {
           throw new CorruptIndexException("Corrupt fieldinfos, OMIT_POSITIONS set but format=" + format + " (resource: " + input + ")");
@@ -95,7 +96,7 @@ public class DefaultFieldInfosReader extends FieldInfosReader {
       hasProx |= isIndexed && indexOptions == IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
       hasFreq |= isIndexed && indexOptions != IndexOptions.DOCS_ONLY;
       ValueType docValuesType = null;
-      if (format <= DefaultFieldInfosWriter.FORMAT_FLEX) {
+      if (format <= Lucene40FieldInfosWriter.FORMAT_FLEX) {
         final byte b = input.readByte();
         switch(b) {
           case 0:
@@ -161,6 +162,6 @@ public class DefaultFieldInfosReader extends FieldInfosReader {
   }
 
   public static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    files.add(IndexFileNames.segmentFileName(info.name, "", DefaultFieldInfosWriter.FIELD_INFOS_EXTENSION));
+    files.add(IndexFileNames.segmentFileName(info.name, "", Lucene40FieldInfosWriter.FIELD_INFOS_EXTENSION));
   }
 }
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -22,6 +22,7 @@ import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
+import org.apache.lucene.index.codecs.FieldInfosWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
@@ -29,7 +30,7 @@ import org.apache.lucene.store.IndexOutput;
 /**
  * @lucene.experimental
  */
-public class DefaultFieldInfosWriter extends FieldInfosWriter {
+public class Lucene40FieldInfosWriter extends FieldInfosWriter {
 
   /** Extension of field infos */
   static final String FIELD_INFOS_EXTENSION = "fnm";
@@ -292,7 +292,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
     int skipOffset;
 
     boolean skipped;
-    DefaultSkipListReader skipper;
+    Lucene40SkipListReader skipper;
 
     public SegmentDocsEnum(IndexInput freqIn) throws IOException {
       startFreqIn = freqIn;
@@ -450,7 +450,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
 
       if (skipper == null) {
         // This is the first time this enum has ever been used for skipping -- do lazy init
-        skipper = new DefaultSkipListReader((IndexInput) freqIn.clone(), maxSkipLevels, skipInterval);
+        skipper = new Lucene40SkipListReader((IndexInput) freqIn.clone(), maxSkipLevels, skipInterval);
       }
 
       if (!skipped) {
@@ -502,7 +502,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
     int posPendingCount;
 
     boolean skipped;
-    DefaultSkipListReader skipper;
+    Lucene40SkipListReader skipper;
     private long lazyProxPointer;
 
     public SegmentDocsAndPositionsEnum(IndexInput freqIn, IndexInput proxIn) throws IOException {
@@ -597,7 +597,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
 
       if (skipper == null) {
         // This is the first time this enum has ever been used for skipping -- do lazy init
-        skipper = new DefaultSkipListReader((IndexInput) freqIn.clone(), maxSkipLevels, skipInterval);
+        skipper = new Lucene40SkipListReader((IndexInput) freqIn.clone(), maxSkipLevels, skipInterval);
       }
 
       if (!skipped) {
@@ -698,7 +698,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
     boolean payloadPending;
 
     boolean skipped;
-    DefaultSkipListReader skipper;
+    Lucene40SkipListReader skipper;
     private BytesRef payload;
     private long lazyProxPointer;
 
@@ -796,7 +796,7 @@ public class Lucene40PostingsReader extends PostingsReaderBase {
 
       if (skipper == null) {
         // This is the first time this enum has ever been used for skipping -- do lazy init
-        skipper = new DefaultSkipListReader((IndexInput) freqIn.clone(), maxSkipLevels, skipInterval);
+        skipper = new Lucene40SkipListReader((IndexInput) freqIn.clone(), maxSkipLevels, skipInterval);
       }
 
       if (!skipped) {
@@ -50,7 +50,7 @@ public final class Lucene40PostingsWriter extends PostingsWriterBase {
 
   final IndexOutput freqOut;
   final IndexOutput proxOut;
-  final DefaultSkipListWriter skipListWriter;
+  final Lucene40SkipListWriter skipListWriter;
   /** Expert: The fraction of TermDocs entries stored in skip tables,
    * used to accelerate {@link DocsEnum#advance(int)}.  Larger values result in
    * smaller indexes, greater acceleration, but fewer accelerable cases, while
@@ -113,7 +113,7 @@ public final class Lucene40PostingsWriter extends PostingsWriterBase {
 
     totalNumDocs = state.numDocs;
 
-    skipListWriter = new DefaultSkipListWriter(skipInterval,
+    skipListWriter = new Lucene40SkipListWriter(skipInterval,
                                                maxSkipLevels,
                                                state.numDocs,
                                                freqOut,
@@ -1,4 +1,8 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
+
+import org.apache.lucene.index.codecs.SegmentInfosFormat;
+import org.apache.lucene.index.codecs.SegmentInfosReader;
+import org.apache.lucene.index.codecs.SegmentInfosWriter;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -20,9 +24,9 @@ package org.apache.lucene.index.codecs;
 /**
  * @lucene.experimental
  */
-public class DefaultSegmentInfosFormat extends SegmentInfosFormat {
-  private final SegmentInfosReader reader = new DefaultSegmentInfosReader();
-  private final SegmentInfosWriter writer = new DefaultSegmentInfosWriter();
+public class Lucene40SegmentInfosFormat extends SegmentInfosFormat {
+  private final SegmentInfosReader reader = new Lucene40SegmentInfosReader();
+  private final SegmentInfosWriter writer = new Lucene40SegmentInfosWriter();
 
   @Override
   public SegmentInfosReader getSegmentInfosReader() {
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -25,6 +25,9 @@ import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.codecs.Codec;
+import org.apache.lucene.index.codecs.SegmentInfosReader;
+import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsReader;
 import org.apache.lucene.store.ChecksumIndexInput;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
@@ -34,7 +37,7 @@ import org.apache.lucene.store.IOContext;
  * Default implementation of {@link SegmentInfosReader}.
  * @lucene.experimental
  */
-public class DefaultSegmentInfosReader extends SegmentInfosReader {
+public class Lucene40SegmentInfosReader extends SegmentInfosReader {
 
   // TODO: shove all backwards code to preflex!
   // this is a little tricky, because of IR.commit(), two options:
@@ -68,7 +71,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
       }
 
       try {
-        DefaultStoredFieldsReader.checkCodeVersion(dir, si.getDocStoreSegment());
+        Lucene40StoredFieldsReader.checkCodeVersion(dir, si.getDocStoreSegment());
       } finally {
         // If we opened the directory, close it
         if (dir != directory) dir.close();
@@ -93,7 +96,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
   // if we make a preflex impl we can remove a lot of this hair...
   public SegmentInfo readSegmentInfo(Directory dir, int format, ChecksumIndexInput input) throws IOException {
     final String version;
-    if (format <= DefaultSegmentInfosWriter.FORMAT_3_1) {
+    if (format <= SegmentInfos.FORMAT_3_1) {
       version = input.readString();
     } else {
       version = null;
@@ -112,7 +115,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
       docStoreIsCompoundFile = false;
     }
 
-    if (format > DefaultSegmentInfosWriter.FORMAT_4_0) {
+    if (format > SegmentInfos.FORMAT_4_0) {
       // pre-4.0 indexes write a byte if there is a single norms file
       byte b = input.readByte();
       assert 1 == b;
@@ -126,7 +129,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
       normGen = new HashMap<Integer, Long>();
       for(int j=0;j<numNormGen;j++) {
         int fieldNumber = j;
-        if (format <= DefaultSegmentInfosWriter.FORMAT_4_0) {
+        if (format <= SegmentInfos.FORMAT_4_0) {
           fieldNumber = input.readInt();
         }
 
@@ -142,7 +145,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
 
     final Codec codec;
     // note: if the codec is not available: Codec.forName will throw an exception.
-    if (format <= DefaultSegmentInfosWriter.FORMAT_4_0) {
+    if (format <= SegmentInfos.FORMAT_4_0) {
       codec = Codec.forName(input.readString());
     } else {
      codec = Codec.forName("Lucene3x");
@@ -150,7 +153,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
     final Map<String,String> diagnostics = input.readStringStringMap();
 
     final int hasVectors;
-    if (format <= DefaultSegmentInfosWriter.FORMAT_HAS_VECTORS) {
+    if (format <= SegmentInfos.FORMAT_HAS_VECTORS) {
       hasVectors = input.readByte();
     } else {
       final String storesSegment;
@@ -173,7 +176,7 @@ public class DefaultSegmentInfosReader extends SegmentInfosReader {
       }
       try {
         // TODO: remove this manual file check or push to preflex codec
-        hasVectors = dirToTest.fileExists(IndexFileNames.segmentFileName(storesSegment, "", DefaultTermVectorsReader.VECTORS_INDEX_EXTENSION)) ? SegmentInfo.YES : SegmentInfo.NO;
+        hasVectors = dirToTest.fileExists(IndexFileNames.segmentFileName(storesSegment, "", Lucene40TermVectorsReader.VECTORS_INDEX_EXTENSION)) ? SegmentInfo.YES : SegmentInfo.NO;
       } finally {
         if (isCompoundFile) {
           dirToTest.close();
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -23,6 +23,7 @@ import java.util.Map.Entry;
 
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.codecs.SegmentInfosWriter;
 import org.apache.lucene.store.ChecksumIndexOutput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.FlushInfo;
@@ -34,29 +35,7 @@ import org.apache.lucene.util.IOUtils;
  * Default implementation of {@link SegmentInfosWriter}.
  * @lucene.experimental
  */
-public class DefaultSegmentInfosWriter extends SegmentInfosWriter {
-
-  /** This format adds optional per-segment String
-   *  diagnostics storage, and switches userData to Map */
-  public static final int FORMAT_DIAGNOSTICS = -9;
-
-  /** Each segment records whether it has term vectors */
-  public static final int FORMAT_HAS_VECTORS = -10;
-
-  /** Each segment records the Lucene version that created it. */
-  public static final int FORMAT_3_1 = -11;
-
-  /** Each segment records whether its postings are written
-   *  in the new flex format */
-  public static final int FORMAT_4_0 = -12;
-
-  /** This must always point to the most recent file format.
-   *  whenever you add a new format, make it 1 smaller (negative version logic)! */
-  // TODO: move this, as its currently part of required preamble
-  public static final int FORMAT_CURRENT = FORMAT_4_0;
-
-  /** This must always point to the first supported file format. */
-  public static final int FORMAT_MINIMUM = FORMAT_DIAGNOSTICS;
+public class Lucene40SegmentInfosWriter extends SegmentInfosWriter {
 
   @Override
   public IndexOutput writeInfos(Directory dir, String segmentFileName, String codecID, SegmentInfos infos, IOContext context)
@@ -64,7 +43,7 @@ public class DefaultSegmentInfosWriter extends SegmentInfosWriter {
     IndexOutput out = createOutput(dir, segmentFileName, new IOContext(new FlushInfo(infos.size(), infos.totalDocCount())));
     boolean success = false;
     try {
-      out.writeInt(FORMAT_CURRENT); // write FORMAT
+      out.writeInt(SegmentInfos.FORMAT_CURRENT); // write FORMAT
       out.writeString(codecID); // write codecID
       out.writeLong(infos.version);
       out.writeInt(infos.counter); // write counter
@@ -28,7 +28,7 @@ import org.apache.lucene.store.IndexInput;
 * that stores positions and payloads.
 * @lucene.experimental
 */
-public class DefaultSkipListReader extends MultiLevelSkipListReader {
+public class Lucene40SkipListReader extends MultiLevelSkipListReader {
   private boolean currentFieldStoresPayloads;
   private long freqPointer[];
   private long proxPointer[];
@@ -39,7 +39,7 @@ public class DefaultSkipListReader extends MultiLevelSkipListReader {
   private int lastPayloadLength;
 
 
-  public DefaultSkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval) {
+  public Lucene40SkipListReader(IndexInput skipStream, int maxSkipLevels, int skipInterval) {
     super(skipStream, maxSkipLevels, skipInterval);
     freqPointer = new long[maxSkipLevels];
     proxPointer = new long[maxSkipLevels];
@@ -29,7 +29,7 @@ import org.apache.lucene.index.codecs.MultiLevelSkipListWriter;
 * that stores positions and payloads.
 * @lucene.experimental
 */
-public class DefaultSkipListWriter extends MultiLevelSkipListWriter {
+public class Lucene40SkipListWriter extends MultiLevelSkipListWriter {
   private int[] lastSkipDoc;
   private int[] lastSkipPayloadLength;
   private long[] lastSkipFreqPointer;
@@ -44,7 +44,7 @@ public class DefaultSkipListWriter extends MultiLevelSkipListWriter {
   private long curFreqPointer;
   private long curProxPointer;
 
-  public DefaultSkipListWriter(int skipInterval, int numberOfSkipLevels, int docCount, IndexOutput freqOutput, IndexOutput proxOutput) {
+  public Lucene40SkipListWriter(int skipInterval, int numberOfSkipLevels, int docCount, IndexOutput freqOutput, IndexOutput proxOutput) {
     super(skipInterval, numberOfSkipLevels, docCount);
     this.freqOutput = freqOutput;
     this.proxOutput = proxOutput;
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -22,26 +22,29 @@ import java.util.Set;
 
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.codecs.StoredFieldsFormat;
+import org.apache.lucene.index.codecs.StoredFieldsReader;
+import org.apache.lucene.index.codecs.StoredFieldsWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 
 /** @lucene.experimental */
-public class DefaultStoredFieldsFormat extends StoredFieldsFormat {
+public class Lucene40StoredFieldsFormat extends StoredFieldsFormat {
 
   @Override
   public StoredFieldsReader fieldsReader(Directory directory, SegmentInfo si,
       FieldInfos fn, IOContext context) throws IOException {
-    return new DefaultStoredFieldsReader(directory, si, fn, context);
+    return new Lucene40StoredFieldsReader(directory, si, fn, context);
   }
 
   @Override
   public StoredFieldsWriter fieldsWriter(Directory directory, String segment,
       IOContext context) throws IOException {
-    return new DefaultStoredFieldsWriter(directory, segment, context);
+    return new Lucene40StoredFieldsWriter(directory, segment, context);
   }
 
   @Override
   public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    DefaultStoredFieldsReader.files(dir, info, files);
+    Lucene40StoredFieldsReader.files(dir, info, files);
   }
 }
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -28,6 +28,7 @@ import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.codecs.StoredFieldsReader;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -45,7 +46,7 @@ import java.util.Set;
 *
 * @lucene.internal
 */
-public final class DefaultStoredFieldsReader extends StoredFieldsReader implements Cloneable, Closeable {
+public final class Lucene40StoredFieldsReader extends StoredFieldsReader implements Cloneable, Closeable {
   private final static int FORMAT_SIZE = 4;
 
   private final FieldInfos fieldInfos;
@@ -66,22 +67,22 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
   *  clones are called (eg, currently SegmentReader manages
   *  this logic). */
   @Override
-  public DefaultStoredFieldsReader clone() {
+  public Lucene40StoredFieldsReader clone() {
     ensureOpen();
-    return new DefaultStoredFieldsReader(fieldInfos, numTotalDocs, size, format, docStoreOffset, (IndexInput)fieldsStream.clone(), (IndexInput)indexStream.clone());
+    return new Lucene40StoredFieldsReader(fieldInfos, numTotalDocs, size, format, docStoreOffset, (IndexInput)fieldsStream.clone(), (IndexInput)indexStream.clone());
   }
 
   /** Verifies that the code version which wrote the segment is supported. */
   public static void checkCodeVersion(Directory dir, String segment) throws IOException {
-    final String indexStreamFN = IndexFileNames.segmentFileName(segment, "", DefaultStoredFieldsWriter.FIELDS_INDEX_EXTENSION);
+    final String indexStreamFN = IndexFileNames.segmentFileName(segment, "", Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION);
     IndexInput idxStream = dir.openInput(indexStreamFN, IOContext.DEFAULT);
 
     try {
       int format = idxStream.readInt();
-      if (format < DefaultStoredFieldsWriter.FORMAT_MINIMUM)
-        throw new IndexFormatTooOldException(idxStream, format, DefaultStoredFieldsWriter.FORMAT_MINIMUM, DefaultStoredFieldsWriter.FORMAT_CURRENT);
-      if (format > DefaultStoredFieldsWriter.FORMAT_CURRENT)
-        throw new IndexFormatTooNewException(idxStream, format, DefaultStoredFieldsWriter.FORMAT_MINIMUM, DefaultStoredFieldsWriter.FORMAT_CURRENT);
+      if (format < Lucene40StoredFieldsWriter.FORMAT_MINIMUM)
+        throw new IndexFormatTooOldException(idxStream, format, Lucene40StoredFieldsWriter.FORMAT_MINIMUM, Lucene40StoredFieldsWriter.FORMAT_CURRENT);
+      if (format > Lucene40StoredFieldsWriter.FORMAT_CURRENT)
+        throw new IndexFormatTooNewException(idxStream, format, Lucene40StoredFieldsWriter.FORMAT_MINIMUM, Lucene40StoredFieldsWriter.FORMAT_CURRENT);
     } finally {
       idxStream.close();
     }
@@ -89,7 +90,7 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
   }
 
   // Used only by clone
-  private DefaultStoredFieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int docStoreOffset,
+  private Lucene40StoredFieldsReader(FieldInfos fieldInfos, int numTotalDocs, int size, int format, int docStoreOffset,
                                     IndexInput fieldsStream, IndexInput indexStream) {
     this.fieldInfos = fieldInfos;
     this.numTotalDocs = numTotalDocs;
@@ -100,23 +101,23 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
     this.indexStream = indexStream;
   }
 
-  public DefaultStoredFieldsReader(Directory d, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException {
+  public Lucene40StoredFieldsReader(Directory d, SegmentInfo si, FieldInfos fn, IOContext context) throws IOException {
     final String segment = si.getDocStoreSegment();
     final int docStoreOffset = si.getDocStoreOffset();
     final int size = si.docCount;
     boolean success = false;
     fieldInfos = fn;
     try {
-      fieldsStream = d.openInput(IndexFileNames.segmentFileName(segment, "", DefaultStoredFieldsWriter.FIELDS_EXTENSION), context);
-      final String indexStreamFN = IndexFileNames.segmentFileName(segment, "", DefaultStoredFieldsWriter.FIELDS_INDEX_EXTENSION);
+      fieldsStream = d.openInput(IndexFileNames.segmentFileName(segment, "", Lucene40StoredFieldsWriter.FIELDS_EXTENSION), context);
+      final String indexStreamFN = IndexFileNames.segmentFileName(segment, "", Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION);
       indexStream = d.openInput(indexStreamFN, context);
 
       format = indexStream.readInt();
 
-      if (format < DefaultStoredFieldsWriter.FORMAT_MINIMUM)
-        throw new IndexFormatTooOldException(indexStream, format, DefaultStoredFieldsWriter.FORMAT_MINIMUM, DefaultStoredFieldsWriter.FORMAT_CURRENT);
-      if (format > DefaultStoredFieldsWriter.FORMAT_CURRENT)
-        throw new IndexFormatTooNewException(indexStream, format, DefaultStoredFieldsWriter.FORMAT_MINIMUM, DefaultStoredFieldsWriter.FORMAT_CURRENT);
+      if (format < Lucene40StoredFieldsWriter.FORMAT_MINIMUM)
+        throw new IndexFormatTooOldException(indexStream, format, Lucene40StoredFieldsWriter.FORMAT_MINIMUM, Lucene40StoredFieldsWriter.FORMAT_CURRENT);
+      if (format > Lucene40StoredFieldsWriter.FORMAT_CURRENT)
+        throw new IndexFormatTooNewException(indexStream, format, Lucene40StoredFieldsWriter.FORMAT_MINIMUM, Lucene40StoredFieldsWriter.FORMAT_CURRENT);
 
       final long indexSize = indexStream.length() - FORMAT_SIZE;
 
@@ -190,7 +191,7 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
       FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
 
       int bits = fieldsStream.readByte() & 0xFF;
-      assert bits <= (DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_MASK | DefaultStoredFieldsWriter.FIELD_IS_BINARY): "bits=" + Integer.toHexString(bits);
+      assert bits <= (Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_MASK | Lucene40StoredFieldsWriter.FIELD_IS_BINARY): "bits=" + Integer.toHexString(bits);
 
       switch(visitor.needsField(fieldInfo)) {
         case YES:
@@ -208,19 +209,19 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
   static final Charset UTF8 = Charset.forName("UTF-8");
 
   private void readField(StoredFieldVisitor visitor, FieldInfo info, int bits) throws IOException {
-    final int numeric = bits & DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_MASK;
+    final int numeric = bits & Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_MASK;
     if (numeric != 0) {
       switch(numeric) {
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_INT:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_INT:
          visitor.intField(info, fieldsStream.readInt());
          return;
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_LONG:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_LONG:
          visitor.longField(info, fieldsStream.readLong());
          return;
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_FLOAT:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_FLOAT:
          visitor.floatField(info, Float.intBitsToFloat(fieldsStream.readInt()));
          return;
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_DOUBLE:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_DOUBLE:
          visitor.doubleField(info, Double.longBitsToDouble(fieldsStream.readLong()));
          return;
        default:
@@ -230,7 +231,7 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
       final int length = fieldsStream.readVInt();
       byte bytes[] = new byte[length];
       fieldsStream.readBytes(bytes, 0, length);
-      if ((bits & DefaultStoredFieldsWriter.FIELD_IS_BINARY) != 0) {
+      if ((bits & Lucene40StoredFieldsWriter.FIELD_IS_BINARY) != 0) {
         visitor.binaryField(info, bytes, 0, bytes.length);
       } else {
         visitor.stringField(info, new String(bytes, 0, bytes.length, UTF8));
@@ -239,15 +240,15 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
   }
 
   private void skipField(int bits) throws IOException {
-    final int numeric = bits & DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_MASK;
+    final int numeric = bits & Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_MASK;
     if (numeric != 0) {
       switch(numeric) {
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_INT:
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_FLOAT:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_INT:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_FLOAT:
          fieldsStream.readInt();
          return;
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_LONG:
-        case DefaultStoredFieldsWriter.FIELD_IS_NUMERIC_DOUBLE:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_LONG:
+        case Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_DOUBLE:
          fieldsStream.readLong();
          return;
        default:
@@ -291,12 +292,12 @@ public final class DefaultStoredFieldsReader extends StoredFieldsReader implemen
     if (info.getDocStoreOffset() != -1) {
       assert info.getDocStoreSegment() != null;
       if (!info.getDocStoreIsCompoundFile()) {
-        files.add(IndexFileNames.segmentFileName(info.getDocStoreSegment(), "", DefaultStoredFieldsWriter.FIELDS_INDEX_EXTENSION));
-        files.add(IndexFileNames.segmentFileName(info.getDocStoreSegment(), "", DefaultStoredFieldsWriter.FIELDS_EXTENSION));
+        files.add(IndexFileNames.segmentFileName(info.getDocStoreSegment(), "", Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION));
+        files.add(IndexFileNames.segmentFileName(info.getDocStoreSegment(), "", Lucene40StoredFieldsWriter.FIELDS_EXTENSION));
       }
     } else {
-      files.add(IndexFileNames.segmentFileName(info.name, "", DefaultStoredFieldsWriter.FIELDS_INDEX_EXTENSION));
-      files.add(IndexFileNames.segmentFileName(info.name, "", DefaultStoredFieldsWriter.FIELDS_EXTENSION));
+      files.add(IndexFileNames.segmentFileName(info.name, "", Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION));
+      files.add(IndexFileNames.segmentFileName(info.name, "", Lucene40StoredFieldsWriter.FIELDS_EXTENSION));
     }
   }
 }
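For the readField/skipField hunks above, each stored field starts with a bits byte that packs a binary flag and a numeric-type code. A rough decode sketch (only FIELD_IS_BINARY = 1 << 1 is visible in this diff; the numeric constants are opaque here):

    int bits = fieldsStream.readByte() & 0xFF;
    int numeric = bits & Lucene40StoredFieldsWriter.FIELD_IS_NUMERIC_MASK;
    if (numeric != 0) {
      // one of FIELD_IS_NUMERIC_INT/_LONG/_FLOAT/_DOUBLE: a fixed-width value follows
    } else if ((bits & Lucene40StoredFieldsWriter.FIELD_IS_BINARY) != 0) {
      // length-prefixed raw bytes follow
    } else {
      // length-prefixed UTF-8 string follows
    }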
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
 * Copyright 2004 The Apache Software Foundation
@@ -26,6 +26,8 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.index.MergePolicy.MergeAbortedException;
+import org.apache.lucene.index.codecs.StoredFieldsReader;
+import org.apache.lucene.index.codecs.StoredFieldsWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -35,7 +37,7 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
 /** @lucene.experimental */
-public final class DefaultStoredFieldsWriter extends StoredFieldsWriter {
+public final class Lucene40StoredFieldsWriter extends StoredFieldsWriter {
   // NOTE: bit 0 is free here! You can steal it!
   static final int FIELD_IS_BINARY = 1 << 1;
 
@@ -78,7 +80,7 @@ public final class DefaultStoredFieldsWriter extends StoredFieldsWriter {
   private IndexOutput fieldsStream;
   private IndexOutput indexStream;
 
-  public DefaultStoredFieldsWriter(Directory directory, String segment, IOContext context) throws IOException {
+  public Lucene40StoredFieldsWriter(Directory directory, String segment, IOContext context) throws IOException {
     assert directory != null;
     this.directory = directory;
     this.segment = segment;
@@ -227,12 +229,12 @@ public final class DefaultStoredFieldsWriter extends StoredFieldsWriter {
 
     for (MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
       final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
-      DefaultStoredFieldsReader matchingFieldsReader = null;
+      Lucene40StoredFieldsReader matchingFieldsReader = null;
       if (matchingSegmentReader != null) {
         final StoredFieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
-        // we can only bulk-copy if the matching reader is also a DefaultFieldsReader
-        if (fieldsReader != null && fieldsReader instanceof DefaultStoredFieldsReader) {
-          matchingFieldsReader = (DefaultStoredFieldsReader) fieldsReader;
+        // we can only bulk-copy if the matching reader is also a Lucene40FieldsReader
+        if (fieldsReader != null && fieldsReader instanceof Lucene40StoredFieldsReader) {
+          matchingFieldsReader = (Lucene40StoredFieldsReader) fieldsReader;
         }
       }
 
@ -253,7 +255,7 @@ public final class DefaultStoredFieldsWriter extends StoredFieldsWriter {
|
|||
private final static int MAX_RAW_MERGE_DOCS = 4192;
|
||||
|
||||
private int copyFieldsWithDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
|
||||
final DefaultStoredFieldsReader matchingFieldsReader, int rawDocLengths[])
|
||||
final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
|
||||
throws IOException, MergeAbortedException, CorruptIndexException {
|
||||
int docCount = 0;
|
||||
final int maxDoc = reader.reader.maxDoc();
|
||||
|
@ -307,7 +309,7 @@ public final class DefaultStoredFieldsWriter extends StoredFieldsWriter {
|
|||
}
|
||||
|
||||
private int copyFieldsNoDeletions(MergeState mergeState, final MergeState.IndexReaderAndLiveDocs reader,
|
||||
final DefaultStoredFieldsReader matchingFieldsReader, int rawDocLengths[])
|
||||
final Lucene40StoredFieldsReader matchingFieldsReader, int rawDocLengths[])
|
||||
throws IOException, MergeAbortedException, CorruptIndexException {
|
||||
final int maxDoc = reader.reader.maxDoc();
|
||||
int docCount = 0;
|
|
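
The merge methods above take the downcast reader as a parameter because bulk copy is only legal between identical on-disk formats; a reader of any other class falls back to the per-document decode path. The same gate in miniature, with stand-in types (none of these names are Lucene's):

    interface StoredReader {
      String decodeDoc(int docID); // slow path: decode, then re-encode
    }

    final class RawCapableReader implements StoredReader {
      public String decodeDoc(int docID) { return "doc" + docID; }
      public int copyRawDocs(int count) { return count; } // fast path: raw byte copy
    }

    public class BulkCopyGateSketch {
      static int merge(StoredReader reader, int maxDoc) {
        RawCapableReader matching = null;
        if (reader instanceof RawCapableReader) {
          matching = (RawCapableReader) reader; // same concrete format: fast path allowed
        }
        if (matching != null) {
          return matching.copyRawDocs(maxDoc);
        }
        int copied = 0;
        for (int i = 0; i < maxDoc; i++) {
          reader.decodeDoc(i); // fallback: per-document decode
          copied++;
        }
        return copied;
      }

      public static void main(String[] args) {
        System.out.println(merge(new RawCapableReader(), 10)); // 10, via the raw path
      }
    }
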
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -22,23 +22,26 @@ import java.util.Set;
 
 import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.SegmentInfo;
+import org.apache.lucene.index.codecs.TermVectorsFormat;
+import org.apache.lucene.index.codecs.TermVectorsReader;
+import org.apache.lucene.index.codecs.TermVectorsWriter;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 
-public class DefaultTermVectorsFormat extends TermVectorsFormat {
+public class Lucene40TermVectorsFormat extends TermVectorsFormat {
 
   @Override
   public TermVectorsReader vectorsReader(Directory directory, SegmentInfo segmentInfo, FieldInfos fieldInfos, IOContext context) throws IOException {
-    return new DefaultTermVectorsReader(directory, segmentInfo, fieldInfos, context);
+    return new Lucene40TermVectorsReader(directory, segmentInfo, fieldInfos, context);
   }
 
   @Override
   public TermVectorsWriter vectorsWriter(Directory directory, String segment, IOContext context) throws IOException {
-    return new DefaultTermVectorsWriter(directory, segment, context);
+    return new Lucene40TermVectorsWriter(directory, segment, context);
   }
 
   @Override
   public void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
-    DefaultTermVectorsReader.files(dir, info, files);
+    Lucene40TermVectorsReader.files(dir, info, files);
   }
 }
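
The format class stays a thin factory: reading, writing, and even per-segment file enumeration live with the reader, and files() simply delegates to the static Lucene40TermVectorsReader.files. A toy version of that split, using the real extensions (tvx, tvd, tvf) but stand-in classes:

    import java.util.HashSet;
    import java.util.Set;

    class VectorsReaderSketch {
      // the reader owns the knowledge of which files a segment has
      static final String[] EXTENSIONS = { "tvx", "tvd", "tvf" };

      static void files(String segmentName, Set<String> files) {
        for (String ext : EXTENSIONS) {
          files.add(segmentName + "." + ext);
        }
      }
    }

    public class VectorsFormatSketch {
      void files(String segmentName, Set<String> files) {
        VectorsReaderSketch.files(segmentName, files); // pure delegation
      }

      public static void main(String[] args) {
        Set<String> files = new HashSet<String>();
        new VectorsFormatSketch().files("_0", files);
        System.out.println(files); // the three term vector files for segment _0
      }
    }
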
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -38,6 +38,7 @@ import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.index.codecs.TermVectorsReader;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
@@ -45,7 +46,7 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 
-public class DefaultTermVectorsReader extends TermVectorsReader {
+public class Lucene40TermVectorsReader extends TermVectorsReader {
 
   // NOTE: if you make a new format, it must be larger than
   // the current format
@@ -74,7 +75,8 @@ public class DefaultTermVectorsReader extends TermVectorsReader {
   static final String VECTORS_DOCUMENTS_EXTENSION = "tvd";
 
   /** Extension of vectors index file */
-  static final String VECTORS_INDEX_EXTENSION = "tvx";
+  // TODO: shouldnt be visible to segments reader, preflex should do this itself somehow
+  public static final String VECTORS_INDEX_EXTENSION = "tvx";
 
   private FieldInfos fieldInfos;
 
@@ -91,7 +93,7 @@ public class DefaultTermVectorsReader extends TermVectorsReader {
   private final int format;
 
   // used by clone
-  DefaultTermVectorsReader(FieldInfos fieldInfos, IndexInput tvx, IndexInput tvd, IndexInput tvf, int size, int numTotalDocs, int docStoreOffset, int format) {
+  Lucene40TermVectorsReader(FieldInfos fieldInfos, IndexInput tvx, IndexInput tvd, IndexInput tvf, int size, int numTotalDocs, int docStoreOffset, int format) {
     this.fieldInfos = fieldInfos;
     this.tvx = tvx;
     this.tvd = tvd;
@@ -102,7 +104,7 @@ public class DefaultTermVectorsReader extends TermVectorsReader {
     this.format = format;
   }
 
-  public DefaultTermVectorsReader(Directory d, SegmentInfo si, FieldInfos fieldInfos, IOContext context)
+  public Lucene40TermVectorsReader(Directory d, SegmentInfo si, FieldInfos fieldInfos, IOContext context)
     throws CorruptIndexException, IOException {
     final String segment = si.getDocStoreSegment();
     final int docStoreOffset = si.getDocStoreOffset();
@@ -395,7 +397,7 @@ public class DefaultTermVectorsReader extends TermVectorsReader {
 
     // NOTE: tvf is pre-positioned by caller
     public TVTermsEnum() throws IOException {
-      this.origTVF = DefaultTermVectorsReader.this.tvf;
+      this.origTVF = Lucene40TermVectorsReader.this.tvf;
       tvf = (IndexInput) origTVF.clone();
     }
 
@@ -717,7 +719,7 @@ public class DefaultTermVectorsReader extends TermVectorsReader {
       cloneTvf = (IndexInput) tvf.clone();
     }
 
-    return new DefaultTermVectorsReader(fieldInfos, cloneTvx, cloneTvd, cloneTvf, size, numTotalDocs, docStoreOffset, format);
+    return new Lucene40TermVectorsReader(fieldInfos, cloneTvx, cloneTvd, cloneTvf, size, numTotalDocs, docStoreOffset, format);
   }
 
   public static void files(Directory dir, SegmentInfo info, Set<String> files) throws IOException {
@@ -1,4 +1,4 @@
-package org.apache.lucene.index.codecs;
+package org.apache.lucene.index.codecs.lucene40;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -25,6 +25,8 @@ import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.MergePolicy.MergeAbortedException;
 import org.apache.lucene.index.MergeState;
 import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.index.codecs.TermVectorsReader;
+import org.apache.lucene.index.codecs.TermVectorsWriter;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;
@@ -44,23 +46,23 @@ import org.apache.lucene.util.StringHelper;
 // file; saves a seek to tvd only to read a 0 vint (and
 // saves a byte in tvd)
 
-public final class DefaultTermVectorsWriter extends TermVectorsWriter {
+public final class Lucene40TermVectorsWriter extends TermVectorsWriter {
   private final Directory directory;
   private final String segment;
   private IndexOutput tvx = null, tvd = null, tvf = null;
 
-  public DefaultTermVectorsWriter(Directory directory, String segment, IOContext context) throws IOException {
+  public Lucene40TermVectorsWriter(Directory directory, String segment, IOContext context) throws IOException {
     this.directory = directory;
     this.segment = segment;
     boolean success = false;
     try {
       // Open files for TermVector storage
-      tvx = directory.createOutput(IndexFileNames.segmentFileName(segment, "", DefaultTermVectorsReader.VECTORS_INDEX_EXTENSION), context);
-      tvx.writeInt(DefaultTermVectorsReader.FORMAT_CURRENT);
-      tvd = directory.createOutput(IndexFileNames.segmentFileName(segment, "", DefaultTermVectorsReader.VECTORS_DOCUMENTS_EXTENSION), context);
-      tvd.writeInt(DefaultTermVectorsReader.FORMAT_CURRENT);
-      tvf = directory.createOutput(IndexFileNames.segmentFileName(segment, "", DefaultTermVectorsReader.VECTORS_FIELDS_EXTENSION), context);
-      tvf.writeInt(DefaultTermVectorsReader.FORMAT_CURRENT);
+      tvx = directory.createOutput(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_INDEX_EXTENSION), context);
+      tvx.writeInt(Lucene40TermVectorsReader.FORMAT_CURRENT);
+      tvd = directory.createOutput(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_DOCUMENTS_EXTENSION), context);
+      tvd.writeInt(Lucene40TermVectorsReader.FORMAT_CURRENT);
+      tvf = directory.createOutput(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_FIELDS_EXTENSION), context);
+      tvf.writeInt(Lucene40TermVectorsReader.FORMAT_CURRENT);
       success = true;
     } finally {
       if (!success) {
@@ -97,9 +99,9 @@ public final class DefaultTermVectorsWriter extends TermVectorsWriter {
     tvf.writeVInt(numTerms);
     byte bits = 0x0;
     if (positions)
-      bits |= DefaultTermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR;
+      bits |= Lucene40TermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR;
     if (offsets)
-      bits |= DefaultTermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR;
+      bits |= Lucene40TermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR;
     tvf.writeByte(bits);
 
     assert fieldCount <= numVectorFields;
@@ -202,15 +204,15 @@ public final class DefaultTermVectorsWriter extends TermVectorsWriter {
     } catch (IOException ignored) {}
 
     try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", DefaultTermVectorsReader.VECTORS_INDEX_EXTENSION));
+      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_INDEX_EXTENSION));
     } catch (IOException ignored) {}
 
     try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", DefaultTermVectorsReader.VECTORS_DOCUMENTS_EXTENSION));
+      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_DOCUMENTS_EXTENSION));
     } catch (IOException ignored) {}
 
     try {
-      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", DefaultTermVectorsReader.VECTORS_FIELDS_EXTENSION));
+      directory.deleteFile(IndexFileNames.segmentFileName(segment, "", Lucene40TermVectorsReader.VECTORS_FIELDS_EXTENSION));
     } catch (IOException ignored) {}
   }
 
@@ -219,7 +221,7 @@ public final class DefaultTermVectorsWriter extends TermVectorsWriter {
    * streams. This is used to expedite merging, if the
    * field numbers are congruent.
    */
-  private void addRawDocuments(DefaultTermVectorsReader reader, int[] tvdLengths, int[] tvfLengths, int numDocs) throws IOException {
+  private void addRawDocuments(Lucene40TermVectorsReader reader, int[] tvdLengths, int[] tvfLengths, int numDocs) throws IOException {
     long tvdPosition = tvd.getFilePointer();
     long tvfPosition = tvf.getFilePointer();
     long tvdStart = tvdPosition;
@@ -246,14 +248,14 @@ public final class DefaultTermVectorsWriter extends TermVectorsWriter {
     int numDocs = 0;
     for (final MergeState.IndexReaderAndLiveDocs reader : mergeState.readers) {
       final SegmentReader matchingSegmentReader = mergeState.matchingSegmentReaders[idx++];
-      DefaultTermVectorsReader matchingVectorsReader = null;
+      Lucene40TermVectorsReader matchingVectorsReader = null;
       if (matchingSegmentReader != null) {
         TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReader();
 
-        if (vectorsReader != null && vectorsReader instanceof DefaultTermVectorsReader) {
+        if (vectorsReader != null && vectorsReader instanceof Lucene40TermVectorsReader) {
           // If the TV* files are an older format then they cannot read raw docs:
-          if (((DefaultTermVectorsReader)vectorsReader).canReadRawDocs()) {
-            matchingVectorsReader = (DefaultTermVectorsReader) vectorsReader;
+          if (((Lucene40TermVectorsReader)vectorsReader).canReadRawDocs()) {
+            matchingVectorsReader = (Lucene40TermVectorsReader) vectorsReader;
           }
         }
       }
@@ -272,7 +274,7 @@ public final class DefaultTermVectorsWriter extends TermVectorsWriter {
   private final static int MAX_RAW_MERGE_DOCS = 4192;
 
   private int copyVectorsWithDeletions(MergeState mergeState,
-                                       final DefaultTermVectorsReader matchingVectorsReader,
+                                       final Lucene40TermVectorsReader matchingVectorsReader,
                                        final MergeState.IndexReaderAndLiveDocs reader,
                                        int rawDocLengths[],
                                        int rawDocLengths2[])
@@ -325,7 +327,7 @@ public final class DefaultTermVectorsWriter extends TermVectorsWriter {
   }
 
   private int copyVectorsNoDeletions(MergeState mergeState,
-                                     final DefaultTermVectorsReader matchingVectorsReader,
+                                     final Lucene40TermVectorsReader matchingVectorsReader,
                                      final MergeState.IndexReaderAndLiveDocs reader,
                                      int rawDocLengths[],
                                      int rawDocLengths2[])
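
The constructor above follows a rollback discipline worth noting: open the three outputs inside try, flip a success flag as the very last statement, and on any failure close and best-effort delete whatever was already created. A self-contained sketch of the same pattern with plain java.io streams (file names mimic the tvx/tvd/tvf trio; the helper names here are illustrative, not Lucene's):

    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class RollbackOpenSketch {
      static FileOutputStream tvx, tvd, tvf;

      static void open(File dir) throws IOException {
        boolean success = false;
        try {
          tvx = new FileOutputStream(new File(dir, "_0.tvx"));
          tvd = new FileOutputStream(new File(dir, "_0.tvd"));
          tvf = new FileOutputStream(new File(dir, "_0.tvf"));
          success = true; // only reached if all three opens succeeded
        } finally {
          if (!success) {
            abort(dir); // close what opened, delete what was created
          }
        }
      }

      static void abort(File dir) {
        for (FileOutputStream out : new FileOutputStream[] { tvx, tvd, tvf }) {
          try { if (out != null) out.close(); } catch (IOException ignored) {}
        }
        for (String ext : new String[] { "tvx", "tvd", "tvf" }) {
          new File(dir, "_0." + ext).delete(); // best effort, like the ignored catches above
        }
      }

      public static void main(String[] args) throws IOException {
        open(new File(System.getProperty("java.io.tmpdir")));
        System.out.println("opened _0.tvx, _0.tvd, _0.tvf");
      }
    }
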
@@ -18,13 +18,13 @@ package org.apache.lucene.index.codecs.simpletext;
  */
 
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultDocValuesFormat;
 import org.apache.lucene.index.codecs.DocValuesFormat;
 import org.apache.lucene.index.codecs.FieldInfosFormat;
 import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.SegmentInfosFormat;
 import org.apache.lucene.index.codecs.StoredFieldsFormat;
 import org.apache.lucene.index.codecs.TermVectorsFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40DocValuesFormat;
 
 /**
  * plain text index format.
@@ -39,7 +39,7 @@ public final class SimpleTextCodec extends Codec {
   private final FieldInfosFormat fieldInfosFormat = new SimpleTextFieldInfosFormat();
   private final TermVectorsFormat vectorsFormat = new SimpleTextTermVectorsFormat();
   // TODO: need a plain-text impl
-  private final DocValuesFormat docValues = new DefaultDocValuesFormat();
+  private final DocValuesFormat docValues = new Lucene40DocValuesFormat();
 
   public SimpleTextCodec() {
     super("SimpleText");
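
SimpleTextCodec identifies itself to its superclass via super("SimpleText"), which is what lets segment metadata record just a codec name and resolve it again at read time. A toy registry showing the shape of that mechanism (a sketch, not Lucene's actual implementation):

    import java.util.HashMap;
    import java.util.Map;

    abstract class NamedCodecSketch {
      private static final Map<String, NamedCodecSketch> REGISTRY =
          new HashMap<String, NamedCodecSketch>();
      private final String name;

      protected NamedCodecSketch(String name) {
        this.name = name;
        REGISTRY.put(name, this); // register by name at construction time
      }

      public final String getName() { return name; }

      public static NamedCodecSketch forName(String name) {
        return REGISTRY.get(name); // how a reader resolves a recorded codec name
      }
    }

    public class CodecNameDemo extends NamedCodecSketch {
      public CodecNameDemo() { super("SimpleText"); }

      public static void main(String[] args) {
        new CodecNameDemo();
        System.out.println(NamedCodecSketch.forName("SimpleText").getName());
      }
    }
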
@@ -23,7 +23,6 @@ import java.util.Map.Entry;
 
 import org.apache.lucene.index.SegmentInfo;
 import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosWriter;
 import org.apache.lucene.index.codecs.SegmentInfosWriter;
 import org.apache.lucene.store.ChecksumIndexOutput;
 import org.apache.lucene.store.Directory;
@@ -72,8 +71,8 @@ public class SimpleTextSegmentInfosWriter extends SegmentInfosWriter {
     IndexOutput out = new ChecksumIndexOutput(dir.createOutput(segmentsFileName, new IOContext(new FlushInfo(infos.size(), infos.totalDocCount()))));
     boolean success = false;
     try {
-      // required preamble
-      out.writeInt(DefaultSegmentInfosWriter.FORMAT_CURRENT); // write FORMAT
+      // required preamble:
+      out.writeInt(SegmentInfos.FORMAT_CURRENT); // write FORMAT
       out.writeString(codecID); // write codecID
       // end preamble
 
@@ -31,7 +31,7 @@ import org.apache.lucene.index.codecs.TermStats;
 import org.apache.lucene.index.codecs.TermsConsumer;
 import org.apache.lucene.index.codecs.lucene3x.Lucene3xPostingsFormat;
 import org.apache.lucene.index.codecs.lucene3x.TermInfo;
-import org.apache.lucene.index.codecs.lucene40.DefaultSkipListWriter;
+import org.apache.lucene.index.codecs.lucene40.Lucene40SkipListWriter;
 import org.apache.lucene.store.IndexOutput;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
@@ -41,7 +41,7 @@ class PreFlexFieldsWriter extends FieldsConsumer {
   private final TermInfosWriter termsOut;
   private final IndexOutput freqOut;
   private final IndexOutput proxOut;
-  private final DefaultSkipListWriter skipListWriter;
+  private final Lucene40SkipListWriter skipListWriter;
   private final int totalNumDocs;
 
   public PreFlexFieldsWriter(SegmentWriteState state) throws IOException {
@@ -77,7 +77,7 @@ class PreFlexFieldsWriter extends FieldsConsumer {
       }
     }
 
-    skipListWriter = new DefaultSkipListWriter(termsOut.skipInterval,
+    skipListWriter = new Lucene40SkipListWriter(termsOut.skipInterval,
                                                termsOut.maxSkipLevels,
                                                totalNumDocs,
                                                freqOut,
@@ -31,11 +31,6 @@ import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultDocValuesFormat;
-import org.apache.lucene.index.codecs.DefaultFieldInfosFormat;
-import org.apache.lucene.index.codecs.DefaultStoredFieldsFormat;
-import org.apache.lucene.index.codecs.DefaultSegmentInfosFormat;
-import org.apache.lucene.index.codecs.DefaultTermVectorsFormat;
 import org.apache.lucene.index.codecs.DocValuesFormat;
 import org.apache.lucene.index.codecs.FieldInfosFormat;
 import org.apache.lucene.index.codecs.StoredFieldsFormat;
@@ -43,6 +38,11 @@ import org.apache.lucene.index.codecs.PostingsFormat;
 import org.apache.lucene.index.codecs.SegmentInfosFormat;
 import org.apache.lucene.index.codecs.TermVectorsFormat;
 import org.apache.lucene.index.codecs.lucene40.Lucene40Codec;
+import org.apache.lucene.index.codecs.lucene40.Lucene40FieldInfosFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40DocValuesFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40SegmentInfosFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsFormat;
+import org.apache.lucene.index.codecs.lucene40.Lucene40TermVectorsFormat;
 import org.apache.lucene.index.codecs.pulsing.Pulsing40PostingsFormat;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.PhraseQuery;
@@ -1098,27 +1098,27 @@ public class TestAddIndexes extends LuceneTestCase {
 
     @Override
     public DocValuesFormat docValuesFormat() {
-      return new DefaultDocValuesFormat();
+      return new Lucene40DocValuesFormat();
     }
 
     @Override
     public StoredFieldsFormat storedFieldsFormat() {
-      return new DefaultStoredFieldsFormat();
+      return new Lucene40StoredFieldsFormat();
     }
 
     @Override
     public TermVectorsFormat termVectorsFormat() {
-      return new DefaultTermVectorsFormat();
+      return new Lucene40TermVectorsFormat();
    }
 
     @Override
     public FieldInfosFormat fieldInfosFormat() {
-      return new DefaultFieldInfosFormat();
+      return new Lucene40FieldInfosFormat();
     }
 
     @Override
     public SegmentInfosFormat segmentInfosFormat() {
-      return new DefaultSegmentInfosFormat();
+      return new Lucene40SegmentInfosFormat();
     }
   }
 
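
The test codec above pins every format to the concrete Lucene 4.0 implementations so that exactly one variable, the postings format, differs between the indexes being combined. The shape of that pattern in miniature (stand-in types, not Lucene's API):

    interface FormatSketch { String name(); }

    class BaselineFormat implements FormatSketch {
      public String name() { return "baseline"; }
    }

    class ExperimentalFormat implements FormatSketch {
      public String name() { return "experimental"; }
    }

    abstract class CodecSketch {
      // the one knob a subclass is expected to turn
      abstract FormatSketch postingsFormat();
      // everything else stays pinned to the stable default
      FormatSketch storedFieldsFormat() { return new BaselineFormat(); }
    }

    public class OneKnobCodecDemo {
      public static void main(String[] args) {
        CodecSketch custom = new CodecSketch() {
          @Override FormatSketch postingsFormat() { return new ExperimentalFormat(); }
        };
        System.out.println(custom.postingsFormat().name());     // experimental
        System.out.println(custom.storedFieldsFormat().name()); // baseline
      }
    }
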
@@ -32,7 +32,6 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultTermVectorsReader;
 import org.apache.lucene.index.codecs.TermVectorsReader;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.store.Directory;
@@ -24,13 +24,12 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.TestIndexWriterReader;
 import org.apache.lucene.index.codecs.Codec;
-import org.apache.lucene.index.codecs.DefaultStoredFieldsWriter;
+import org.apache.lucene.index.codecs.lucene40.Lucene40StoredFieldsWriter;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util._TestUtil;
 
@@ -41,8 +40,8 @@ public class TestFileSwitchDirectory extends LuceneTestCase {
    */
   public void testBasic() throws IOException {
     Set<String> fileExtensions = new HashSet<String>();
-    fileExtensions.add(DefaultStoredFieldsWriter.FIELDS_EXTENSION);
-    fileExtensions.add(DefaultStoredFieldsWriter.FIELDS_INDEX_EXTENSION);
+    fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_EXTENSION);
+    fileExtensions.add(Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION);
 
     MockDirectoryWrapper primaryDir = new MockDirectoryWrapper(random, new RAMDirectory());
     primaryDir.setCheckIndexOnClose(false); // only part of an index
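
For context on why the test collects these extensions: FileSwitchDirectory routes each file to one of two directories by extension, so the stored fields data and index (the .fdt/.fdx files behind FIELDS_EXTENSION and FIELDS_INDEX_EXTENSION) can live on different storage than the rest of the index. A hedged sketch, assuming the long-standing FileSwitchDirectory(Set, Directory, Directory, boolean) constructor:

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FileSwitchDirectory;
    import org.apache.lucene.store.RAMDirectory;

    public class SwitchDemo {
      public static void main(String[] args) throws Exception {
        Set<String> primaryExtensions = new HashSet<String>();
        primaryExtensions.add("fdt"); // stored fields data (FIELDS_EXTENSION)
        primaryExtensions.add("fdx"); // stored fields index (FIELDS_INDEX_EXTENSION)

        Directory primary = new RAMDirectory();   // gets the .fdt/.fdx files
        Directory secondary = new RAMDirectory(); // gets everything else
        Directory dir = new FileSwitchDirectory(primaryExtensions, primary, secondary, true);
        dir.close();
      }
    }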