mirror of https://github.com/apache/lucene.git
remove dead code
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@981659 13f79535-47bb-0310-9956-ffa450edef68
parent b905aba624
commit 7f4a23a0b7
CreateIndexTask.java
@@ -29,6 +29,7 @@ import org.apache.lucene.index.NoDeletionPolicy;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.NoMergeScheduler;
 import org.apache.lucene.index.IndexWriterConfig.OpenMode;
+import org.apache.lucene.index.codecs.CodecProvider;
 import org.apache.lucene.util.Version;
 
 import java.io.BufferedOutputStream;
@@ -95,6 +96,11 @@ public class CreateIndexTask extends PerfTask {
       }
     }
 
+    final String defaultCodec = config.get("default.codec", null);
+    if (defaultCodec != null) {
+      CodecProvider.setDefaultCodec(defaultCodec);
+    }
+
     final String mergePolicy = config.get("merge.policy",
       "org.apache.lucene.index.LogByteSizeMergePolicy");
     boolean isCompound = config.get("compound", true);
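Note (not part of the commit): the lines added above follow an install-only-if-configured pattern. A minimal, self-contained sketch of that pattern is below; the class name, the main() driver, and the "Standard" codec string are assumptions made for illustration, while CodecProvider.setDefaultCodec and the "default.codec" key come straight from the diff.

import org.apache.lucene.index.codecs.CodecProvider;

// Illustrative sketch only; mirrors the lines added to CreateIndexTask above.
public class DefaultCodecSketch {

  /** Installs the named codec as the process-wide default, if one is given. */
  static void installDefaultCodec(String defaultCodec) {
    if (defaultCodec != null) {
      // Only override the default codec when the configuration names one.
      CodecProvider.setDefaultCodec(defaultCodec);
    }
  }

  public static void main(String[] args) {
    // In the benchmark task this value comes from config.get("default.codec", null);
    // here it is hard-coded for the sketch.
    installDefaultCodec("Standard"); // assumed codec name
  }
}

If the property is absent, config.get("default.codec", null) yields null and the task leaves whatever default CodecProvider already has in place.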
FieldsReader.java
@@ -186,12 +186,6 @@ final class FieldsReader implements Cloneable {
     indexStream.seek(FORMAT_SIZE + (docID + docStoreOffset) * 8L);
   }
 
-  boolean canReadRawDocs() {
-    // Since we currently only support >3.0 format anymore, always return true!
-    // I leave this method in because it may help for later format changes.
-    return true;
-  }
-
   final Document doc(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
     seekIndex(n);
     long position = indexStream.readLong();
SegmentMerger.java
@@ -321,7 +321,7 @@ final class SegmentMerger {
     FieldsReader matchingFieldsReader = null;
     if (matchingSegmentReader != null) {
       final FieldsReader fieldsReader = matchingSegmentReader.getFieldsReader();
-      if (fieldsReader != null && fieldsReader.canReadRawDocs()) {
+      if (fieldsReader != null) {
         matchingFieldsReader = fieldsReader;
       }
     }
@@ -453,7 +453,7 @@ final class SegmentMerger {
       TermVectorsReader vectorsReader = matchingSegmentReader.getTermVectorsReaderOrig();
 
       // If the TV* files are an older format then they cannot read raw docs:
-      if (vectorsReader != null && vectorsReader.canReadRawDocs()) {
+      if (vectorsReader != null) {
         matchingVectorsReader = vectorsReader;
       }
     }
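Note (not part of the commit): both SegmentMerger hunks have the same shape; the bulk-copy ("raw docs") route used to require a matching reader that also passed canReadRawDocs(), and now a non-null matching reader is enough. A hedged before/after sketch of just that decision is below, using a stand-in interface rather than the real Lucene reader classes.

// Stand-in for FieldsReader/TermVectorsReader in this sketch; after this
// commit the real classes no longer expose canReadRawDocs() at all.
interface RawDocsReader {
  boolean canReadRawDocs();
}

final class MergeDecisionSketch {

  // Before: bulk copying only if the matching reader also reported a
  // raw-docs-capable on-disk format.
  static RawDocsReader pickBefore(RawDocsReader matching) {
    if (matching != null && matching.canReadRawDocs()) {
      return matching;
    }
    return null;
  }

  // After: any matching reader enables the bulk-copy path. For stored fields
  // the removed FieldsReader comment says the check always returned true
  // anyway; for term vectors the assumption is likewise that only formats
  // new enough for raw copying remain supported.
  static RawDocsReader pickAfter(RawDocsReader matching) {
    return matching;
  }
}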
TermVectorsReader.java
@@ -135,10 +135,6 @@ class TermVectorsReader implements Cloneable {
     tvx.seek((docNum + docStoreOffset) * 16L + FORMAT_SIZE);
   }
 
-  boolean canReadRawDocs() {
-    return format >= FORMAT_UTF8_LENGTH_IN_BYTES;
-  }
-
   /** Retrieve the length (in bytes) of the tvd and tvf
    *  entries for the next numDocs starting with
    *  startDocID.  This is used for bulk copying when
@@ -153,11 +149,6 @@ class TermVectorsReader implements Cloneable {
       return;
     }
 
-    // SegmentMerger calls canReadRawDocs() first and should
-    // not call us if that returns false.
-    if (format < FORMAT_UTF8_LENGTH_IN_BYTES)
-      throw new IllegalStateException("cannot read raw docs with older term vector formats");
-
     seekTvx(startDocID);
 
     long tvdPosition = tvx.readLong();