LUCENE-1078: remove dead code

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@601337 13f79535-47bb-0310-9956-ffa450edef68
Michael McCandless 2007-12-05 13:59:37 +00:00
parent a0c9992f0b
commit 8a8944de1a
9 changed files with 2 additions and 20 deletions

View File

@@ -19,7 +19,6 @@ package org.apache.lucene.analysis;
import java.io.IOException;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Set;
/**

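Note: the hunk above shrinks the import block by one line (7 context lines become 6), i.e. it drops a now-unused import; which import the commit actually removes is not visible here. An unused import has no effect on the compiled class, it is purely dead source. A minimal standalone sketch (hypothetical class name, not from this commit):

import java.util.Arrays;   // unused: nothing below refers to Arrays

public class UnusedImportDemo {
    public static void main(String[] args) {
        // Compiles and runs identically whether or not the import above is present;
        // deleting it is a pure dead-code cleanup.
        System.out.println("hello");
    }
}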
View File

@@ -1900,7 +1900,6 @@ final class DocumentsWriter {
private ByteSliceReader freq = new ByteSliceReader();
private ByteSliceReader prox = new ByteSliceReader();
private int lastDocID;
private int docID;
private int termFreq;

View File

@@ -36,9 +36,6 @@ import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.BitVector;
import java.io.IOException;
import java.util.*;
/**
* @version $Id$
*/
@@ -301,7 +298,6 @@ class SegmentReader extends DirectoryIndexReader {
fieldInfos = new FieldInfos(cfsDir, segment + ".fnm");
final String fieldsSegment;
final Directory dir;
if (si.getDocStoreOffset() != -1)
fieldsSegment = si.getDocStoreSegment();
@@ -424,7 +420,6 @@ class SegmentReader extends DirectoryIndexReader {
// and can thus not be shared among multiple SegmentReaders
// TODO: Change this in case FieldsReader becomes thread-safe in the future
final String fieldsSegment;
final Directory dir;
Directory storeDir = directory();

View File

@@ -429,18 +429,16 @@ class TermVectorsReader implements Cloneable {
class ParallelArrayTermVectorMapper extends TermVectorMapper
{
private int numTerms;
private String[] terms;
private int[] termFreqs;
private int positions[][] = null;
private TermVectorOffsetInfo offsets[][] = null;
private int positions[][];
private TermVectorOffsetInfo offsets[][];
private int currentPosition;
private boolean storingOffsets;
private boolean storingPositions;
private String field;
public void setExpectations(String field, int numTerms, boolean storeOffsets, boolean storePositions) {
this.numTerms = numTerms;
this.field = field;
terms = new String[numTerms];
termFreqs = new int[numTerms];

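Note: the paired declarations above are the before/after of this change; the "= null" initializers on positions and offsets are dropped, and the two bare declarations are the commit's only two added lines. The initializers are redundant because the JVM already initializes reference-type and array fields to null. A minimal sketch (hypothetical class, not Lucene code):

class FieldDefaultsDemo {
    private int[][] positions;    // defaults to null, same effect as "= null"
    private String[] terms;       // defaults to null
    private int numTerms;         // primitive fields default to 0

    public static void main(String[] args) {
        FieldDefaultsDemo d = new FieldDefaultsDemo();
        System.out.println(d.positions == null);  // true
        System.out.println(d.terms == null);      // true
        System.out.println(d.numTerms);           // 0
    }
}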
View File

@@ -72,7 +72,6 @@ final class TermVectorsWriter {
tvf.writeVInt(numTerms);
final TermPositionVector tpVector;
final TermFreqVector tfVector;
final byte bits;
final boolean storePositions;
@@ -81,14 +80,12 @@ final class TermVectorsWriter {
if (vectors[i] instanceof TermPositionVector) {
// May have positions & offsets
tpVector = (TermPositionVector) vectors[i];
tfVector = null;
storePositions = tpVector.size() > 0 && tpVector.getTermPositions(0) != null;
storeOffsets = tpVector.size() > 0 && tpVector.getOffsets(0) != null;
bits = (byte) ((storePositions ? TermVectorsReader.STORE_POSITIONS_WITH_TERMVECTOR : 0) +
(storeOffsets ? TermVectorsReader.STORE_OFFSET_WITH_TERMVECTOR : 0));
} else {
tpVector = null;
tfVector = vectors[i];
bits = 0;
storePositions = false;
storeOffsets = false;

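Note: in the TermVectorsWriter hunks above, the tfVector local appears to be the dead code being removed: it is declared and assigned in both branches of the instanceof check but never read afterwards. A minimal standalone sketch of that pattern (hypothetical names, not the Lucene code):

public class DeadLocalDemo {
    static int kindOf(Object value) {
        final String unused;          // dead: written in both branches, never read
        final int kind;
        if (value instanceof String) {
            unused = (String) value;
            kind = 1;
        } else {
            unused = null;
            kind = 0;
        }
        return kind;                  // only "kind" carries information
    }

    public static void main(String[] args) {
        System.out.println(kindOf("text")); // 1
        System.out.println(kindOf(42));     // 0
    }
}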
View File

@@ -17,8 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import java.util.ArrayList;
/** Expert: Describes the score computation for document and query, and can distinguish a match independent of a positive value. */
public class ComplexExplanation extends Explanation {
private Boolean match;

View File

@@ -17,8 +17,6 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.SmallFloat;

View File

@@ -19,7 +19,6 @@ package org.apache.lucene.search.function;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.function.DocValues;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.io.Serializable;

View File

@@ -19,7 +19,6 @@ package org.apache.lucene.store;
import java.io.IOException;
import java.util.HashSet;
import java.util.Enumeration;
/**
* Implements {@link LockFactory} for a single in-process instance,