git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@150371 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Otis Gospodnetic 2004-07-10 06:19:01 +00:00
parent de1ea95568
commit 451a311f26
14 changed files with 3 additions and 21 deletions

View File

@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.standard.StandardTokenizer;
import java.io.File;
import java.io.IOException;
import java.io.Reader;
import java.io.IOException;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Set;

View File

@@ -65,8 +65,6 @@ PARSER_BEGIN(StandardTokenizer)
package org.apache.lucene.analysis.standard;
import java.io.*;
/** A grammar-based tokenizer constructed with JavaCC.
*
* <p> This should be a good tokenizer for most European-language documents.

View File

@@ -1,6 +1,5 @@
/* Generated By:JavaCC: Do not edit this line. StandardTokenizerTokenManager.java */
package org.apache.lucene.analysis.standard;
import java.io.*;
public class StandardTokenizerTokenManager implements StandardTokenizerConstants
{

View File

@@ -19,10 +19,8 @@ package org.apache.lucene.index;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.InputStream;
import org.apache.lucene.store.OutputStream;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import java.util.HashMap;
import java.util.Iterator;
import java.io.IOException;

View File

@@ -323,7 +323,7 @@ final class DocumentWriter {
float norm = fieldBoosts[n] * similarity.lengthNorm(fi.name, fieldLengths[n]);
OutputStream norms = directory.createFile(segment + ".f" + n);
try {
norms.writeByte(similarity.encodeNorm(norm));
norms.writeByte(Similarity.encodeNorm(norm));
} finally {
norms.close();
}

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.index;
*/
import java.io.IOException;
import org.apache.lucene.util.BitVector;
final class SegmentMergeInfo {
Term term;

View File

@@ -27,7 +27,6 @@ import java.util.Vector;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.InputStream;
import org.apache.lucene.store.OutputStream;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitVector;

View File

@@ -1,5 +1,4 @@
package org.apache.lucene.index;
import java.io.IOException;
import java.util.*;
/**

View File

@@ -1,7 +1,5 @@
package org.apache.lucene.index;
import java.io.IOException;
/** Provides access to stored term vector of
* a document field.
*/

View File

@@ -17,7 +17,6 @@ package org.apache.lucene.search;
*/
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;

View File

@@ -1,12 +1,8 @@
package org.apache.lucene.search;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import java.io.IOException;
import java.io.Serializable;
/**
* Abstract base class for sorting hits returned by a Query.

View File

@@ -20,7 +20,6 @@ import java.io.IOException;
import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.PriorityQueue;

View File

@@ -18,9 +18,7 @@ package org.apache.lucene.search.spans;
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Similarity;

View File

@@ -128,4 +128,5 @@ extends TestCase {
assertEquals(2, hits.length());
}
}
}