remove deprecations

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@387550 13f79535-47bb-0310-9956-ffa450edef68
Yonik Seeley 2006-03-21 15:36:32 +00:00
parent 4630f11663
commit 3666a166a1
111 changed files with 342 additions and 8562 deletions

View File

@ -1,60 +0,0 @@
<project name="build-deprecated">
<!-- classpath for deprecated test cases, should be removed when moving from 1.9 to 2.0 -->
<path id="test-deprecated.classpath">
<path refid="demo.classpath"/>
<pathelement location="${build.dir}/classes/test-deprecated"/>
</path>
<!-- junit classpath for deprecated test cases, should be removed when moving from 1.9 to 2.0 -->
<path id="junit-deprecated.classpath">
<pathelement location="${build.dir}/classes/test-deprecated"/>
<pathelement location="${build.dir}/classes/java"/>
<pathelement location="${build.dir}/classes/demo"/>
<fileset dir="lib">
<include name="*.jar"/>
</fileset>
<pathelement path="${java.class.path}"/>
</path>
<target name="compile-test-deprecated" depends="compile-core,compile-demo">
<mkdir dir="${build.dir}/classes/test-deprecated"/>
<compile
srcdir="src/test-deprecated"
destdir="${build.dir}/classes/test-deprecated">
<classpath refid="test-deprecated.classpath"/>
</compile>
</target>
<!-- ================================================================== -->
<!-- R U N D E P R E C A T E D T E S T S -->
<!-- ================================================================== -->
<!-- should be removed when moving from lucene 1.9 to 2.0 -->
<!-- ================================================================== -->
<target name="test-deprecated" depends="compile-test-deprecated" description="Runs deprecated unit tests">
<fail unless="junit.present">
##################################################################
JUnit not found.
Please make sure junit.jar is in ANT_HOME/lib, or made available
to Ant using other mechanisms like -lib or CLASSPATH.
##################################################################
</fail>
<mkdir dir="${junit.output.dir}"/>
<junit printsummary="off" haltonfailure="no"
errorProperty="tests.failed" failureProperty="tests.failed">
<classpath refid="junit-deprecated.classpath"/>
<sysproperty key="dataDir" file="src/test-deprecated"/>
<sysproperty key="tempDir" file="${build.dir}/test-deprecated"/>
<formatter type="xml"/>
<formatter type="brief" usefile="false"/>
<batchtest fork="yes" todir="${junit.output.dir}" unless="testcase">
<fileset dir="src/test-deprecated" includes="**/Test*.java"/>
</batchtest>
<batchtest fork="yes" todir="${junit.output.dir}" if="testcase">
<fileset dir="src/test-deprecated" includes="**/${testcase}.java"/>
</batchtest>
</junit>
<fail if="tests.failed">Tests failed!</fail>
</target>
</project>

View File

@ -6,10 +6,7 @@
<import file="common-build.xml"/>
<!-- Import tests for deprecation compliance.
This will be removed for Lucene 2.0
-->
<import file="build-deprecated.xml"/>
<property name="demo.name" value="lucene-demos-${version}"/>
<property name="demo.war.name" value="luceneweb"/>

View File

@ -44,16 +44,6 @@ public final class BrazilianStemFilter extends TokenFilter {
stemmer = new BrazilianStemmer();
}
/**
* Builds a BrazilianStemFilter that uses an exclusiontable.
*
* @deprecated
*/
public BrazilianStemFilter(TokenStream in, Hashtable exclusiontable) {
this(in);
this.exclusions = new HashSet(exclusiontable.keySet());
}
public BrazilianStemFilter(TokenStream in, Set exclusiontable) {
this(in);
this.exclusions = exclusiontable;

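The same Hashtable-to-Set constructor removal repeats below for CzechAnalyzer, GermanStemFilter, FrenchAnalyzer, FrenchStemFilter, StopFilter, and WordlistLoader. A minimal caller-side migration sketch (the tokenizer and the excluded word are illustrative, not part of this commit):

import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.br.BrazilianStemFilter;

public class ExclusionSetMigration {
    public static TokenStream stems(String text) {
        TokenStream input = new WhitespaceTokenizer(new StringReader(text));
        // 1.9 (removed): new BrazilianStemFilter(input, someHashtable)
        Set exclusions = new HashSet();   // 2.0: pass a plain Set of words
        exclusions.add("exemplo");        // words added here are not stemmed
        return new BrazilianStemFilter(input, exclusions);
    }
}
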
View File

@ -82,15 +82,6 @@ public final class CzechAnalyzer extends Analyzer {
stoptable = StopFilter.makeStopSet( stopwords );
}
/**
* Builds an analyzer with the given stop words.
*
* @deprecated
*/
public CzechAnalyzer( Hashtable stopwords ) {
stoptable = new HashSet(stopwords.keySet());
}
public CzechAnalyzer( HashSet stopwords ) {
stoptable = stopwords;
}

View File

@ -19,10 +19,9 @@ package org.apache.lucene.analysis.de;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Set;
import java.util.HashSet;
/**
* A filter that stems German words. It supports a table of words that should
@ -47,16 +46,6 @@ public final class GermanStemFilter extends TokenFilter
stemmer = new GermanStemmer();
}
/**
* Builds a GermanStemFilter that uses an exclusiontable.
* @deprecated Use {@link #GermanStemFilter(org.apache.lucene.analysis.TokenStream, java.util.Set)} instead.
*/
public GermanStemFilter( TokenStream in, Hashtable exclusiontable )
{
this( in );
exclusionSet = new HashSet(exclusiontable.keySet());
}
/**
* Builds a GermanStemFilter that uses an exclusiontable.
*/
@ -100,14 +89,6 @@ public final class GermanStemFilter extends TokenFilter
}
}
/**
* Set an alternative exclusion list for this filter.
* @deprecated Use {@link #setExclusionSet(java.util.Set)} instead.
*/
public void setExclusionTable( Hashtable exclusiontable )
{
exclusionSet = new HashSet(exclusiontable.keySet());
}
/**
* Set an alternative exclusion list for this filter.

View File

@ -94,15 +94,6 @@ public final class FrenchAnalyzer extends Analyzer {
stoptable = StopFilter.makeStopSet(stopwords);
}
/**
* Builds an analyzer with the given stop words.
*
* @deprecated
*/
public FrenchAnalyzer(Hashtable stopwords) {
stoptable = new HashSet(stopwords.keySet());
}
/**
* Builds an analyzer with the given stop words.
* @throws IOException

View File

@ -45,15 +45,6 @@ public final class FrenchStemFilter extends TokenFilter {
stemmer = new FrenchStemmer();
}
/**
* Builds a FrenchStemFilter that uses an exclusiontable.
*
* @deprecated
*/
public FrenchStemFilter( TokenStream in, Hashtable exclusiontable ) {
this( in );
exclusions = new HashSet(exclusiontable.keySet());
}
public FrenchStemFilter( TokenStream in, Set exclusiontable ) {
this( in );

View File

@ -1049,12 +1049,7 @@ public class MemoryIndex {
return getFieldNames(storedTermVector);
}
// lucene >= 1.9 (deprecated) (remove this method for lucene-1.4.3)
public Collection getIndexedFieldNames(org.apache.lucene.document.Field.TermVector tvSpec) {
throw new UnsupportedOperationException(
"Deprecated; replaced by getFieldNames(IndexReader.FieldOption)");
}
// lucene >= 1.9 (remove this method for lucene-1.4.3)
public Collection getFieldNames(FieldOption fieldOption) {
if (DEBUG) System.err.println("MemoryIndexReader.getFieldNamesOption");

View File

@ -25,20 +25,6 @@ public interface CharStream {
*/
char readChar() throws java.io.IOException;
/**
* Returns the column position of the character last read.
* @deprecated
* @see #getEndColumn
*/
int getColumn();
/**
* Returns the line number of the character last read.
* @deprecated
* @see #getEndLine
*/
int getLine();
/**
* Returns the column number of the last character for current token (being
* matched after the last call to BeginToken).

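This CharStream change recurs in each generated parser in this commit. Code that tracked positions through the removed accessors moves to the end-of-token variants; a sketch against the core queryParser CharStream (the helper is illustrative):

import org.apache.lucene.queryParser.CharStream;

class TokenPosition {
    // 1.9 (removed): stream.getColumn() / stream.getLine()
    // 2.0: only the begin/end accessors remain
    static String endOfToken(CharStream stream) {
        return "line " + stream.getEndLine() + ", column " + stream.getEndColumn();
    }
}
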
View File

@ -15,16 +15,19 @@
*/
package org.apache.lucene.search.similar;
import java.io.*;
import java.util.*;
import java.net.*;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.standard.*;
import org.apache.lucene.document.*;
import org.apache.lucene.search.*;
import org.apache.lucene.index.*;
import org.apache.lucene.util.*;
import java.io.IOException;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Set;
/**
* Simple similarity measures.
@ -105,7 +108,7 @@ public final class SimilarityQueries
TermQuery tq = new TermQuery( new Term( field, word));
try
{
tmp.add( tq, false, false);
tmp.add( tq, BooleanClause.Occur.SHOULD);//false, false);
}
catch( BooleanQuery.TooManyClauses too)
{

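As the hunk above shows, the two-boolean BooleanQuery.add is replaced by the BooleanClause.Occur form: (false, false) maps to SHOULD, (true, false) to MUST, and (false, true) to MUST_NOT. For example (field and terms are illustrative):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

class OccurMigration {
    static BooleanQuery build() {
        BooleanQuery bq = new BooleanQuery();
        // 1.9 (removed): bq.add(tq, false, false);
        bq.add(new TermQuery(new Term("contents", "lucene")), BooleanClause.Occur.SHOULD);
        bq.add(new TermQuery(new Term("contents", "search")), BooleanClause.Occur.MUST);
        return bq;
    }
}
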
View File

@ -25,20 +25,6 @@ public interface CharStream {
*/
char readChar() throws java.io.IOException;
/**
* Returns the column position of the character last read.
* @deprecated
* @see #getEndColumn
*/
int getColumn();
/**
* Returns the line number of the character last read.
* @deprecated
* @see #getEndLine
*/
int getLine();
/**
* Returns the column number of the last character for current token (being
* matched after the last call to BeginToken).

View File

@ -40,7 +40,7 @@ public class DeleteFiles {
IndexReader reader = IndexReader.open(directory);
Term term = new Term("path", args[0]);
int deleted = reader.delete(term);
int deleted = reader.deleteDocuments(term);
System.out.println("deleted " + deleted +
" documents containing " + term);

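IndexReader.delete(Term) becomes deleteDocuments(Term), and delete(int) becomes deleteDocument(int), here and in IndexHTML, FilterIndexReader, IndexModifier, and MultiReader below. A standalone sketch (the index location and field are illustrative):

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

class DeleteByTerm {
    static int deleteByPath(String indexDir, String path) throws IOException {
        IndexReader reader = IndexReader.open(indexDir);
        // 1.9 (removed): reader.delete(term)
        int deleted = reader.deleteDocuments(new Term("path", path));
        reader.close();
        return deleted;
    }
}
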
View File

@ -105,7 +105,7 @@ public class IndexHTML {
while (uidIter.term() != null && uidIter.term().field() == "uid") {
System.out.println("deleting " +
HTMLDocument.uid2url(uidIter.term().text()));
reader.delete(uidIter.term());
reader.deleteDocuments(uidIter.term());
uidIter.next();
}
deleting = false;
@ -137,7 +137,7 @@ public class IndexHTML {
if (deleting) { // delete stale docs
System.out.println("deleting " +
HTMLDocument.uid2url(uidIter.term().text()));
reader.delete(uidIter.term());
reader.deleteDocuments(uidIter.term());
}
uidIter.next();
}

View File

@ -16,22 +16,22 @@ package org.apache.lucene.demo;
* limitations under the License.
*/
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.FileReader;
import java.io.IOException;
import java.util.Date;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.FilterIndexReader;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Hits;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Searcher;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Date;
/** Simple command-line based search demo. */
public class SearchFiles {
@ -107,7 +107,7 @@ public class SearchFiles {
} else {
in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
}
QueryParser parser = new QueryParser(field, analyzer);
while (true) {
if (queries == null) // prompt the user
System.out.print("Query: ");
@ -117,7 +117,7 @@ public class SearchFiles {
if (line == null || line.length() == -1)
break;
Query query = QueryParser.parse(line, field, analyzer);
Query query = parser.parse(line);
System.out.println("Searching for: " + query.toString(field));
Hits hits = searcher.search(query);

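The static QueryParser.parse(String, String, Analyzer) gives way to constructing a parser once and reusing it, as the demo now does inside its loop. A sketch (the field name is illustrative):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;

class ParseMigration {
    static Query parse(String line) throws ParseException {
        // 1.9 (removed): QueryParser.parse(line, "contents", analyzer)
        QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
        return parser.parse(line);
    }
}
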
View File

@ -40,12 +40,6 @@ public class HTMLParser implements HTMLParserConstants {
}
}
/**
* @deprecated Use HTMLParser(FileInputStream) instead
*/
public HTMLParser(File file) throws FileNotFoundException {
this(new FileInputStream(file));
}
public String getTitle() throws IOException, InterruptedException {
if (pipeIn == null)

View File

@ -193,24 +193,6 @@ public class SimpleCharStream
return (c);
}
/**
* @deprecated
* @see #getEndColumn
*/
public int getColumn() {
return bufcolumn[bufpos];
}
/**
* @deprecated
* @see #getEndLine
*/
public int getLine() {
return bufline[bufpos];
}
public int getEndColumn() {
return bufcolumn[bufpos];
}

View File

@ -34,21 +34,8 @@ public abstract class Analyzer {
compatibility with older version. Override to allow Analyzer to choose
strategy based on document and/or field. Must be able to handle null
field name for backward compatibility. */
public TokenStream tokenStream(String fieldName, Reader reader)
{
// implemented for backward compatibility
return tokenStream(reader);
}
/** Creates a TokenStream which tokenizes all the text in the provided
* Reader. Provided for backward compatibility only.
* @deprecated use tokenStream(String, Reader) instead.
* @see #tokenStream(String, Reader)
*/
public TokenStream tokenStream(Reader reader)
{
return tokenStream(null, reader);
}
public abstract TokenStream tokenStream(String fieldName, Reader reader);
/**
* Invoked before indexing a Field instance if

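With the Reader-only overload gone, tokenStream(String, Reader) is the single abstract method an Analyzer subclass implements. A minimal sketch (the filter chain is illustrative):

import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;

class LowercaseWhitespaceAnalyzer extends Analyzer {
    // 1.9 also allowed overriding tokenStream(Reader); that hook is removed
    public TokenStream tokenStream(String fieldName, Reader reader) {
        return new LowerCaseFilter(new WhitespaceTokenizer(reader));
    }
}
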
View File

@ -18,7 +18,6 @@ package org.apache.lucene.analysis;
import java.io.IOException;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Set;
/**
@ -48,25 +47,6 @@ public final class StopFilter extends TokenFilter {
this.stopWords = makeStopSet(stopWords, ignoreCase);
}
/**
* Constructs a filter which removes words from the input
* TokenStream that are named in the Hashtable.
*
* @deprecated Use {@link #StopFilter(TokenStream, Set)} instead
*/
public StopFilter(TokenStream in, Hashtable stopTable) {
this(in, stopTable, false);
}
/**
* Constructs a filter which removes words from the input
* TokenStream that are named in the Hashtable.
* If ignoreCase is true, all keys in the stopTable should already
* be lowercased.
* @deprecated Use {@link #StopFilter(TokenStream, Set)} instead
*/
public StopFilter(TokenStream in, Hashtable stopTable, boolean ignoreCase) {
this(in, stopTable.keySet(), ignoreCase);
}
/**
* Construct a token stream filtering the given input.
@ -92,34 +72,6 @@ public final class StopFilter extends TokenFilter {
public StopFilter(TokenStream in, Set stopWords) {
this(in, stopWords, false);
}
/**
* Builds a Hashtable from an array of stop words,
* appropriate for passing into the StopFilter constructor.
* This permits this table construction to be cached once when
* an Analyzer is constructed.
*
* @deprecated Use {@link #makeStopSet(String[])} instead.
*/
public static final Hashtable makeStopTable(String[] stopWords) {
return makeStopTable(stopWords, false);
}
/**
* Builds a Hashtable from an array of stop words,
* appropriate for passing into the StopFilter constructor.
* This permits this table construction to be cached once when
* an Analyzer is constructed.
* @deprecated Use {@link #makeStopSet(java.lang.String[], boolean)} instead.
*/
public static final Hashtable makeStopTable(String [] stopWords, boolean ignoreCase) {
Hashtable stopTable = new Hashtable(stopWords.length);
for (int i = 0; i < stopWords.length; i++)
{
String stopWord = ignoreCase ? stopWords[i].toLowerCase() : stopWords[i];
stopTable.put(stopWord, stopWord);
}
return stopTable;
}
/**
* Builds a Set from an array of stop words,

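makeStopTable is gone in favor of makeStopSet, which pairs with the Set-based constructor kept above; WordlistLoader's getWordtable methods below follow the same pattern via getWordSet. A sketch (the stop words are illustrative):

import java.util.Set;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;

class StopWordMigration {
    static TokenStream withoutStopWords(TokenStream input) {
        // 1.9 (removed): StopFilter.makeStopTable(words) + StopFilter(in, Hashtable)
        Set stopSet = StopFilter.makeStopSet(new String[] { "a", "an", "the" });
        return new StopFilter(input, stopSet);
    }
}
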
View File

@ -27,10 +27,6 @@ public abstract class TokenFilter extends TokenStream {
/** The source of tokens for this filter. */
protected TokenStream input;
/** Call TokenFilter(TokenStream) instead.
* @deprecated */
protected TokenFilter() {}
/** Construct a token stream filtering the given input. */
protected TokenFilter(TokenStream input) {
this.input = input;

View File

@ -86,35 +86,6 @@ public class WordlistLoader {
return result;
}
/**
* @param path Path to the wordlist
* @param wordfile Name of the wordlist
*
* @deprecated Use {@link #getWordSet(File)} instead
*/
public static Hashtable getWordtable(String path, String wordfile) throws IOException {
return getWordtable(new File(path, wordfile));
}
/**
* @param wordfile Complete path to the wordlist
*
* @deprecated Use {@link #getWordSet(File)} instead
*/
public static Hashtable getWordtable(String wordfile) throws IOException {
return getWordtable(new File(wordfile));
}
/**
* @param wordfile File object that points to the wordlist
*
* @deprecated Use {@link #getWordSet(File)} instead
*/
public static Hashtable getWordtable(File wordfile) throws IOException {
HashSet wordSet = (HashSet)getWordSet(wordfile);
Hashtable result = makeWordTable(wordSet);
return result;
}
/**
* Builds a wordlist table, using words as both keys and values

View File

@ -1,111 +0,0 @@
package org.apache.lucene.analysis.de;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.LineNumberReader;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
/**
* Loader for text files that represent a list of stopwords.
*
* @deprecated Use {@link org.apache.lucene.analysis.WordlistLoader} instead
*
* @author Gerhard Schwarz
* @version $Id$
*/
public class WordlistLoader {
/**
* Loads a text file and adds every line as an entry to a HashSet (omitting
* leading and trailing whitespace). Every line of the file should contain only
* one word. The words need to be in lowercase if you make use of an
* Analyzer which uses LowerCaseFilter (like GermanAnalyzer).
*
* @param wordfile File containing the wordlist
* @return A HashSet with the file's words
*/
public static HashSet getWordSet(File wordfile) throws IOException {
HashSet result = new HashSet();
FileReader freader = null;
LineNumberReader lnr = null;
try {
freader = new FileReader(wordfile);
lnr = new LineNumberReader(freader);
String word = null;
while ((word = lnr.readLine()) != null) {
result.add(word.trim());
}
}
finally {
if (lnr != null)
lnr.close();
if (freader != null)
freader.close();
}
return result;
}
/**
* @param path Path to the wordlist
* @param wordfile Name of the wordlist
*
* @deprecated Use {@link #getWordSet(File)} instead
*/
public static Hashtable getWordtable(String path, String wordfile) throws IOException {
return getWordtable(new File(path, wordfile));
}
/**
* @param wordfile Complete path to the wordlist
*
* @deprecated Use {@link #getWordSet(File)} instead
*/
public static Hashtable getWordtable(String wordfile) throws IOException {
return getWordtable(new File(wordfile));
}
/**
* @param wordfile File object that points to the wordlist
*
* @deprecated Use {@link #getWordSet(File)} instead
*/
public static Hashtable getWordtable(File wordfile) throws IOException {
HashSet wordSet = (HashSet)getWordSet(wordfile);
Hashtable result = makeWordTable(wordSet);
return result;
}
/**
* Builds a wordlist table, using words as both keys and values
* for backward compatibility.
*
* @param wordSet stopword set
*/
private static Hashtable makeWordTable(HashSet wordSet) {
Hashtable table = new Hashtable();
for (Iterator iter = wordSet.iterator(); iter.hasNext();) {
String word = (String)iter.next();
table.put(word, word);
}
return table;
}
}

View File

@ -25,20 +25,6 @@ public interface CharStream {
*/
char readChar() throws java.io.IOException;
/**
* Returns the column position of the character last read.
* @deprecated
* @see #getEndColumn
*/
int getColumn();
/**
* Returns the line number of the character last read.
* @deprecated
* @see #getEndLine
*/
int getLine();
/**
* Returns the column number of the last character for current token (being
* matched after the last call to BeginToken).

View File

@ -16,30 +16,29 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import java.util.Date;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.RangeQuery;
import org.apache.lucene.search.PrefixQuery; // for javadoc
import org.apache.lucene.search.RangeQuery; // for javadoc
import java.util.Date; // for javadoc
/**
* Provides support for converting dates to strings and vice-versa.
* The strings are structured so that lexicographic sorting orders by date,
* which makes them suitable for use as field values and search terms.
*
*
* <P>Note that this class saves dates with millisecond granularity,
* which is bad for {@link RangeQuery} and {@link PrefixQuery}, as those
* queries are expanded to a BooleanQuery with a potentially large number
* queries are expanded to a BooleanQuery with a potentially large number
* of terms when searching. Thus you might want to use
* {@link DateTools} instead.
*
*
* <P>
* Note: dates before 1970 cannot be used, and therefore cannot be
* indexed when using this class. See {@link DateTools} for an
* alternative without such a limitation.
*
* @deprecated If you build a new index, use {@link DateTools} instead. For
* existing indices you can continue using this class, as it will not be
* removed in the near future despite being deprecated.
*
* @deprecated If you build a new index, use {@link DateTools} instead. This class is included for use with existing
* indices and will be removed in a future release.
*/
public class DateField {

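For new indices the notice points at DateTools, which also lets callers pick a coarser resolution than DateField's milliseconds, keeping RangeQuery/PrefixQuery expansion small. A sketch:

import java.util.Date;
import org.apache.lucene.document.DateTools;

class DateEncoding {
    static String indexableDate(Date d) {
        // DateField.dateToString(d) always kept millisecond precision;
        // DateTools can round to DAY, HOUR, SECOND, etc.
        return DateTools.dateToString(d, DateTools.Resolution.DAY);
    }
}
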
View File

@ -16,15 +16,14 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import java.io.Reader;
import java.io.Serializable;
import java.util.Date;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.util.Parameter;
import java.io.Reader;
import java.io.Serializable;
/**
A field is a section of a Document. Each field has two parts, a name and a
value. Values may be free text, provided as a String or as a Reader, or they
@ -53,45 +52,45 @@ public final class Field implements Serializable {
/** Specifies whether and how a field should be stored. */
public static final class Store extends Parameter implements Serializable {
private Store(String name) {
super(name);
}
/** Store the original field value in the index in a compressed form. This is
* useful for long documents and for binary valued fields.
*/
public static final Store COMPRESS = new Store("COMPRESS");
/** Store the original field value in the index. This is useful for short texts
* like a document's title which should be displayed with the results. The
* value is stored in its original form, i.e. no analyzer is used before it is
* stored.
* stored.
*/
public static final Store YES = new Store("YES");
/** Do not store the field value in the index. */
public static final Store NO = new Store("NO");
}
/** Specifies whether and how a field should be indexed. */
public static final class Index extends Parameter implements Serializable {
private Index(String name) {
super(name);
}
/** Do not index the field value. This field can thus not be searched,
* but one can still access its contents provided it is
* but one can still access its contents provided it is
* {@link Field.Store stored}. */
public static final Index NO = new Index("NO");
/** Index the field's value so it can be searched. An Analyzer will be used
* to tokenize and possibly further normalize the text before its
* terms will be stored in the index. This is useful for common text.
*/
public static final Index TOKENIZED = new Index("TOKENIZED");
/** Index the field's value without using an Analyzer, so it can be searched.
* As no analyzer is used the value will be stored as a single term. This is
* useful for unique Ids like product numbers.
@ -181,84 +180,6 @@ public final class Field implements Serializable {
public float getBoost() {
return boost;
}
/** Constructs a String-valued Field that is not tokenized, but is indexed
and stored. Useful for non-text fields, e.g. date or url.
@deprecated use {@link #Field(String, String, Field.Store, Field.Index)
Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)} instead */
public static final Field Keyword(String name, String value) {
return new Field(name, value, true, true, false);
}
/** Constructs a String-valued Field that is not tokenized nor indexed,
but is stored in the index, for return with hits.
@deprecated use {@link #Field(String, String, Field.Store, Field.Index)
Field(name, value, Field.Store.YES, Field.Index.NO)} instead */
public static final Field UnIndexed(String name, String value) {
return new Field(name, value, true, false, false);
}
/** Constructs a String-valued Field that is tokenized and indexed,
and is stored in the index, for return with hits. Useful for short text
fields, like "title" or "subject". Term vector will not be stored for this field.
@deprecated use {@link #Field(String, String, Field.Store, Field.Index)
Field(name, value, Field.Store.YES, Field.Index.TOKENIZED)} instead */
public static final Field Text(String name, String value) {
return Text(name, value, false);
}
/** Constructs a Date-valued Field that is not tokenized and is indexed,
and stored in the index, for return with hits.
@deprecated use {@link #Field(String, String, Field.Store, Field.Index)
Field(name, value, Field.Store.YES, Field.Index.UN_TOKENIZED)} instead */
public static final Field Keyword(String name, Date value) {
return new Field(name, DateField.dateToString(value), true, true, false);
}
/** Constructs a String-valued Field that is tokenized and indexed,
and is stored in the index, for return with hits. Useful for short text
fields, like "title" or "subject".
@deprecated use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)
Field(name, value, Field.Store.YES, Field.Index.TOKENIZED, storeTermVector)} instead */
public static final Field Text(String name, String value, boolean storeTermVector) {
return new Field(name, value, true, true, true, storeTermVector);
}
/** Constructs a String-valued Field that is tokenized and indexed,
but that is not stored in the index. Term vector will not be stored for this field.
@deprecated use {@link #Field(String, String, Field.Store, Field.Index)
Field(name, value, Field.Store.NO, Field.Index.TOKENIZED)} instead */
public static final Field UnStored(String name, String value) {
return UnStored(name, value, false);
}
/** Constructs a String-valued Field that is tokenized and indexed,
but that is not stored in the index.
@deprecated use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)
Field(name, value, Field.Store.NO, Field.Index.TOKENIZED, storeTermVector)} instead */
public static final Field UnStored(String name, String value, boolean storeTermVector) {
return new Field(name, value, false, true, true, storeTermVector);
}
/** Constructs a Reader-valued Field that is tokenized and indexed, but is
not stored in the index verbatim. Useful for longer text fields, like
"body". Term vector will not be stored for this field.
@deprecated use {@link #Field(String, Reader) Field(name, value)} instead */
public static final Field Text(String name, Reader value) {
return Text(name, value, false);
}
/** Constructs a Reader-valued Field that is tokenized and indexed, but is
not stored in the index verbatim. Useful for longer text fields, like
"body".
@deprecated use {@link #Field(String, Reader, Field.TermVector)
Field(name, value, storeTermVector)} instead */
public static final Field Text(String name, Reader value, boolean storeTermVector) {
Field f = new Field(name, value);
f.storeTermVector = storeTermVector;
return f;
}
/** Returns the name of the field as an interned string.
* For example "date", "title", "body", ...
*/
@ -405,15 +326,6 @@ public final class Field implements Serializable {
setStoreTermVector(termVector);
}
/** Create a field by specifying all parameters except for <code>storeTermVector</code>,
* which is set to <code>false</code>.
*
* @deprecated use {@link #Field(String, String, Field.Store, Field.Index)} instead
*/
public Field(String name, String string,
boolean store, boolean index, boolean token) {
this(name, string, store, index, token, false);
}
/**
@ -454,34 +366,6 @@ public final class Field implements Serializable {
setStoreTermVector(TermVector.NO);
}
/**
*
* @param name The name of the field
* @param string The string to process
* @param store true if the field should store the string
* @param index true if the field should be indexed
* @param token true if the field should be tokenized
* @param storeTermVector true if we should store the Term Vector info
*
* @deprecated use {@link #Field(String, String, Field.Store, Field.Index, Field.TermVector)} instead
*/
public Field(String name, String string,
boolean store, boolean index, boolean token, boolean storeTermVector) {
if (name == null)
throw new NullPointerException("name cannot be null");
if (string == null)
throw new NullPointerException("value cannot be null");
if (!index && storeTermVector)
throw new IllegalArgumentException("cannot store a term vector for fields that are not indexed");
this.name = name.intern(); // field names are interned
this.fieldsData = string;
this.isStored = store;
this.isIndexed = index;
this.isTokenized = token;
this.storeTermVector = storeTermVector;
}
private void setStoreTermVector(TermVector termVector) {
if (termVector == TermVector.NO) {
this.storeTermVector = false;

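Each removed factory maps directly onto the parameterized constructor named in its @deprecated tag. A sketch of common replacements (field names and values are illustrative):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

class FieldMigration {
    static Document example() {
        Document doc = new Document();
        // was: Field.Keyword("id", "42")
        doc.add(new Field("id", "42", Field.Store.YES, Field.Index.UN_TOKENIZED));
        // was: Field.Text("title", "Hello")
        doc.add(new Field("title", "Hello", Field.Store.YES, Field.Index.TOKENIZED));
        // was: Field.UnStored("body", "long text", true)
        doc.add(new Field("body", "long text", Field.Store.NO, Field.Index.TOKENIZED,
                          Field.TermVector.YES));
        return doc;
    }
}
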
View File

@ -17,7 +17,6 @@ package org.apache.lucene.index;
*/
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.io.IOException;
import java.util.Collection;
@ -130,21 +129,10 @@ public class FilterIndexReader extends IndexReader {
return in.termPositions();
}
protected void doDelete(int n) throws IOException { in.delete(n); }
protected void doDelete(int n) throws IOException { in.deleteDocument(n); }
protected void doCommit() throws IOException { in.commit(); }
protected void doClose() throws IOException { in.close(); }
public Collection getFieldNames() throws IOException {
return in.getFieldNames();
}
public Collection getFieldNames(boolean indexed) throws IOException {
return in.getFieldNames(indexed);
}
public Collection getIndexedFieldNames (Field.TermVector tvSpec){
return in.getIndexedFieldNames(tvSpec);
}
public Collection getFieldNames(IndexReader.FieldOption fieldNames) {
return in.getFieldNames(fieldNames);

View File

@ -16,15 +16,15 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
/**
* A class to modify an index, i.e. to delete and add documents. This
* class hides {@link IndexReader} and {@link IndexWriter} so that you
@ -260,21 +260,6 @@ public class IndexModifier {
}
}
/**
* Deletes all documents containing <code>term</code>.
* This is useful if one uses a document field to hold a unique ID string for
* the document. Then to delete such a document, one merely constructs a
* term with the appropriate field and the unique ID string as its text and
* passes it to this method. Returns the number of documents deleted.
* @return the number of documents deleted
* @see IndexReader#deleteDocuments(Term)
* @throws IllegalStateException if the index is closed
* @deprecated Use {@link #deleteDocuments(Term)} instead.
*/
public int delete(Term term) throws IOException {
return deleteDocuments(term);
}
/**
* Deletes the document numbered <code>docNum</code>.
* @see IndexReader#deleteDocument(int)
@ -288,15 +273,6 @@ public class IndexModifier {
}
}
/**
* Deletes the document numbered <code>docNum</code>.
* @see IndexReader#deleteDocument(int)
* @throws IllegalStateException if the index is closed
* @deprecated Use {@link #deleteDocument(int)} instead.
*/
public void delete(int docNum) throws IOException {
deleteDocument(docNum);
}
/**
* Returns the number of documents currently in this index.

View File

@ -503,18 +503,6 @@ public abstract class IndexReader {
}
}
/** Deletes the document numbered <code>docNum</code>. Once a document is
* deleted it will not appear in TermDocs or TermPositions enumerations.
* Attempts to read its field with the {@link #document}
* method will result in an error. The presence of this document may still be
* reflected in the {@link #docFreq} statistic, though
* this will be corrected eventually as the index is further modified.
*
* @deprecated Use {@link #deleteDocument(int docNum)} instead.
*/
public final synchronized void delete(int docNum) throws IOException {
deleteDocument(docNum);
}
/** Deletes the document numbered <code>docNum</code>. Once a document is
* deleted it will not appear in TermDocs or TermPositions enumerations.
@ -536,20 +524,6 @@ public abstract class IndexReader {
*/
protected abstract void doDelete(int docNum) throws IOException;
/** Deletes all documents containing <code>term</code>.
* This is useful if one uses a document field to hold a unique ID string for
* the document. Then to delete such a document, one merely constructs a
* term with the appropriate field and the unique ID string as its text and
* passes it to this method.
* See {@link #delete(int)} for information about when this deletion will
* become effective.
* @return the number of documents deleted
*
* @deprecated Use {@link #deleteDocuments(Term term)} instead.
*/
public final int delete(Term term) throws IOException {
return deleteDocuments(term);
}
/** Deletes all documents containing <code>term</code>.
* This is useful if one uses a document field to hold a unique ID string for
@ -640,61 +614,7 @@ public abstract class IndexReader {
writeLock = null;
}
}
/**
* Returns a list of all unique field names that exist in the index pointed
* to by this IndexReader.
* @return Collection of Strings indicating the names of the fields
* @throws IOException if there is a problem with accessing the index
*
* @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
*/
public abstract Collection getFieldNames() throws IOException;
/**
* Returns a list of all unique field names that exist in the index pointed
* to by this IndexReader. The boolean argument specifies whether the fields
* returned are indexed or not.
* @param indexed <code>true</code> if only indexed fields should be returned;
* <code>false</code> if only unindexed fields should be returned.
* @return Collection of Strings indicating the names of the fields
* @throws IOException if there is a problem with accessing the index
*
* @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
*/
public abstract Collection getFieldNames(boolean indexed) throws IOException;
/**
*
* @param storedTermVector if true, returns only Indexed fields that have term vector info,
* else only indexed fields without term vector info
* @return Collection of Strings indicating the names of the fields
*
* @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
*/
public Collection getIndexedFieldNames(boolean storedTermVector){
if(storedTermVector){
Set fieldSet = new HashSet();
fieldSet.addAll(getIndexedFieldNames(Field.TermVector.YES));
fieldSet.addAll(getIndexedFieldNames(Field.TermVector.WITH_POSITIONS));
fieldSet.addAll(getIndexedFieldNames(Field.TermVector.WITH_OFFSETS));
fieldSet.addAll(getIndexedFieldNames(Field.TermVector.WITH_POSITIONS_OFFSETS));
return fieldSet;
}
else
return getIndexedFieldNames(Field.TermVector.NO);
}
/**
* Get a list of unique field names that exist in this index, are indexed, and have
* the specified term vector information.
*
* @param tvSpec specifies which term vector information should be available for the fields
* @return Collection of Strings indicating the names of the fields
*
* @deprecated Replaced by {@link #getFieldNames(IndexReader.FieldOption)}
*/
public abstract Collection getIndexedFieldNames(Field.TermVector tvSpec);
/**
* Get a list of unique field names that exist in this index and have the specified

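All of the removed field-name accessors collapse into getFieldNames(IndexReader.FieldOption), here and in MultiReader, ParallelReader, and SegmentReader below. A sketch of the mapping:

import java.util.Collection;
import org.apache.lucene.index.IndexReader;

class FieldNameMigration {
    static void dump(IndexReader reader) {
        // was: reader.getFieldNames()
        Collection all = reader.getFieldNames(IndexReader.FieldOption.ALL);
        // was: reader.getFieldNames(true)
        Collection indexed = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
        // was: reader.getIndexedFieldNames(Field.TermVector.YES) and friends
        Collection withVectors = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
        System.out.println(all + " " + indexed + " " + withVectors);
    }
}
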
View File

@ -16,20 +16,20 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.IOException;
import java.io.File;
import java.io.PrintStream;
import java.util.Vector;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.document.Document;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.RAMDirectory;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Vector;
/**
@ -85,11 +85,6 @@ public class IndexWriter {
*/
public final static int DEFAULT_MAX_BUFFERED_DOCS = 10;
/**
* @deprecated use {@link #DEFAULT_MAX_BUFFERED_DOCS} instead
*/
public final static int DEFAULT_MIN_MERGE_DOCS = DEFAULT_MAX_BUFFERED_DOCS;
/**
* Default value is {@link Integer#MAX_VALUE}. Change using {@link #setMaxMergeDocs(int)}.
*/
@ -454,10 +449,9 @@ public class IndexWriter {
* the expected size. If you set it to Integer.MAX_VALUE, then the only limit
* is your memory, but you should anticipate an OutOfMemoryError.<p/>
* By default, no more than 10,000 terms will be indexed for a field.
*
* @deprecated use {@link #setMaxFieldLength} instead
*
*/
public int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
private int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;
/**
* Adds a document to this index. If the document contains more than
@ -502,10 +496,10 @@ public class IndexWriter {
* for batch index creation, and smaller values (< 10) for indices that are
* interactively maintained.
*
* <p>This must never be less than 2. The default value is 10.
* @deprecated use {@link #setMergeFactor} instead
* <p>This must never be less than 2. The default value is {@link DEFAULT_MERGE_FACTOR}.
*/
public int mergeFactor = DEFAULT_MERGE_FACTOR;
private int mergeFactor = DEFAULT_MERGE_FACTOR;
/** Determines the minimal number of documents required before the buffered
* in-memory documents are merged and a new Segment is created.
@ -513,10 +507,10 @@ public class IndexWriter {
* large value gives faster indexing. At the same time, mergeFactor limits
* the number of files open in a FSDirectory.
*
* <p> The default value is 10.
* @deprecated use {@link #setMaxBufferedDocs} instead
* <p> The default value is {@link DEFAULT_MAX_BUFFERED_DOCS}.
*/
public int minMergeDocs = DEFAULT_MIN_MERGE_DOCS;
private int minMergeDocs = DEFAULT_MAX_BUFFERED_DOCS;
/** Determines the largest number of documents ever merged by addDocument().
@ -524,15 +518,15 @@ public class IndexWriter {
* as this limits the length of pauses while indexing to a few seconds.
* Larger values are best for batched indexing and speedier searches.
*
* <p>The default value is {@link Integer#MAX_VALUE}.
* @deprecated use {@link #setMaxMergeDocs} instead
* <p>The default value is {@link DEFAULT_MAX_MERGE_DOCS}.
*/
public int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
private int maxMergeDocs = DEFAULT_MAX_MERGE_DOCS;
/** If non-null, information about merges will be printed to this.
* @deprecated use {@link #setInfoStream} instead
*/
public PrintStream infoStream = null;
private PrintStream infoStream = null;
/** Merges all segments together into a single segment, optimizing an index
for search. */

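The public tuning fields (mergeFactor, minMergeDocs, maxMergeDocs, infoStream, maxFieldLength) become private, so configuration now goes only through the setters introduced in 1.9. A sketch (directory and values are illustrative):

import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;

class TunedWriter {
    static IndexWriter open(String dir) throws IOException {
        IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true);
        // 1.9: writer.mergeFactor = 20; writer.minMergeDocs = 100;
        //      writer.infoStream = System.out;
        writer.setMergeFactor(20);
        writer.setMaxBufferedDocs(100);   // replaces minMergeDocs
        writer.setInfoStream(System.out);
        return writer;
    }
}
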
View File

@ -114,7 +114,7 @@ public class MultiReader extends IndexReader {
protected void doDelete(int n) throws IOException {
numDocs = -1; // invalidate cache
int i = readerIndex(n); // find segment num
subReaders[i].delete(n - starts[i]); // dispatch to segment reader
subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader
hasDeletions = true;
}
@ -224,45 +224,6 @@ public class MultiReader extends IndexReader {
subReaders[i].close();
}
/**
* @see IndexReader#getFieldNames()
*/
public Collection getFieldNames() throws IOException {
// maintain a unique set of field names
Set fieldSet = new HashSet();
for (int i = 0; i < subReaders.length; i++) {
IndexReader reader = subReaders[i];
Collection names = reader.getFieldNames();
fieldSet.addAll(names);
}
return fieldSet;
}
/**
* @see IndexReader#getFieldNames(boolean)
*/
public Collection getFieldNames(boolean indexed) throws IOException {
// maintain a unique set of field names
Set fieldSet = new HashSet();
for (int i = 0; i < subReaders.length; i++) {
IndexReader reader = subReaders[i];
Collection names = reader.getFieldNames(indexed);
fieldSet.addAll(names);
}
return fieldSet;
}
public Collection getIndexedFieldNames (Field.TermVector tvSpec){
// maintain a unique set of field names
Set fieldSet = new HashSet();
for (int i = 0; i < subReaders.length; i++) {
IndexReader reader = subReaders[i];
Collection names = reader.getIndexedFieldNames(tvSpec);
fieldSet.addAll(names);
}
return fieldSet;
}
/**
* @see IndexReader#getFieldNames(IndexReader.FieldOption)
*/

View File

@ -221,29 +221,6 @@ public class ParallelReader extends IndexReader {
((IndexReader)readers.get(i)).close();
}
public Collection getFieldNames() throws IOException {
return fieldToReader.keySet();
}
public Collection getFieldNames(boolean indexed) throws IOException {
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = ((IndexReader)readers.get(i));
Collection names = reader.getFieldNames(indexed);
fieldSet.addAll(names);
}
return fieldSet;
}
public Collection getIndexedFieldNames (Field.TermVector tvSpec){
Set fieldSet = new HashSet();
for (int i = 0; i < readers.size(); i++) {
IndexReader reader = ((IndexReader)readers.get(i));
Collection names = reader.getIndexedFieldNames(tvSpec);
fieldSet.addAll(names);
}
return fieldSet;
}
public Collection getFieldNames (IndexReader.FieldOption fieldNames) {
Set fieldSet = new HashSet();

View File

@ -315,86 +315,6 @@ class SegmentReader extends IndexReader {
return fieldsReader.size();
}
/**
* @see IndexReader#getFieldNames()
* @deprecated Replaced by {@link #getFieldNames (IndexReader.FieldOption fldOption)}
*/
public Collection getFieldNames() {
// maintain a unique set of field names
Set fieldSet = new HashSet();
for (int i = 0; i < fieldInfos.size(); i++) {
FieldInfo fi = fieldInfos.fieldInfo(i);
fieldSet.add(fi.name);
}
return fieldSet;
}
/**
* @see IndexReader#getFieldNames(boolean)
* @deprecated Replaced by {@link #getFieldNames (IndexReader.FieldOption fldOption)}
*/
public Collection getFieldNames(boolean indexed) {
// maintain a unique set of field names
Set fieldSet = new HashSet();
for (int i = 0; i < fieldInfos.size(); i++) {
FieldInfo fi = fieldInfos.fieldInfo(i);
if (fi.isIndexed == indexed)
fieldSet.add(fi.name);
}
return fieldSet;
}
/**
* @see IndexReader#getIndexedFieldNames(Field.TermVector tvSpec)
* @deprecated Replaced by {@link #getFieldNames (IndexReader.FieldOption fldOption)}
*/
public Collection getIndexedFieldNames (Field.TermVector tvSpec){
boolean storedTermVector;
boolean storePositionWithTermVector;
boolean storeOffsetWithTermVector;
if(tvSpec == Field.TermVector.NO){
storedTermVector = false;
storePositionWithTermVector = false;
storeOffsetWithTermVector = false;
}
else if(tvSpec == Field.TermVector.YES){
storedTermVector = true;
storePositionWithTermVector = false;
storeOffsetWithTermVector = false;
}
else if(tvSpec == Field.TermVector.WITH_POSITIONS){
storedTermVector = true;
storePositionWithTermVector = true;
storeOffsetWithTermVector = false;
}
else if(tvSpec == Field.TermVector.WITH_OFFSETS){
storedTermVector = true;
storePositionWithTermVector = false;
storeOffsetWithTermVector = true;
}
else if(tvSpec == Field.TermVector.WITH_POSITIONS_OFFSETS){
storedTermVector = true;
storePositionWithTermVector = true;
storeOffsetWithTermVector = true;
}
else{
throw new IllegalArgumentException("unknown termVector parameter " + tvSpec);
}
// maintain a unique set of field names
Set fieldSet = new HashSet();
for (int i = 0; i < fieldInfos.size(); i++) {
FieldInfo fi = fieldInfos.fieldInfo(i);
if (fi.isIndexed && fi.storeTermVector == storedTermVector &&
fi.storePositionWithTermVector == storePositionWithTermVector &&
fi.storeOffsetWithTermVector == storeOffsetWithTermVector){
fieldSet.add(fi.name);
}
}
return fieldSet;
}
/**
* @see IndexReader#getFieldNames(IndexReader.FieldOption fldOption)
*/

View File

@ -25,20 +25,6 @@ public interface CharStream {
*/
char readChar() throws java.io.IOException;
/**
* Returns the column position of the character last read.
* @deprecated
* @see #getEndColumn
*/
int getColumn();
/**
* Returns the line number of the character last read.
* @deprecated
* @see #getEndLine
*/
int getLine();
/**
* Returns the column number of the last character for current token (being
* matched after the last call to BeginToken).

View File

@ -16,8 +16,6 @@ package org.apache.lucene.queryParser;
* limitations under the License.
*/
import java.util.Vector;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@ -25,6 +23,8 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import java.util.Vector;
/**
* A QueryParser which constructs queries to search multiple fields.
*
@ -87,21 +87,7 @@ public class MultiFieldQueryParser extends QueryParser
protected Query getFieldQuery(String field, String queryText) throws ParseException {
return getFieldQuery(field, queryText, 0);
}
/**
* @deprecated use {@link #getFieldQuery(String, String)}
*/
protected Query getFieldQuery(String field, Analyzer analyzer, String queryText)
throws ParseException {
return getFieldQuery(field, queryText);
}
/**
* @deprecated use {@link #getFuzzyQuery(String, String, float)}
*/
protected Query getFuzzyQuery(String field, String termStr) throws ParseException {
return getFuzzyQuery(field, termStr, fuzzyMinSim);
}
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException
{
@ -140,14 +126,7 @@ public class MultiFieldQueryParser extends QueryParser
}
return super.getWildcardQuery(field, termStr);
}
/** @throws ParseException
* @deprecated use {@link #getRangeQuery(String, String, String, boolean)}
*/
protected Query getRangeQuery(String field, Analyzer analyzer,
String part1, String part2, boolean inclusive) throws ParseException {
return getRangeQuery(field, part1, part2, inclusive);
}
protected Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws ParseException {
if (field == null) {
@ -162,67 +141,7 @@ public class MultiFieldQueryParser extends QueryParser
}
/** @deprecated */
public static final int NORMAL_FIELD = 0;
/** @deprecated */
public static final int REQUIRED_FIELD = 1;
/** @deprecated */
public static final int PROHIBITED_FIELD = 2;
/**
* @deprecated use {@link #MultiFieldQueryParser(String[], Analyzer)} instead
*/
public MultiFieldQueryParser(QueryParserTokenManager tm)
{
super(tm);
}
/**
* @deprecated use {@link #MultiFieldQueryParser(String[], Analyzer)} instead
*/
public MultiFieldQueryParser(CharStream stream)
{
super(stream);
}
/**
* @deprecated use {@link #MultiFieldQueryParser(String[], Analyzer)} instead
*/
public MultiFieldQueryParser(String f, Analyzer a)
{
super(f, a);
}
/**
* Parses a query which searches on the fields specified.
* If x fields are specified, this effectively constructs:
*
* <code>
* (field1:query) (field2:query) (field3:query)...(fieldx:query)
* </code>
*
* @param query Query string to parse
* @param fields Fields to search on
* @param analyzer Analyzer to use
* @throws ParseException if query parsing fails
* @throws TokenMgrError if query parsing fails
* @deprecated use {@link #parse(String)} instead but note that it
* returns a different query for queries where all terms are required:
* its query accepts all terms, no matter in what field they occur, whereas
* the query built by this (deprecated) method expected all terms in all fields
* at the same time.
*/
public static Query parse(String query, String[] fields, Analyzer analyzer)
throws ParseException
{
BooleanQuery bQuery = new BooleanQuery();
for (int i = 0; i < fields.length; i++)
{
Query q = parse(query, fields[i], analyzer);
bQuery.add(q, BooleanClause.Occur.SHOULD);
}
return bQuery;
}
/**
* Parses a query which searches on the fields specified.
@ -256,64 +175,6 @@ public class MultiFieldQueryParser extends QueryParser
return bQuery;
}
/**
* Parses a query, searching on the fields specified.
* Use this if you need to specify certain fields as required,
* and others as prohibited.
* <p><pre>
* Usage:
* <code>
* String[] fields = {"filename", "contents", "description"};
* int[] flags = {MultiFieldQueryParser.NORMAL_FIELD,
* MultiFieldQueryParser.REQUIRED_FIELD,
* MultiFieldQueryParser.PROHIBITED_FIELD,};
* parse(query, fields, flags, analyzer);
* </code>
* </pre>
*<p>
* The code above would construct a query:
* <pre>
* <code>
* (filename:query) +(contents:query) -(description:query)
* </code>
* </pre>
*
* @param query Query string to parse
* @param fields Fields to search on
* @param flags Flags describing the fields
* @param analyzer Analyzer to use
* @throws ParseException if query parsing fails
* @throws TokenMgrError if query parsing fails
* @throws IllegalArgumentException if the length of the fields array differs
* from the length of the flags array
* @deprecated use {@link #parse(String, String[], BooleanClause.Occur[], Analyzer)} instead
*/
public static Query parse(String query, String[] fields, int[] flags,
Analyzer analyzer) throws ParseException
{
if (fields.length != flags.length)
throw new IllegalArgumentException("fields.length != flags.length");
BooleanQuery bQuery = new BooleanQuery();
for (int i = 0; i < fields.length; i++)
{
QueryParser qp = new QueryParser(fields[i], analyzer);
Query q = qp.parse(query);
int flag = flags[i];
switch (flag)
{
case REQUIRED_FIELD:
bQuery.add(q, BooleanClause.Occur.MUST);
break;
case PROHIBITED_FIELD:
bQuery.add(q, BooleanClause.Occur.MUST_NOT);
break;
default:
bQuery.add(q, BooleanClause.Occur.SHOULD);
break;
}
}
return bQuery;
}
/**
* Parses a query, searching on the fields specified.
@ -359,65 +220,7 @@ public class MultiFieldQueryParser extends QueryParser
return bQuery;
}
/**
* Parses a query, searching on the fields specified. Use this if you need to
* specify certain fields as required, and others as prohibited.
* <p>
* <pre>
* Usage:
* <code>
* String[] fields = { &quot;filename&quot;, &quot;contents&quot;, &quot;description&quot; };
* int[] flags = { MultiFieldQueryParser.NORMAL_FIELD,
* MultiFieldQueryParser.REQUIRED_FIELD,
* MultiFieldQueryParser.PROHIBITED_FIELD, };
* parse(query, fields, flags, analyzer);
* </code>
* </pre>
*
* <p>
* The code above would construct a query:
* <pre>
* <code>
* (filename:query1) +(contents:query2) -(description:query3)
* </code>
* </pre>
*
* @param queries Queries string to parse
* @param fields Fields to search on
* @param flags Flags describing the fields
* @param analyzer Analyzer to use
* @throws ParseException if query parsing fails
* @throws TokenMgrError if query parsing fails
* @throws IllegalArgumentException if the length of the queries, fields, and flags array differ
* @deprecated use {@link #parse(String[], String[], BooleanClause.Occur[], Analyzer)} instead
*/
public static Query parse(String[] queries, String[] fields, int[] flags,
Analyzer analyzer) throws ParseException
{
if (!(queries.length == fields.length && queries.length == flags.length))
throw new IllegalArgumentException("queries, fields, and flags array have have different length");
BooleanQuery bQuery = new BooleanQuery();
for (int i = 0; i < fields.length; i++)
{
QueryParser qp = new QueryParser(fields[i], analyzer);
Query q = qp.parse(queries[i]);
int flag = flags[i];
switch (flag)
{
case REQUIRED_FIELD:
bQuery.add(q, BooleanClause.Occur.MUST);
break;
case PROHIBITED_FIELD:
bQuery.add(q, BooleanClause.Occur.MUST_NOT);
break;
default:
bQuery.add(q, BooleanClause.Occur.SHOULD);
break;
}
}
return bQuery;
}
/**
* Parses a query, searching on the fields specified.
* Use this if you need to specify certain fields as required,

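The int flag arrays (NORMAL_FIELD, REQUIRED_FIELD, PROHIBITED_FIELD) are replaced by BooleanClause.Occur arrays in the surviving parse overloads. A sketch mirroring the javadoc example above:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.MultiFieldQueryParser;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.Query;

class MultiFieldMigration {
    static Query parse(String query) throws ParseException {
        String[] fields = { "filename", "contents", "description" };
        // was: { NORMAL_FIELD, REQUIRED_FIELD, PROHIBITED_FIELD }
        BooleanClause.Occur[] flags = { BooleanClause.Occur.SHOULD,
                                        BooleanClause.Occur.MUST,
                                        BooleanClause.Occur.MUST_NOT };
        return MultiFieldQueryParser.parse(query, fields, flags, new StandardAnalyzer());
    }
}
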
View File

@ -1,16 +1,33 @@
/* Generated By:JavaCC: Do not edit this line. QueryParser.java */
package org.apache.lucene.queryParser;
import java.util.Vector;
import java.io.*;
import java.text.*;
import java.util.*;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.*;
import org.apache.lucene.document.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RangeQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.util.Parameter;
import java.io.IOException;
import java.io.StringReader;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.Vector;
/**
* This class is generated by JavaCC. The most important method is
* {@link #parse(String)}.
@ -71,11 +88,6 @@ public class QueryParser implements QueryParserConstants {
private static final int MOD_NOT = 10;
private static final int MOD_REQ = 11;
/** @deprecated use {@link #OR_OPERATOR} instead */
public static final int DEFAULT_OPERATOR_OR = 0;
/** @deprecated use {@link #AND_OPERATOR} instead */
public static final int DEFAULT_OPERATOR_AND = 1;
// make it possible to call setDefaultOperator() without accessing
// the nested class:
/** Alternative form of QueryParser.Operator.AND */
@ -106,20 +118,6 @@ public class QueryParser implements QueryParserConstants {
static public final Operator AND = new Operator("AND");
}
/** Parses a query string, returning a {@link org.apache.lucene.search.Query}.
* @param query the query string to be parsed.
* @param field the default field for query terms.
* @param analyzer used to find terms in the query text.
* @throws ParseException if the parsing fails
*
* @deprecated Use an instance of QueryParser and the {@link #parse(String)} method instead.
*/
static public Query parse(String query, String field, Analyzer analyzer)
throws ParseException {
QueryParser parser = new QueryParser(field, analyzer);
return parser.parse(query);
}
/** Constructs a query parser.
* @param f the default field for query terms.
* @param a used to find terms in the query text.
@ -207,24 +205,6 @@ public class QueryParser implements QueryParserConstants {
return phraseSlop;
}
/**
* Sets the boolean operator of the QueryParser.
* In default mode (<code>DEFAULT_OPERATOR_OR</code>) terms without any modifiers
* are considered optional: for example <code>capital of Hungary</code> is equal to
* <code>capital OR of OR Hungary</code>.<br/>
* In <code>DEFAULT_OPERATOR_AND</code> terms are considered to be in conjunction: the
* above mentioned query is parsed as <code>capital AND of AND Hungary</code>
* @deprecated use {@link #setDefaultOperator(QueryParser.Operator)} instead
*/
public void setOperator(int op) {
if (op == DEFAULT_OPERATOR_AND)
this.operator = AND_OPERATOR;
else if (op == DEFAULT_OPERATOR_OR)
this.operator = OR_OPERATOR;
else
throw new IllegalArgumentException("Unknown operator " + op);
}
/**
* Sets the boolean operator of the QueryParser.
* In default mode (<code>OR_OPERATOR</code>) terms without any modifiers
@ -237,19 +217,6 @@ public class QueryParser implements QueryParserConstants {
this.operator = op;
}
/**
* Gets implicit operator setting, which will be either DEFAULT_OPERATOR_AND
* or DEFAULT_OPERATOR_OR.
* @deprecated use {@link #getDefaultOperator()} instead
*/
public int getOperator() {
if(operator == AND_OPERATOR)
return DEFAULT_OPERATOR_AND;
else if(operator == OR_OPERATOR)
return DEFAULT_OPERATOR_OR;
else
throw new IllegalStateException("Unknown operator " + operator);
}
/**
* Gets implicit operator setting, which will be either AND_OPERATOR
@ -259,14 +226,6 @@ public class QueryParser implements QueryParserConstants {
return operator;
}
/**
* Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
* lower-cased or not. Default is <code>true</code>.
* @deprecated use {@link #setLowercaseExpandedTerms(boolean)} instead
*/
public void setLowercaseWildcardTerms(boolean lowercaseExpandedTerms) {
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
}
/**
* Whether terms of wildcard, prefix, fuzzy and range queries are to be automatically
@@ -276,13 +235,6 @@ public class QueryParser implements QueryParserConstants {
this.lowercaseExpandedTerms = lowercaseExpandedTerms;
}
/**
* @deprecated use {@link #getLowercaseExpandedTerms()} instead
*/
public boolean getLowercaseWildcardTerms() {
return lowercaseExpandedTerms;
}
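Migration note: the wildcard-specific pair removed above folds into the expanded-terms pair; a one-line sketch:

    parser.setLowercaseExpandedTerms(false); // was setLowercaseWildcardTerms(false)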
/**
* @see #setLowercaseExpandedTerms(boolean)
*/
@@ -354,18 +306,6 @@ public class QueryParser implements QueryParserConstants {
throw new RuntimeException("Clause cannot be both required and prohibited");
}
/**
* Note that parameter analyzer is ignored. Calls inside the parser always
* use class member analyzer.
*
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getFieldQuery(String, String)}
*/
protected Query getFieldQuery(String field,
Analyzer analyzer,
String queryText) throws ParseException {
return getFieldQuery(field, queryText);
}
/**
* @exception ParseException throw in overridden method to disallow
@@ -450,20 +390,6 @@ public class QueryParser implements QueryParserConstants {
}
}
/**
* Note that parameter analyzer is ignored. Calls inside the parser always
* use class member analyzer.
*
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getFieldQuery(String, String, int)}
*/
protected Query getFieldQuery(String field,
Analyzer analyzer,
String queryText,
int slop) throws ParseException {
return getFieldQuery(field, queryText, slop);
}
/**
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
* This method may be overridden, for example, to return
@@ -485,20 +411,6 @@ public class QueryParser implements QueryParserConstants {
return query;
}
/**
* Note that parameter analyzer is ignored. Calls inside the parser always
* use class member analyzer.
*
* @exception ParseException throw in overridden method to disallow
* @deprecated use {@link #getRangeQuery(String, String, String, boolean)}
*/
protected Query getRangeQuery(String field,
Analyzer analyzer,
String part1,
String part2,
boolean inclusive) throws ParseException {
return getRangeQuery(field, part1, part2, inclusive);
}
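Migration note: subclasses that overrode the analyzer-taking factory methods removed here override the shorter forms instead; the parser's own analyzer is always used. A minimal sketch of the override pattern (the custom handling is a placeholder):

    QueryParser parser = new QueryParser("contents", analyzer) {
      protected Query getFieldQuery(String field, String queryText)
          throws ParseException {
        // custom per-field handling would go here
        return super.getFieldQuery(field, queryText);
      }
    };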
/**
* @exception ParseException throw in overridden method to disallow
@@ -642,12 +554,6 @@ public class QueryParser implements QueryParserConstants {
return new PrefixQuery(t);
}
/**
* @deprecated use {@link #getFuzzyQuery(String, String, float)}
*/
protected Query getFuzzyQuery(String field, String termStr) throws ParseException {
return getFuzzyQuery(field, termStr, fuzzyMinSim);
}
/**
* Factory method for generating a query (similar to
@@ -952,11 +858,11 @@ public class QueryParser implements QueryParserConstants {
{if (true) throw new ParseException("Minimum similarity for a FuzzyQuery has to be between 0.0f and 1.0f !");}
}
if(fms == fuzzyMinSim)
q = getFuzzyQuery(field, termImage);
q = getFuzzyQuery(field, termImage, fuzzyMinSim);
else
q = getFuzzyQuery(field, termImage, fms);
} else {
q = getFieldQuery(field, analyzer, termImage);
q = getFieldQuery(field, termImage);
}
break;
case RANGEIN_START:
@@ -1013,7 +919,7 @@ public class QueryParser implements QueryParserConstants {
} else {
goop2.image = discardEscapeChar(goop2.image);
}
q = getRangeQuery(field, analyzer, goop1.image, goop2.image, true);
q = getRangeQuery(field, goop1.image, goop2.image, true);
break;
case RANGEEX_START:
jj_consume_token(RANGEEX_START);
@@ -1070,7 +976,7 @@ public class QueryParser implements QueryParserConstants {
goop2.image = discardEscapeChar(goop2.image);
}
q = getRangeQuery(field, analyzer, goop1.image, goop2.image, false);
q = getRangeQuery(field, goop1.image, goop2.image, false);
break;
case QUOTED:
term = jj_consume_token(QUOTED);
@@ -1099,7 +1005,7 @@ public class QueryParser implements QueryParserConstants {
}
catch (Exception ignored) { }
}
q = getFieldQuery(field, analyzer, term.image.substring(1, term.image.length()-1), s);
q = getFieldQuery(field, term.image.substring(1, term.image.length()-1), s);
break;
default:
jj_la1[21] = jj_gen;


@@ -49,57 +49,18 @@ public class BooleanClause implements java.io.Serializable {
}
/** The query whose matching documents are combined by the boolean query.
* @deprecated use {@link #setQuery(Query)} instead */
public Query query; // TODO: decrease visibility for Lucene 2.0
/** If true, documents which <i>do not</i>
match this sub-query will <i>not</i> match the boolean query.
@deprecated use {@link #setOccur(BooleanClause.Occur)} instead */
public boolean required = false; // TODO: decrease visibility for Lucene 2.0
/** If true, documents which <i>do</i>
match this sub-query will <i>not</i> match the boolean query.
@deprecated use {@link #setOccur(BooleanClause.Occur)} instead */
public boolean prohibited = false; // TODO: decrease visibility for Lucene 2.0
*/
private Query query; // TODO: decrease visibility for Lucene 2.0
private Occur occur = Occur.SHOULD;
/** Constructs a BooleanClause with query <code>q</code>, required
* <code>r</code> and prohibited <code>p</code>.
* @deprecated use BooleanClause(Query, Occur) instead
* <ul>
* <li>For BooleanClause(query, true, false) use BooleanClause(query, BooleanClause.Occur.MUST)
* <li>For BooleanClause(query, false, false) use BooleanClause(query, BooleanClause.Occur.SHOULD)
* <li>For BooleanClause(query, false, true) use BooleanClause(query, BooleanClause.Occur.MUST_NOT)
* </ul>
*/
public BooleanClause(Query q, boolean r, boolean p) {
// TODO: remove for Lucene 2.0
query = q;
required = r;
prohibited = p;
if (required) {
if (prohibited) {
// prohibited && required doesn't make sense, but we want the old behaviour:
occur = Occur.MUST_NOT;
} else {
occur = Occur.MUST;
}
} else {
if (prohibited) {
occur = Occur.MUST_NOT;
} else {
occur = Occur.SHOULD;
}
}
}
/** Constructs a BooleanClause.
*/
public BooleanClause(Query query, Occur occur) {
this.query = query;
this.occur = occur;
setFields(occur);
}
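Migration note: the boolean-pair constructor maps onto the Occur constants exactly as the deprecation javadoc lists; a minimal sketch:

    // before (removed): new BooleanClause(q, true, false);
    BooleanClause must = new BooleanClause(q, BooleanClause.Occur.MUST);
    BooleanClause should = new BooleanClause(q, BooleanClause.Occur.SHOULD);    // was (q, false, false)
    BooleanClause mustNot = new BooleanClause(q, BooleanClause.Occur.MUST_NOT); // was (q, false, true)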
public Occur getOccur() {
@@ -108,7 +69,7 @@ public class BooleanClause implements java.io.Serializable {
public void setOccur(Occur occur) {
this.occur = occur;
setFields(occur);
}
public Query getQuery() {
@@ -120,27 +81,14 @@ public class BooleanClause implements java.io.Serializable {
}
public boolean isProhibited() {
return prohibited;
return Occur.MUST_NOT.equals(occur);
}
public boolean isRequired() {
return required;
return Occur.MUST.equals(occur);
}
private void setFields(Occur occur) {
if (occur == Occur.MUST) {
required = true;
prohibited = false;
} else if (occur == Occur.SHOULD) {
required = false;
prohibited = false;
} else if (occur == Occur.MUST_NOT) {
required = false;
prohibited = true;
} else {
throw new IllegalArgumentException("Unknown operator " + occur);
}
}
/** Returns true iff <code>o</code> is equal to this. */
public boolean equals(Object o) {
@@ -148,13 +96,12 @@ public class BooleanClause implements java.io.Serializable {
return false;
BooleanClause other = (BooleanClause)o;
return this.query.equals(other.query)
&& (this.required == other.required)
&& (this.prohibited == other.prohibited);
&& this.occur.equals(other.occur);
}
/** Returns a hash code value for this object.*/
public int hashCode() {
return query.hashCode() ^ (this.required?1:0) ^ (this.prohibited?2:0);
return query.hashCode() ^ (Occur.MUST.equals(occur)?1:0) ^ (Occur.MUST_NOT.equals(occur)?2:0);
}


@@ -16,14 +16,14 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;
import java.util.Iterator;
import java.util.Set;
import java.util.Vector;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.util.ToStringUtils;
/** A Query that matches documents matching boolean combinations of other
* queries, e.g. {@link TermQuery}s, {@link PhraseQuery}s or other
* BooleanQuerys.
@@ -31,9 +31,9 @@ import org.apache.lucene.util.ToStringUtils;
public class BooleanQuery extends Query {
/**
* @deprecated use {@link #setMaxClauseCount(int)} instead
*/
public static int maxClauseCount = 1024;
private static int maxClauseCount = 1024;
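Migration note: with the field private, the static accessors are the only way to read or change the limit; a minimal sketch:

    BooleanQuery.setMaxClauseCount(2048); // was BooleanQuery.maxClauseCount = 2048;
    int limit = BooleanQuery.getMaxClauseCount();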
/** Thrown when an attempt is made to add more than {@link
* #getMaxClauseCount()} clauses. This typically happens if
@@ -142,30 +142,6 @@ public class BooleanQuery extends Query {
return minNrShouldMatch;
}
/** Adds a clause to a boolean query. Clauses may be:
* <ul>
* <li><code>required</code> which means that documents which <i>do not</i>
* match this sub-query will <i>not</i> match the boolean query;
* <li><code>prohibited</code> which means that documents which <i>do</i>
* match this sub-query will <i>not</i> match the boolean query; or
* <li>neither, in which case matched documents are neither prohibited from
* nor required to match the sub-query. However, a document must match at
* least 1 sub-query to match the boolean query.
* </ul>
* It is an error to specify a clause as both <code>required</code> and
* <code>prohibited</code>.
*
* @deprecated use {@link #add(Query, BooleanClause.Occur)} instead:
* <ul>
* <li>For add(query, true, false) use add(query, BooleanClause.Occur.MUST)
* <li>For add(query, false, false) use add(query, BooleanClause.Occur.SHOULD)
* <li>For add(query, false, true) use add(query, BooleanClause.Occur.MUST_NOT)
* </ul>
*/
public void add(Query query, boolean required, boolean prohibited) {
add(new BooleanClause(query, required, prohibited));
}
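Migration note: the same mapping the deprecation javadoc spells out, as code:

    // before (removed): booleanQuery.add(q, true, false);
    booleanQuery.add(q, BooleanClause.Occur.MUST);
    booleanQuery.add(q, BooleanClause.Occur.SHOULD);   // was add(q, false, false)
    booleanQuery.add(q, BooleanClause.Occur.MUST_NOT); // was add(q, false, true)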
/** Adds a clause to a boolean query.
*
* @throws TooManyClauses if the new number of clauses exceeds the maximum clause number


@@ -1,148 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.BitSet;
import java.util.Date;
import java.io.IOException;
import org.apache.lucene.document.DateField;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.index.IndexReader;
/**
* A Filter that restricts search results to a range of time.
*
* <p>For this to work, documents must have been indexed with a
* {@link DateField}.</p>
*
* @deprecated Instead, use {@link RangeFilter} combined with
* {@link org.apache.lucene.document.DateTools}.
*/
public class DateFilter extends Filter {
String field;
String start = DateField.MIN_DATE_STRING();
String end = DateField.MAX_DATE_STRING();
private DateFilter(String f) {
field = f;
}
/**
* Constructs a filter for field <code>f</code> matching dates
* between <code>from</code> and <code>to</code> inclusively.
*/
public DateFilter(String f, Date from, Date to) {
field = f;
start = DateField.dateToString(from);
end = DateField.dateToString(to);
}
/**
* Constructs a filter for field <code>f</code> matching times
* between <code>from</code> and <code>to</code> inclusively.
*/
public DateFilter(String f, long from, long to) {
field = f;
start = DateField.timeToString(from);
end = DateField.timeToString(to);
}
/**
* Constructs a filter for field <code>f</code> matching
* dates on or before <code>date</code>.
*/
public static DateFilter Before(String field, Date date) {
DateFilter result = new DateFilter(field);
result.end = DateField.dateToString(date);
return result;
}
/**
* Constructs a filter for field <code>f</code> matching times
* on or before <code>time</code>.
*/
public static DateFilter Before(String field, long time) {
DateFilter result = new DateFilter(field);
result.end = DateField.timeToString(time);
return result;
}
/**
* Constructs a filter for field <code>f</code> matching
* dates on or after <code>date</code>.
*/
public static DateFilter After(String field, Date date) {
DateFilter result = new DateFilter(field);
result.start = DateField.dateToString(date);
return result;
}
/**
* Constructs a filter for field <code>f</code> matching
* times on or after <code>time</code>.
*/
public static DateFilter After(String field, long time) {
DateFilter result = new DateFilter(field);
result.start = DateField.timeToString(time);
return result;
}
/**
* Returns a BitSet with true for documents which should be
* permitted in search results, and false for those that should
* not.
*/
public BitSet bits(IndexReader reader) throws IOException {
BitSet bits = new BitSet(reader.maxDoc());
TermEnum enumerator = reader.terms(new Term(field, start));
TermDocs termDocs = reader.termDocs();
if (enumerator.term() == null) {
return bits;
}
try {
Term stop = new Term(field, end);
while (enumerator.term().compareTo(stop) <= 0) {
termDocs.seek(enumerator.term());
while (termDocs.next()) {
bits.set(termDocs.doc());
}
if (!enumerator.next()) {
break;
}
}
} finally {
enumerator.close();
termDocs.close();
}
return bits;
}
public String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append(field);
buffer.append(":");
buffer.append(DateField.stringToDate(start).toString());
buffer.append("-");
buffer.append(DateField.stringToDate(end).toString());
return buffer.toString();
}
}
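Migration note: a minimal sketch of the RangeFilter/DateTools replacement named in the deprecation tag (imports of DateTools and RangeFilter omitted); the DAY resolution is an assumption, use whatever granularity the field was indexed with:

    String lower = DateTools.dateToString(from, DateTools.Resolution.DAY);
    String upper = DateTools.dateToString(to, DateTools.Resolution.DAY);
    Filter filter = new RangeFilter("date", lower, upper, true, true); // inclusive bounds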


@@ -146,12 +146,6 @@ public class MultiSearcher extends Searcher {
return searchables[i].doc(n - starts[i]); // dispatch to searcher
}
/** Call {@link #subSearcher} instead.
* @deprecated
*/
public int searcherIndex(int n) {
return subSearcher(n);
}
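Migration note: a straight rename; a one-line sketch:

    int i = multiSearcher.subSearcher(docNum); // was multiSearcher.searcherIndex(docNum)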
/** Returns index of the searcher for document <code>n</code> in the array
* used to construct this searcher. */


@@ -1,273 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Vector;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultipleTermPositions;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermPositions;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.ToStringUtils;
/**
* PhrasePrefixQuery is a generalized version of PhraseQuery, with an added
* method {@link #add(Term[])}.
* To use this class to search for the phrase "Microsoft app*", first use
* add(Term) on the term "Microsoft", then find all terms that have "app" as
* a prefix using IndexReader.terms(Term), and use PhrasePrefixQuery.add(Term[]
* terms) to add them to the query.
*
* @deprecated use {@link org.apache.lucene.search.MultiPhraseQuery} instead
* @author Anders Nielsen
* @version 1.0
*/
public class PhrasePrefixQuery extends Query {
private String field;
private ArrayList termArrays = new ArrayList();
private Vector positions = new Vector();
private int slop = 0;
/** Sets the phrase slop for this query.
* @see PhraseQuery#setSlop(int)
*/
public void setSlop(int s) { slop = s; }
/** Sets the phrase slop for this query.
* @see PhraseQuery#getSlop()
*/
public int getSlop() { return slop; }
/** Add a single term at the next position in the phrase.
* @see PhraseQuery#add(Term)
*/
public void add(Term term) { add(new Term[]{term}); }
/** Add multiple terms at the next position in the phrase. Any of the terms
* may match.
*
* @see PhraseQuery#add(Term)
*/
public void add(Term[] terms) {
int position = 0;
if (positions.size() > 0)
position = ((Integer) positions.lastElement()).intValue() + 1;
add(terms, position);
}
/**
* Allows specifying the relative position of terms within the phrase.
*
* @see PhraseQuery#add(Term, int)
* @param terms the terms that may match at this position
* @param position the relative position at which to add the terms
*/
public void add(Term[] terms, int position) {
if (termArrays.size() == 0)
field = terms[0].field();
for (int i = 0; i < terms.length; i++) {
if (terms[i].field() != field) {
throw new IllegalArgumentException(
"All phrase terms must be in the same field (" + field + "): "
+ terms[i]);
}
}
termArrays.add(terms);
positions.addElement(new Integer(position));
}
/**
* Returns the relative positions of terms in this phrase.
*/
public int[] getPositions() {
int[] result = new int[positions.size()];
for (int i = 0; i < positions.size(); i++)
result[i] = ((Integer) positions.elementAt(i)).intValue();
return result;
}
private class PhrasePrefixWeight implements Weight {
private Similarity similarity;
private float value;
private float idf;
private float queryNorm;
private float queryWeight;
public PhrasePrefixWeight(Searcher searcher)
throws IOException {
this.similarity = getSimilarity(searcher);
// compute idf
Iterator i = termArrays.iterator();
while (i.hasNext()) {
Term[] terms = (Term[])i.next();
for (int j=0; j<terms.length; j++) {
idf += getSimilarity(searcher).idf(terms[j], searcher);
}
}
}
public Query getQuery() { return PhrasePrefixQuery.this; }
public float getValue() { return value; }
public float sumOfSquaredWeights() {
queryWeight = idf * getBoost(); // compute query weight
return queryWeight * queryWeight; // square it
}
public void normalize(float queryNorm) {
this.queryNorm = queryNorm;
queryWeight *= queryNorm; // normalize query weight
value = queryWeight * idf; // idf for document
}
public Scorer scorer(IndexReader reader) throws IOException {
if (termArrays.size() == 0) // optimize zero-term case
return null;
TermPositions[] tps = new TermPositions[termArrays.size()];
for (int i=0; i<tps.length; i++) {
Term[] terms = (Term[])termArrays.get(i);
TermPositions p;
if (terms.length > 1)
p = new MultipleTermPositions(reader, terms);
else
p = reader.termPositions(terms[0]);
if (p == null)
return null;
tps[i] = p;
}
if (slop == 0)
return new ExactPhraseScorer(this, tps, getPositions(), similarity,
reader.norms(field));
else
return new SloppyPhraseScorer(this, tps, getPositions(), similarity,
slop, reader.norms(field));
}
public Explanation explain(IndexReader reader, int doc)
throws IOException {
Explanation result = new Explanation();
result.setDescription("weight("+getQuery()+" in "+doc+"), product of:");
Explanation idfExpl = new Explanation(idf, "idf("+getQuery()+")");
// explain query weight
Explanation queryExpl = new Explanation();
queryExpl.setDescription("queryWeight(" + getQuery() + "), product of:");
Explanation boostExpl = new Explanation(getBoost(), "boost");
if (getBoost() != 1.0f)
queryExpl.addDetail(boostExpl);
queryExpl.addDetail(idfExpl);
Explanation queryNormExpl = new Explanation(queryNorm,"queryNorm");
queryExpl.addDetail(queryNormExpl);
queryExpl.setValue(boostExpl.getValue() *
idfExpl.getValue() *
queryNormExpl.getValue());
result.addDetail(queryExpl);
// explain field weight
Explanation fieldExpl = new Explanation();
fieldExpl.setDescription("fieldWeight("+getQuery()+" in "+doc+
"), product of:");
Explanation tfExpl = scorer(reader).explain(doc);
fieldExpl.addDetail(tfExpl);
fieldExpl.addDetail(idfExpl);
Explanation fieldNormExpl = new Explanation();
byte[] fieldNorms = reader.norms(field);
float fieldNorm =
fieldNorms!=null ? Similarity.decodeNorm(fieldNorms[doc]) : 0.0f;
fieldNormExpl.setValue(fieldNorm);
fieldNormExpl.setDescription("fieldNorm(field="+field+", doc="+doc+")");
fieldExpl.addDetail(fieldNormExpl);
fieldExpl.setValue(tfExpl.getValue() *
idfExpl.getValue() *
fieldNormExpl.getValue());
result.addDetail(fieldExpl);
// combine them
result.setValue(queryExpl.getValue() * fieldExpl.getValue());
if (queryExpl.getValue() == 1.0f)
return fieldExpl;
return result;
}
}
protected Weight createWeight(Searcher searcher) throws IOException {
if (termArrays.size() == 1) { // optimize one-term case
Term[] terms = (Term[])termArrays.get(0);
BooleanQuery boq = new BooleanQuery(true);
for (int i=0; i<terms.length; i++) {
boq.add(new TermQuery(terms[i]), BooleanClause.Occur.SHOULD);
}
boq.setBoost(getBoost());
return boq.createWeight(searcher);
}
return new PhrasePrefixWeight(searcher);
}
/** Prints a user-readable version of this query. */
public final String toString(String f) {
StringBuffer buffer = new StringBuffer();
if (!field.equals(f)) {
buffer.append(field);
buffer.append(":");
}
buffer.append("\"");
Iterator i = termArrays.iterator();
while (i.hasNext()) {
Term[] terms = (Term[])i.next();
buffer.append(terms[0].text() + (terms.length > 1 ? "*" : ""));
if (i.hasNext())
buffer.append(" ");
}
buffer.append("\"");
if (slop != 0) {
buffer.append("~");
buffer.append(slop);
}
buffer.append(ToStringUtils.boost(getBoost()));
return buffer.toString();
}
}
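Migration note: MultiPhraseQuery keeps the add(Term)/add(Term[]) surface described in the class javadoc above, so the "Microsoft app*" recipe carries over. A minimal sketch, with the prefix enumeration elided (appTerms is assumed to hold the Term[] of expansions gathered via IndexReader.terms(Term)):

    MultiPhraseQuery q = new MultiPhraseQuery();
    q.add(new Term("body", "microsoft"));
    q.add(appTerms); // any one of the expansions may match at this position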


@@ -16,14 +16,13 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import java.util.BitSet;
import java.io.IOException;
import org.apache.lucene.search.Filter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.index.IndexReader;
import java.io.IOException;
import java.util.BitSet;
/**
* A Filter that restricts search results to a range of values in a given
@@ -31,7 +30,7 @@ import org.apache.lucene.index.IndexReader;
*
* <p>
* This code borrows heavily from {@link RangeQuery}, but is implemented as a Filter
* (much like {@link DateFilter}).
*
* </p>
*/
public class RangeFilter extends Filter {


@@ -16,16 +16,15 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import java.io.IOException;
import java.rmi.Naming;
import java.rmi.RemoteException;
import java.rmi.RMISecurityManager;
import java.rmi.server.UnicastRemoteObject;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import java.io.IOException;
import java.rmi.Naming;
import java.rmi.RMISecurityManager;
import java.rmi.RemoteException;
import java.rmi.server.UnicastRemoteObject;
/**
* A remote searchable implementation.
*
@@ -42,13 +41,7 @@ public class RemoteSearchable
super();
this.local = local;
}
// this implementation should be removed when the deprecated
// Searchable#search(Query,Filter,HitCollector) is removed
public void search(Query query, Filter filter, HitCollector results)
throws IOException {
local.search(query, filter, results);
}
public void search(Weight weight, Filter filter, HitCollector results)
throws IOException {
@@ -72,22 +65,10 @@ public class RemoteSearchable
return local.maxDoc();
}
// this implementation should be removed when the deprecated
// Searchable#search(Query,Filter,int) is removed
public TopDocs search(Query query, Filter filter, int n) throws IOException {
return local.search(query, filter, n);
}
public TopDocs search(Weight weight, Filter filter, int n) throws IOException {
return local.search(weight, filter, n);
}
// this implementation should be removed when the deprecated
// Searchable#search(Query,Filter,int,Sort) is removed
public TopFieldDocs search (Query query, Filter filter, int n, Sort sort)
throws IOException {
return local.search (query, filter, n, sort);
}
public TopFieldDocs search (Weight weight, Filter filter, int n, Sort sort)
throws IOException {
@@ -102,12 +83,6 @@ public class RemoteSearchable
return local.rewrite(original);
}
// this implementation should be removed when the deprecated
// Searchable#explain(Query,int) is removed
public Explanation explain(Query query, int doc) throws IOException {
return local.explain(query, doc);
}
public Explanation explain(Weight weight, int doc) throws IOException {
return local.explain(weight, doc);
}


@@ -16,11 +16,11 @@ package org.apache.lucene.search;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader; // for javadoc
import java.io.IOException; // for javadoc
/** The interface for search implementations.
*
@@ -52,11 +52,6 @@ public interface Searchable extends java.rmi.Remote {
void search(Weight weight, Filter filter, HitCollector results)
throws IOException;
/** Expert: Low-level search implementation.
* @deprecated use {@link Searcher#search(Weight, Filter, HitCollector)} instead.
*/
void search(Query query, Filter filter, HitCollector results)
throws IOException;
/** Frees resources associated with this Searcher.
* Be careful not to call this method while you are still using objects
@@ -93,11 +88,6 @@ public interface Searchable extends java.rmi.Remote {
*/
TopDocs search(Weight weight, Filter filter, int n) throws IOException;
/** Expert: Low-level search implementation.
* @deprecated use {@link Searcher#search(Weight, Filter, int)} instead.
*/
TopDocs search(Query query, Filter filter, int n) throws IOException;
/** Expert: Returns the stored fields of document <code>i</code>.
* Called by {@link HitCollector} implementations.
* @see IndexReader#document(int)
@@ -122,11 +112,6 @@ public interface Searchable extends java.rmi.Remote {
*/
Explanation explain(Weight weight, int doc) throws IOException;
/**
* @deprecated use {@link Searcher#explain(Weight, int)} instead.
*/
Explanation explain(Query query, int doc) throws IOException;
/** Expert: Low-level search implementation with arbitrary sorting. Finds
* the top <code>n</code> hits for <code>query</code>, applying
* <code>filter</code> if non-null, and sorting the hits by the criteria in
@@ -139,9 +124,4 @@ public interface Searchable extends java.rmi.Remote {
TopFieldDocs search(Weight weight, Filter filter, int n, Sort sort)
throws IOException;
/** Expert: Low-level search implementation.
* @deprecated use {@link Searcher#search(Weight, Filter, int, Sort)} instead.
*/
TopFieldDocs search(Query query, Filter filter, int n, Sort sort)
throws IOException;
}
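Migration note: callers holding a plain Query can still reach the remaining Weight-only methods by weighting the query first; a minimal sketch, assuming Query.weight(Searcher) as it exists in this era of the API:

    Weight w = query.weight(searcher); // rewrites the query and builds its Weight
    TopDocs top = searchable.search(w, null, 10);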


@@ -62,31 +62,15 @@ public abstract class Directory {
public abstract long fileLength(String name)
throws IOException;
/** @deprecated use {@link #createOutput(String)} */
public OutputStream createFile(String name) throws IOException {
return (OutputStream)createOutput(name);
}
/** Creates a new, empty file in the directory with the given name.
Returns a stream writing this file. */
public IndexOutput createOutput(String name) throws IOException {
// default implementation for back compatibility
// this method should be abstract
return (IndexOutput)createFile(name);
}
public abstract IndexOutput createOutput(String name) throws IOException;
/** @deprecated use {@link #openInput(String)} */
public InputStream openFile(String name) throws IOException {
return (InputStream)openInput(name);
}
/** Returns a stream reading an existing file. */
public IndexInput openInput(String name)
throws IOException {
// default implementation for back compatibility
// this method should be abstract
return (IndexInput)openFile(name);
}
public abstract IndexInput openInput(String name)
throws IOException;
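Migration note: with the pass-through defaults gone, Directory subclasses must implement both methods and callers use the Index* types directly; a minimal sketch (path and file name are illustrative):

    Directory dir = FSDirectory.getDirectory("/tmp/demo-index", true);
    IndexOutput out = dir.createOutput("demo.bin"); // was createFile("demo.bin")
    out.writeInt(42);
    out.close();
    IndexInput in = dir.openInput("demo.bin");      // was openFile("demo.bin")
    int value = in.readInt();
    in.close();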
/** Construct a {@link Lock}.
* @param name the name of the lock file


@@ -1,28 +0,0 @@
package org.apache.lucene.store;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @deprecated Use {@link IndexInput} or {@link BufferedIndexInput} instead.*/
public abstract class InputStream extends BufferedIndexInput {
protected long length; // set by subclasses
public long length() {
return length;
}
}


@@ -16,8 +16,6 @@ package org.apache.lucene.store;
* limitations under the License.
*/
import org.apache.lucene.index.IndexWriter;
import java.io.IOException;
/** An interprocess mutex lock.
@@ -80,14 +78,6 @@ public abstract class Lock {
private Lock lock;
private long lockWaitTimeout;
/** Constructs an executor that will grab the named lock.
* Defaults lockWaitTimeout to Lock.COMMIT_LOCK_TIMEOUT.
* @deprecated Kept only to avoid breaking existing code.
*/
public With(Lock lock)
{
this(lock, IndexWriter.COMMIT_LOCK_TIMEOUT);
}
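Migration note: the wait timeout is now explicit; a minimal sketch that keeps the old default:

    Object result = new Lock.With(lock, IndexWriter.COMMIT_LOCK_TIMEOUT) {
      public Object doBody() throws IOException {
        // work performed while the lock is held
        return null;
      }
    }.run();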
/** Constructs an executor that will grab the named lock. */
public With(Lock lock, long lockWaitTimeout) {


@@ -1,22 +0,0 @@
package org.apache.lucene.store;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/** @deprecated Use {@link IndexOutput} or {@link BufferedIndexOutput}
* instead.*/
public abstract class OutputStream extends BufferedIndexOutput {
}


@@ -1,92 +0,0 @@
package org.apache.lucene.analysis;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.*;
import junit.framework.*;
import org.apache.lucene.*;
import org.apache.lucene.analysis.*;
public class TestAnalyzers extends TestCase {
public TestAnalyzers(String name) {
super(name);
}
public void assertAnalyzesTo(Analyzer a,
String input,
String[] output) throws Exception {
TokenStream ts = a.tokenStream("dummy", new StringReader(input));
for (int i=0; i<output.length; i++) {
Token t = ts.next();
assertNotNull(t);
assertEquals(output[i], t.termText());
}
assertNull(ts.next());
ts.close();
}
public void testSimple() throws Exception {
Analyzer a = new SimpleAnalyzer();
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo bar . FOO <> BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo.bar.FOO.BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "U.S.A.",
new String[] { "u", "s", "a" });
assertAnalyzesTo(a, "C++",
new String[] { "c" });
assertAnalyzesTo(a, "B2B",
new String[] { "b", "b" });
assertAnalyzesTo(a, "2B",
new String[] { "b" });
assertAnalyzesTo(a, "\"QUOTED\" word",
new String[] { "quoted", "word" });
}
public void testNull() throws Exception {
Analyzer a = new WhitespaceAnalyzer();
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "FOO", "BAR" });
assertAnalyzesTo(a, "foo bar . FOO <> BAR",
new String[] { "foo", "bar", ".", "FOO", "<>", "BAR" });
assertAnalyzesTo(a, "foo.bar.FOO.BAR",
new String[] { "foo.bar.FOO.BAR" });
assertAnalyzesTo(a, "U.S.A.",
new String[] { "U.S.A." });
assertAnalyzesTo(a, "C++",
new String[] { "C++" });
assertAnalyzesTo(a, "B2B",
new String[] { "B2B" });
assertAnalyzesTo(a, "2B",
new String[] { "2B" });
assertAnalyzesTo(a, "\"QUOTED\" word",
new String[] { "\"QUOTED\"", "word" });
}
public void testStop() throws Exception {
Analyzer a = new StopAnalyzer();
assertAnalyzesTo(a, "foo bar FOO BAR",
new String[] { "foo", "bar", "foo", "bar" });
assertAnalyzesTo(a, "foo a bar such FOO THESE BAR",
new String[] { "foo", "bar", "foo", "bar" });
}
}


@@ -1,43 +0,0 @@
package org.apache.lucene.analysis;
import junit.framework.TestCase;
import java.io.StringReader;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class TestPerFieldAnalzyerWrapper extends TestCase {
public void testPerField() throws Exception {
String text = "Qwerty";
PerFieldAnalyzerWrapper analyzer =
new PerFieldAnalyzerWrapper(new WhitespaceAnalyzer());
analyzer.addAnalyzer("special", new SimpleAnalyzer());
TokenStream tokenStream = analyzer.tokenStream("field",
new StringReader(text));
Token token = tokenStream.next();
assertEquals("WhitespaceAnalyzer does not lowercase",
"Qwerty",
token.termText());
tokenStream = analyzer.tokenStream("special",
new StringReader(text));
token = tokenStream.next();
assertEquals("SimpleAnalyzer lowercases",
"qwerty",
token.termText());
}
}


@@ -1,76 +0,0 @@
package org.apache.lucene.analysis;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import java.io.StringReader;
import java.io.IOException;
import java.util.Set;
import java.util.HashSet;
public class TestStopAnalyzer extends TestCase {
private StopAnalyzer stop = new StopAnalyzer();
private Set inValidTokens = new HashSet();
public TestStopAnalyzer(String s) {
super(s);
}
protected void setUp() {
for (int i = 0; i < StopAnalyzer.ENGLISH_STOP_WORDS.length; i++) {
inValidTokens.add(StopAnalyzer.ENGLISH_STOP_WORDS[i]);
}
}
public void testDefaults() {
assertTrue(stop != null);
StringReader reader = new StringReader("This is a test of the english stop analyzer");
TokenStream stream = stop.tokenStream("test", reader);
assertTrue(stream != null);
Token token = null;
try {
while ((token = stream.next()) != null)
{
assertTrue(inValidTokens.contains(token.termText()) == false);
}
} catch (IOException e) {
assertTrue(false);
}
}
public void testStopList() {
Set stopWordsSet = new HashSet();
stopWordsSet.add("good");
stopWordsSet.add("test");
stopWordsSet.add("analyzer");
StopAnalyzer newStop = new StopAnalyzer((String[])stopWordsSet.toArray(new String[3]));
StringReader reader = new StringReader("This is a good test of the english stop analyzer");
TokenStream stream = newStop.tokenStream("test", reader);
assertTrue(stream != null);
Token token = null;
try {
while ((token = stream.next()) != null)
{
String text = token.termText();
assertTrue(stopWordsSet.contains(text) == false);
}
} catch (IOException e) {
assertTrue(false);
}
}
}


@@ -1,168 +0,0 @@
package org.apache.lucene.document;
import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Hits;
import java.io.IOException;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Tests {@link Document} class.
*
* @author Otis Gospodnetic
* @version $Id$
*/
public class TestDocument extends TestCase
{
/**
* Tests the {@link Document#removeField(String)} and {@link Document#removeFields(String)} methods for a brand new Document
* that has not been indexed yet.
*
* @throws Exception on error
*/
public void testRemoveForNewDocument() throws Exception
{
Document doc = makeDocumentWithFields();
assertEquals(8, doc.fields.size());
doc.removeFields("keyword");
assertEquals(6, doc.fields.size());
doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored
doc.removeFields("keyword"); // removing a field more than once
assertEquals(6, doc.fields.size());
doc.removeField("text");
assertEquals(5, doc.fields.size());
doc.removeField("text");
assertEquals(4, doc.fields.size());
doc.removeField("text");
assertEquals(4, doc.fields.size());
doc.removeField("doesnotexists"); // removing non-existing fields is siltenlty ignored
assertEquals(4, doc.fields.size());
doc.removeFields("unindexed");
assertEquals(2, doc.fields.size());
doc.removeFields("unstored");
assertEquals(0, doc.fields.size());
doc.removeFields("doesnotexists"); // removing non-existing fields is siltenlty ignored
assertEquals(0, doc.fields.size());
}
/**
* Tests {@link Document#getValues()} method for a brand new Document
* that has not been indexed yet.
*
* @throws Exception on error
*/
public void testGetValuesForNewDocument() throws Exception
{
doAssert(makeDocumentWithFields(), false);
}
/**
* Tests {@link Document#getValues()} method for a Document retrieved from
* an index.
*
* @throws Exception on error
*/
public void testGetValuesForIndexedDocument() throws Exception
{
RAMDirectory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true);
writer.addDocument(makeDocumentWithFields());
writer.close();
Searcher searcher = new IndexSearcher(dir);
// search for something that does exist
Query query = new TermQuery(new Term("keyword", "test1"));
// ensure that queries return expected results without DateFilter first
Hits hits = searcher.search(query);
assertEquals(1, hits.length());
try
{
doAssert(hits.doc(0), true);
}
catch (Exception e)
{
e.printStackTrace(System.err);
System.err.print("\n");
}
finally
{
searcher.close();
}
}
private Document makeDocumentWithFields() throws IOException
{
Document doc = new Document();
doc.add(Field.Keyword( "keyword", "test1"));
doc.add(Field.Keyword( "keyword", "test2"));
doc.add(Field.Text( "text", "test1"));
doc.add(Field.Text( "text", "test2"));
doc.add(Field.UnIndexed("unindexed", "test1"));
doc.add(Field.UnIndexed("unindexed", "test2"));
doc.add(Field.UnStored( "unstored", "test1"));
doc.add(Field.UnStored( "unstored", "test2"));
return doc;
}
private void doAssert(Document doc, boolean fromIndex)
{
String[] keywordFieldValues = doc.getValues("keyword");
String[] textFieldValues = doc.getValues("text");
String[] unindexedFieldValues = doc.getValues("unindexed");
String[] unstoredFieldValues = doc.getValues("unstored");
assertTrue(keywordFieldValues.length == 2);
assertTrue(textFieldValues.length == 2);
assertTrue(unindexedFieldValues.length == 2);
// this test cannot work for documents retrieved from the index
// since unstored fields will obviously not be returned
if (! fromIndex)
{
assertTrue(unstoredFieldValues.length == 2);
}
assertTrue(keywordFieldValues[0].equals("test1"));
assertTrue(keywordFieldValues[1].equals("test2"));
assertTrue(textFieldValues[0].equals("test1"));
assertTrue(textFieldValues[1].equals("test2"));
assertTrue(unindexedFieldValues[0].equals("test1"));
assertTrue(unindexedFieldValues[1].equals("test2"));
// this test cannot work for documents retrieved from the index
// since unstored fields will obviously not be returned
if (! fromIndex)
{
assertTrue(unstoredFieldValues[0].equals("test1"));
assertTrue(unstoredFieldValues[1].equals("test2"));
}
}
}


@@ -1,159 +0,0 @@
package org.apache.lucene.index;
/**
* Created by IntelliJ IDEA.
* User: Grant Ingersoll
* Date: Feb 2, 2004
* Time: 6:16:12 PM
* $Id$
* Copyright 2004. Center For Natural Language Processing
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Enumeration;
/**
* Helper that builds documents with a known set of stored, indexed,
* unindexed, and unstored fields for the index unit tests.
**/
class DocHelper {
public static final String FIELD_1_TEXT = "field one text";
public static final String TEXT_FIELD_1_KEY = "textField1";
public static Field textField1 = Field.Text(TEXT_FIELD_1_KEY, FIELD_1_TEXT, false);
public static final String FIELD_2_TEXT = "field field field two text";
//Fields will be lexicographically sorted. So, the order is: field, text, two
public static final int [] FIELD_2_FREQS = {3, 1, 1};
public static final String TEXT_FIELD_2_KEY = "textField2";
public static Field textField2 = Field.Text(TEXT_FIELD_2_KEY, FIELD_2_TEXT, true);
public static final String KEYWORD_TEXT = "Keyword";
public static final String KEYWORD_FIELD_KEY = "keyField";
public static Field keyField = Field.Keyword(KEYWORD_FIELD_KEY, KEYWORD_TEXT);
public static final String UNINDEXED_FIELD_TEXT = "unindexed field text";
public static final String UNINDEXED_FIELD_KEY = "unIndField";
public static Field unIndField = Field.UnIndexed(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT);
public static final String UNSTORED_1_FIELD_TEXT = "unstored field text";
public static final String UNSTORED_FIELD_1_KEY = "unStoredField1";
public static Field unStoredField1 = Field.UnStored(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT, false);
public static final String UNSTORED_2_FIELD_TEXT = "unstored field text";
public static final String UNSTORED_FIELD_2_KEY = "unStoredField2";
public static Field unStoredField2 = Field.UnStored(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT, true);
// public static Set fieldNamesSet = null;
// public static Set fieldValuesSet = null;
public static Map nameValues = null;
static
{
nameValues = new HashMap();
nameValues.put(TEXT_FIELD_1_KEY, FIELD_1_TEXT);
nameValues.put(TEXT_FIELD_2_KEY, FIELD_2_TEXT);
nameValues.put(KEYWORD_FIELD_KEY, KEYWORD_TEXT);
nameValues.put(UNINDEXED_FIELD_KEY, UNINDEXED_FIELD_TEXT);
nameValues.put(UNSTORED_FIELD_1_KEY, UNSTORED_1_FIELD_TEXT);
nameValues.put(UNSTORED_FIELD_2_KEY, UNSTORED_2_FIELD_TEXT);
}
/**
* Adds the fields above to a document
* @param doc The document to write
*/
public static void setupDoc(Document doc) {
doc.add(textField1);
doc.add(textField2);
doc.add(keyField);
doc.add(unIndField);
doc.add(unStoredField1);
doc.add(unStoredField2);
}
/**
* Writes the document to the directory using a segment named "test"
* @param dir the directory to write to
* @param doc the document to write
*/
public static void writeDoc(Directory dir, Document doc)
{
writeDoc(dir, "test", doc);
}
/**
* Writes the document to the directory in the given segment
* @param dir the directory to write to
* @param segment the name of the segment to write
* @param doc the document to write
*/
public static void writeDoc(Directory dir, String segment, Document doc)
{
Analyzer analyzer = new WhitespaceAnalyzer();
Similarity similarity = Similarity.getDefault();
writeDoc(dir, analyzer, similarity, segment, doc);
}
/**
* Writes the document to the directory segment named "test" using the specified analyzer and similarity
* @param dir the directory to write to
* @param analyzer the analyzer to use
* @param similarity the similarity implementation to use
* @param doc the document to write
*/
public static void writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, Document doc)
{
writeDoc(dir, analyzer, similarity, "test", doc);
}
/**
* Writes the document to the given directory segment using the specified analyzer and similarity
* @param dir the directory to write to
* @param analyzer the analyzer to use
* @param similarity the similarity implementation to use
* @param segment the name of the segment to write
* @param doc the document to write
*/
public static void writeDoc(Directory dir, Analyzer analyzer, Similarity similarity, String segment, Document doc)
{
DocumentWriter writer = new DocumentWriter(dir, analyzer, similarity, 50);
try {
writer.addDocument(segment, doc);
} catch (IOException e) {
e.printStackTrace();
}
}
public static int numFields(Document doc) {
Enumeration fields = doc.fields();
int result = 0;
while (fields.hasMoreElements()) {
fields.nextElement();
result++;
}
return result;
}
}
/*
fieldNamesSet = new HashSet();
fieldNamesSet.add(TEXT_FIELD_1_KEY);
fieldNamesSet.add(TEXT_FIELD_2_KEY);
fieldNamesSet.add(KEYWORD_FIELD_KEY);
fieldNamesSet.add(UNINDEXED_FIELD_KEY);
fieldNamesSet.add(UNSTORED_FIELD_1_KEY);
fieldNamesSet.add(UNSTORED_FIELD_2_KEY);
fieldValuesSet = new HashSet();
fieldValuesSet.add(FIELD_1_TEXT);
fieldValuesSet.add(FIELD_2_TEXT);
fieldValuesSet.add(KEYWORD_TEXT);
fieldValuesSet.add(UNINDEXED_FIELD_TEXT);
fieldValuesSet.add(UNSTORED_1_FIELD_TEXT);
fieldValuesSet.add(UNSTORED_2_FIELD_TEXT);
*/


@@ -1,56 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.InputStream;
import java.io.IOException;
public class MockInputStream extends InputStream {
private byte[] buffer;
private int pointer = 0;
public MockInputStream(byte[] bytes) {
buffer = bytes;
length = bytes.length;
}
protected void readInternal(byte[] dest, int destOffset, int len)
throws IOException {
int remainder = len;
int start = pointer;
while (remainder != 0) {
// int bufferNumber = start / buffer.length;
int bufferOffset = start % buffer.length;
int bytesInBuffer = buffer.length - bufferOffset;
int bytesToCopy = bytesInBuffer >= remainder ? remainder : bytesInBuffer;
System.arraycopy(buffer, bufferOffset, dest, destOffset, bytesToCopy);
destOffset += bytesToCopy;
start += bytesToCopy;
remainder -= bytesToCopy;
}
pointer += len;
}
public void close() throws IOException {
// ignore
}
protected void seekInternal(long pos) throws IOException {
pointer = (int) pos;
}
}


@@ -1,184 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import java.util.Date;
import java.util.Random;
import java.util.Vector;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.File;
import java.io.FileInputStream;
class TermInfosTest {
public static void main(String[] args) {
try {
test();
} catch (Exception e) {
System.out.println(" caught a " + e.getClass() +
"\n with message: " + e.getMessage());
}
}
// FIXME: OG: remove hard-coded file names
public static void test()
throws Exception {
File file = new File("words.txt");
System.out.println(" reading word file containing " +
file.length() + " bytes");
Date start = new Date();
Vector keys = new Vector();
FileInputStream ws = new FileInputStream(file);
BufferedReader wr = new BufferedReader(new InputStreamReader(ws));
for (String key = wr.readLine(); key!=null; key = wr.readLine())
keys.addElement(new Term("word", key));
wr.close();
Date end = new Date();
System.out.print(end.getTime() - start.getTime());
System.out.println(" milliseconds to read " + keys.size() + " words");
start = new Date();
Random gen = new Random(1251971);
long fp = (gen.nextInt() & 0xF) + 1;
long pp = (gen.nextInt() & 0xF) + 1;
int[] docFreqs = new int[keys.size()];
long[] freqPointers = new long[keys.size()];
long[] proxPointers = new long[keys.size()];
for (int i = 0; i < keys.size(); i++) {
docFreqs[i] = (gen.nextInt() & 0xF) + 1;
freqPointers[i] = fp;
proxPointers[i] = pp;
fp += (gen.nextInt() & 0xF) + 1;
pp += (gen.nextInt() & 0xF) + 1;
}
end = new Date();
System.out.print(end.getTime() - start.getTime());
System.out.println(" milliseconds to generate values");
start = new Date();
Directory store = FSDirectory.getDirectory("test.store", true);
FieldInfos fis = new FieldInfos();
TermInfosWriter writer = new TermInfosWriter(store, "words", fis,
IndexWriter.DEFAULT_TERM_INDEX_INTERVAL);
fis.add("word", false);
for (int i = 0; i < keys.size(); i++)
writer.add((Term)keys.elementAt(i),
new TermInfo(docFreqs[i], freqPointers[i], proxPointers[i]));
writer.close();
end = new Date();
System.out.print(end.getTime() - start.getTime());
System.out.println(" milliseconds to write table");
System.out.println(" table occupies " +
store.fileLength("words.tis") + " bytes");
start = new Date();
TermInfosReader reader = new TermInfosReader(store, "words", fis);
end = new Date();
System.out.print(end.getTime() - start.getTime());
System.out.println(" milliseconds to open table");
start = new Date();
SegmentTermEnum enumerator = reader.terms();
for (int i = 0; i < keys.size(); i++) {
enumerator.next();
Term key = (Term)keys.elementAt(i);
if (!key.equals(enumerator.term()))
throw new Exception("wrong term: " + enumerator.term()
+ ", expected: " + key
+ " at " + i);
TermInfo ti = enumerator.termInfo();
if (ti.docFreq != docFreqs[i])
throw
new Exception("wrong value: " + Long.toString(ti.docFreq, 16)
+ ", expected: " + Long.toString(docFreqs[i], 16)
+ " at " + i);
if (ti.freqPointer != freqPointers[i])
throw
new Exception("wrong value: " + Long.toString(ti.freqPointer, 16)
+ ", expected: " + Long.toString(freqPointers[i], 16)
+ " at " + i);
if (ti.proxPointer != proxPointers[i])
throw
new Exception("wrong value: " + Long.toString(ti.proxPointer, 16)
+ ", expected: " + Long.toString(proxPointers[i], 16)
+ " at " + i);
}
end = new Date();
System.out.print(end.getTime() - start.getTime());
System.out.println(" milliseconds to iterate over " +
keys.size() + " words");
start = new Date();
for (int i = 0; i < keys.size(); i++) {
Term key = (Term)keys.elementAt(i);
TermInfo ti = reader.get(key);
if (ti.docFreq != docFreqs[i])
throw
new Exception("wrong value: " + Long.toString(ti.docFreq, 16)
+ ", expected: " + Long.toString(docFreqs[i], 16)
+ " at " + i);
if (ti.freqPointer != freqPointers[i])
throw
new Exception("wrong value: " + Long.toString(ti.freqPointer, 16)
+ ", expected: " + Long.toString(freqPointers[i], 16)
+ " at " + i);
if (ti.proxPointer != proxPointers[i])
throw
new Exception("wrong value: " + Long.toString(ti.proxPointer, 16)
+ ", expected: " + Long.toString(proxPointers[i], 16)
+ " at " + i);
}
end = new Date();
System.out.print((end.getTime() - start.getTime()) / (float)keys.size());
System.out.println(" average milliseconds per lookup");
TermEnum e = reader.terms(new Term("word", "azz"));
System.out.println("Word after azz is " + e.term().text);
reader.close();
store.close();
}
}


@@ -1,77 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.search.Similarity;
import java.util.Map;
import java.io.IOException;
public class TestFieldsReader extends TestCase {
private RAMDirectory dir = new RAMDirectory();
private Document testDoc = new Document();
private FieldInfos fieldInfos = null;
public TestFieldsReader(String s) {
super(s);
}
protected void setUp() {
fieldInfos = new FieldInfos();
DocHelper.setupDoc(testDoc);
fieldInfos.add(testDoc);
DocumentWriter writer = new DocumentWriter(dir, new WhitespaceAnalyzer(),
Similarity.getDefault(), 50);
assertTrue(writer != null);
try {
writer.addDocument("test", testDoc);
}
catch (IOException e)
{
}
}
protected void tearDown() {
}
public void test() {
assertTrue(dir != null);
assertTrue(fieldInfos != null);
try {
FieldsReader reader = new FieldsReader(dir, "test", fieldInfos);
assertTrue(reader != null);
assertTrue(reader.size() == 1);
Document doc = reader.doc(0);
assertTrue(doc != null);
assertTrue(doc.getField("textField1") != null);
Field field = doc.getField("textField2");
assertTrue(field != null);
assertTrue(field.isTermVectorStored() == true);
reader.close();
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
}
}


@@ -1,137 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
import junit.framework.TestResult;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.util.Collection;
import java.io.IOException;
public class TestFilterIndexReader extends TestCase {
private static class TestReader extends FilterIndexReader {
/** Filter that only permits terms containing 'e'.*/
private static class TestTermEnum extends FilterTermEnum {
public TestTermEnum(TermEnum termEnum)
throws IOException {
super(termEnum);
}
/** Scan for terms containing the letter 'e'.*/
public boolean next() throws IOException {
while (in.next()) {
if (in.term().text().indexOf('e') != -1)
return true;
}
return false;
}
}
/** Filter that only returns odd numbered documents. */
private static class TestTermPositions extends FilterTermPositions {
public TestTermPositions(TermPositions in)
throws IOException {
super(in);
}
/** Scan for odd numbered documents. */
public boolean next() throws IOException {
while (in.next()) {
if ((in.doc() % 2) == 1)
return true;
}
return false;
}
}
public TestReader(IndexReader reader) {
super(reader);
}
/** Filter terms with TestTermEnum. */
public TermEnum terms() throws IOException {
return new TestTermEnum(in.terms());
}
/** Filter positions with TestTermPositions. */
public TermPositions termPositions() throws IOException {
return new TestTermPositions(in.termPositions());
}
}
/** Main for running test case by itself. */
public static void main(String args[]) {
TestRunner.run (new TestSuite(TestFilterIndexReader.class));
}
/**
* Tests that the filtered reader exposes only terms containing 'e'
* and only odd-numbered documents in its term positions
* @throws Exception on error
*/
public void testFilterIndexReader() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer =
new IndexWriter(directory, new WhitespaceAnalyzer(), true);
Document d1 = new Document();
d1.add(Field.Text("default","one two"));
writer.addDocument(d1);
Document d2 = new Document();
d2.add(Field.Text("default","one three"));
writer.addDocument(d2);
Document d3 = new Document();
d3.add(Field.Text("default","two four"));
writer.addDocument(d3);
writer.close();
IndexReader reader = new TestReader(IndexReader.open(directory));
TermEnum terms = reader.terms();
while (terms.next()) {
assertTrue(terms.term().text().indexOf('e') != -1);
}
terms.close();
TermPositions positions = reader.termPositions(new Term("default", "one"));
while (positions.next()) {
assertTrue((positions.doc() % 2) == 1);
}
reader.close();
}
}
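FilterIndexReader and its nested FilterTermEnum / FilterTermPositions classes forward every call to the wrapped `in` instance, so a subclass overrides only the behavior it wants to change, as TestReader does above. The same decorator pattern applies to any other reader method; a minimal sketch (hypothetical class, not part of the original file):

public class NoDeletionsReader extends FilterIndexReader {
  public NoDeletionsReader(IndexReader in) { super(in); }
  /** Pretend the wrapped index has no deletions; everything else forwards to in. */
  public boolean hasDeletions() { return false; }
}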


@@ -1,448 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.util.Collection;
import java.io.IOException;
import java.io.File;
public class TestIndexReader extends TestCase
{
/** Main for running test case by itself. */
public static void main(String args[]) {
TestRunner.run (new TestSuite(TestIndexReader.class));
// TestRunner.run (new TestIndexReader("testBasicDelete"));
// TestRunner.run (new TestIndexReader("testDeleteReaderWriterConflict"));
// TestRunner.run (new TestIndexReader("testDeleteReaderReaderConflict"));
// TestRunner.run (new TestIndexReader("testFilesOpenClose"));
}
public TestIndexReader(String name) {
super(name);
}
/**
* Tests the IndexReader.getFieldNames implementation
* @throws Exception on error
*/
public void testGetFieldNames() throws Exception
{
RAMDirectory d = new RAMDirectory();
// set up writer
IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
addDocumentWithFields(writer);
writer.close();
// set up reader
IndexReader reader = IndexReader.open(d);
Collection fieldNames = reader.getFieldNames();
assertTrue(fieldNames.contains("keyword"));
assertTrue(fieldNames.contains("text"));
assertTrue(fieldNames.contains("unindexed"));
assertTrue(fieldNames.contains("unstored"));
// add more documents
writer = new IndexWriter(d, new StandardAnalyzer(), false);
// want to get some more segments here
for (int i = 0; i < 5*writer.mergeFactor; i++)
{
addDocumentWithFields(writer);
}
// new fields are in some different segments (we hope)
for (int i = 0; i < 5*writer.mergeFactor; i++)
{
addDocumentWithDifferentFields(writer);
}
writer.close();
// verify fields again
reader = IndexReader.open(d);
fieldNames = reader.getFieldNames();
assertEquals(8, fieldNames.size()); // the following fields
assertTrue(fieldNames.contains("keyword"));
assertTrue(fieldNames.contains("text"));
assertTrue(fieldNames.contains("unindexed"));
assertTrue(fieldNames.contains("unstored"));
assertTrue(fieldNames.contains("keyword2"));
assertTrue(fieldNames.contains("text2"));
assertTrue(fieldNames.contains("unindexed2"));
assertTrue(fieldNames.contains("unstored2"));
// verify that only indexed fields were returned
Collection indexedFieldNames = reader.getFieldNames(true);
assertEquals(6, indexedFieldNames.size());
assertTrue(indexedFieldNames.contains("keyword"));
assertTrue(indexedFieldNames.contains("text"));
assertTrue(indexedFieldNames.contains("unstored"));
assertTrue(indexedFieldNames.contains("keyword2"));
assertTrue(indexedFieldNames.contains("text2"));
assertTrue(indexedFieldNames.contains("unstored2"));
// verify that only unindexed fields were returned
Collection unindexedFieldNames = reader.getFieldNames(false);
assertEquals(2, unindexedFieldNames.size()); // the following fields
assertTrue(unindexedFieldNames.contains("unindexed"));
assertTrue(unindexedFieldNames.contains("unindexed2"));
}
private void assertTermDocsCount(String msg,
IndexReader reader,
Term term,
int expected)
throws IOException
{
TermDocs tdocs = null;
try {
tdocs = reader.termDocs(term);
assertNotNull(msg + ", null TermDocs", tdocs);
int count = 0;
while(tdocs.next()) {
count++;
}
assertEquals(msg + ", count mismatch", expected, count);
} finally {
if (tdocs != null)
try { tdocs.close(); } catch (Exception e) { }
}
}
public void testBasicDelete() throws IOException
{
Directory dir = new RAMDirectory();
IndexWriter writer = null;
IndexReader reader = null;
Term searchTerm = new Term("content", "aaa");
// add 100 documents with term : aaa
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm.text());
}
writer.close();
// OPEN READER AT THIS POINT - this should fix the view of the
// index at the point of having 100 "aaa" documents and 0 "bbb"
reader = IndexReader.open(dir);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("first reader", reader, searchTerm, 100);
// DELETE DOCUMENTS CONTAINING TERM: aaa
int deleted = 0;
reader = IndexReader.open(dir);
deleted = reader.delete(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
reader.close();
// CREATE A NEW READER and re-test
reader = IndexReader.open(dir);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
reader.close();
}
public void testDeleteReaderWriterConflictUnoptimized() throws IOException{
deleteReaderWriterConflict(false);
}
public void testDeleteReaderWriterConflictOptimized() throws IOException{
deleteReaderWriterConflict(true);
}
private void deleteReaderWriterConflict(boolean optimize) throws IOException
{
//Directory dir = new RAMDirectory();
Directory dir = getDirectory(true);
Term searchTerm = new Term("content", "aaa");
Term searchTerm2 = new Term("content", "bbb");
// add 100 documents with term : aaa
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm.text());
}
writer.close();
// OPEN READER AT THIS POINT - this should fix the view of the
// index at the point of having 100 "aaa" documents and 0 "bbb"
IndexReader reader = IndexReader.open(dir);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 0);
// add 100 documents with term : bbb
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm2.text());
}
// REQUEST OPTIMIZATION
// This causes a new segment to become current for all subsequent
// searchers. Because of this, deletions made via a previously open
// reader, which would be applied to that reader's segment, are lost
// for subsequent searchers/readers
if(optimize)
writer.optimize();
writer.close();
// The reader should not see the new data
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 0);
// DELETE DOCUMENTS CONTAINING TERM: aaa
// NOTE: the reader was created when only "aaa" documents were in
int deleted = 0;
try {
deleted = reader.delete(searchTerm);
fail("Delete allowed on an index reader with stale segment information");
} catch (IOException e) {
/* success */
}
// Re-open index reader and try again. This time it should see
// the new data.
reader.close();
reader = IndexReader.open(dir);
assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
assertEquals("first docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 100);
deleted = reader.delete(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
reader.close();
// CREATE A NEW READER and re-test
reader = IndexReader.open(dir);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
assertTermDocsCount("deleted termDocs", reader, searchTerm2, 100);
reader.close();
}
private Directory getDirectory(boolean create) throws IOException {
return FSDirectory.getDirectory(new File(System.getProperty("tempDir"), "testIndex"), create);
}
public void testFilesOpenClose() throws IOException
{
// Create initial data set
Directory dir = getDirectory(true);
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
addDoc(writer, "test");
writer.close();
dir.close();
// Try to erase the data - this ensures that the writer closed all files
dir = getDirectory(true);
// Now create the data set again, just as before
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
addDoc(writer, "test");
writer.close();
dir.close();
// Now open existing directory and test that reader closes all files
dir = getDirectory(false);
IndexReader reader1 = IndexReader.open(dir);
reader1.close();
dir.close();
// The following will fail if reader did not close all files
dir = getDirectory(true);
}
public void testDeleteReaderReaderConflictUnoptimized() throws IOException{
deleteReaderReaderConflict(false);
}
public void testDeleteReaderReaderConflictOptimized() throws IOException{
deleteReaderReaderConflict(true);
}
private void deleteReaderReaderConflict(boolean optimize) throws IOException
{
Directory dir = getDirectory(true);
Term searchTerm1 = new Term("content", "aaa");
Term searchTerm2 = new Term("content", "bbb");
Term searchTerm3 = new Term("content", "ccc");
// add 100 documents with term : aaa
// add 100 documents with term : bbb
// add 100 documents with term : ccc
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
for (int i = 0; i < 100; i++)
{
addDoc(writer, searchTerm1.text());
addDoc(writer, searchTerm2.text());
addDoc(writer, searchTerm3.text());
}
if(optimize)
writer.optimize();
writer.close();
// OPEN TWO READERS
// Both readers get segment info as exists at this time
IndexReader reader1 = IndexReader.open(dir);
assertEquals("first opened", 100, reader1.docFreq(searchTerm1));
assertEquals("first opened", 100, reader1.docFreq(searchTerm2));
assertEquals("first opened", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("first opened", reader1, searchTerm1, 100);
assertTermDocsCount("first opened", reader1, searchTerm2, 100);
assertTermDocsCount("first opened", reader1, searchTerm3, 100);
IndexReader reader2 = IndexReader.open(dir);
assertEquals("first opened", 100, reader2.docFreq(searchTerm1));
assertEquals("first opened", 100, reader2.docFreq(searchTerm2));
assertEquals("first opened", 100, reader2.docFreq(searchTerm3));
assertTermDocsCount("first opened", reader2, searchTerm1, 100);
assertTermDocsCount("first opened", reader2, searchTerm2, 100);
assertTermDocsCount("first opened", reader2, searchTerm3, 100);
// DELETE DOCS FROM READER 2 and CLOSE IT
// delete documents containing term: aaa
// when the reader is closed, the segment info is updated and
// the first reader is now stale
reader2.delete(searchTerm1);
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
assertTermDocsCount("after delete 1", reader2, searchTerm1, 0);
assertTermDocsCount("after delete 1", reader2, searchTerm2, 100);
assertTermDocsCount("after delete 1", reader2, searchTerm3, 100);
reader2.close();
// Make sure reader 1 is unchanged since it was open earlier
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm1));
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm2));
assertEquals("after delete 1", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("after delete 1", reader1, searchTerm1, 100);
assertTermDocsCount("after delete 1", reader1, searchTerm2, 100);
assertTermDocsCount("after delete 1", reader1, searchTerm3, 100);
// ATTEMPT TO DELETE FROM STALE READER
// delete documents containing term: bbb
try {
reader1.delete(searchTerm2);
fail("Delete allowed from a stale index reader");
} catch (IOException e) {
/* success */
}
// RECREATE READER AND TRY AGAIN
reader1.close();
reader1 = IndexReader.open(dir);
assertEquals("reopened", 100, reader1.docFreq(searchTerm1));
assertEquals("reopened", 100, reader1.docFreq(searchTerm2));
assertEquals("reopened", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("reopened", reader1, searchTerm1, 0);
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
reader1.delete(searchTerm2);
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));
assertTermDocsCount("deleted 2", reader1, searchTerm1, 0);
assertTermDocsCount("deleted 2", reader1, searchTerm2, 0);
assertTermDocsCount("deleted 2", reader1, searchTerm3, 100);
reader1.close();
// Open another reader to confirm that everything is deleted
reader2 = IndexReader.open(dir);
assertEquals("reopened 2", 100, reader2.docFreq(searchTerm1));
assertEquals("reopened 2", 100, reader2.docFreq(searchTerm2));
assertEquals("reopened 2", 100, reader2.docFreq(searchTerm3));
assertTermDocsCount("reopened 2", reader2, searchTerm1, 0);
assertTermDocsCount("reopened 2", reader2, searchTerm2, 0);
assertTermDocsCount("reopened 2", reader2, searchTerm3, 100);
reader2.close();
dir.close();
}
private void addDocumentWithFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(Field.Keyword("keyword","test1"));
doc.add(Field.Text("text","test1"));
doc.add(Field.UnIndexed("unindexed","test1"));
doc.add(Field.UnStored("unstored","test1"));
writer.addDocument(doc);
}
private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException
{
Document doc = new Document();
doc.add(Field.Keyword("keyword2","test1"));
doc.add(Field.Text("text2","test1"));
doc.add(Field.UnIndexed("unindexed2","test1"));
doc.add(Field.UnStored("unstored2","test1"));
writer.addDocument(doc);
}
private void addDoc(IndexWriter writer, String value)
{
Document doc = new Document();
doc.add(Field.UnStored("content", value));
try
{
writer.addDocument(doc);
}
catch (IOException e)
{
e.printStackTrace();
}
}
}
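All of the conflict tests above exercise the same contract: a reader whose segment view has gone stale cannot delete and must be reopened first. A minimal sketch of that reopen-and-retry pattern, assuming a populated dir and a term to delete (the names are illustrative, not from the original file):

IndexReader reader = IndexReader.open(dir);
try {
  reader.delete(term);             // throws if another reader/writer committed first
} catch (IOException stale) {
  reader.close();                  // segment info is stale: reopen to get the
  reader = IndexReader.open(dir);  // current view, then retry the delete
  reader.delete(term);
}
reader.close();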


@@ -1,86 +0,0 @@
package org.apache.lucene.index;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
/**
* @author goller
* @version $Id$
*/
public class TestIndexWriter extends TestCase
{
public void testDocCount()
{
Directory dir = new RAMDirectory();
IndexWriter writer = null;
IndexReader reader = null;
int i;
try {
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
// add 100 documents
for (i = 0; i < 100; i++) {
addDoc(writer);
}
assertEquals(100, writer.docCount());
writer.close();
// delete 40 documents
reader = IndexReader.open(dir);
for (i = 0; i < 40; i++) {
reader.delete(i);
}
reader.close();
// test doc count before segments are merged/index is optimized
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
assertEquals(100, writer.docCount());
writer.close();
reader = IndexReader.open(dir);
assertEquals(100, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
// optimize the index and check that the new doc count is correct
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
writer.optimize();
assertEquals(60, writer.docCount());
writer.close();
// check that the index reader gives the same numbers.
reader = IndexReader.open(dir);
assertEquals(60, reader.maxDoc());
assertEquals(60, reader.numDocs());
reader.close();
}
catch (IOException e) {
e.printStackTrace();
}
}
private void addDoc(IndexWriter writer)
{
Document doc = new Document();
doc.add(Field.UnStored("content", "aaa"));
try {
writer.addDocument(doc);
}
catch (IOException e) {
e.printStackTrace();
}
}
}
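Stated compactly, the invariant testDocCount walks through (a sketch over an already-populated dir, not part of the original file):

IndexReader reader = IndexReader.open(dir);
// Until segments are merged, maxDoc() still counts deleted slots,
// while numDocs() reflects deletions immediately; optimize() reclaims
// the deleted slots and brings the two counts back together.
assertTrue(reader.numDocs() <= reader.maxDoc());
reader.close();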


@@ -1,37 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.store.InputStream;
import java.io.IOException;
public class TestInputStream extends TestCase {
public void testRead() throws IOException {
InputStream is = new MockInputStream(new byte[] { (byte) 0x80, 0x01,
(byte) 0xFF, 0x7F,
(byte) 0x80, (byte) 0x80, 0x01,
(byte) 0x81, (byte) 0x80, 0x01,
0x06, 'L', 'u', 'c', 'e', 'n', 'e'});
assertEquals(128,is.readVInt());
assertEquals(16383,is.readVInt());
assertEquals(16384,is.readVInt());
assertEquals(16385,is.readVInt());
assertEquals("Lucene",is.readString());
}
}
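The byte array above is hand-encoded VInt data: seven data bits per byte, low-order bits first, with the high bit set on every byte except the last. A hypothetical encoder that reproduces those bytes, for reference:

static byte[] encodeVInt(int value) {
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  while ((value & ~0x7F) != 0) {
    out.write((value & 0x7F) | 0x80); // high bit set: more bytes follow
    value >>>= 7;
  }
  out.write(value);                   // final byte, high bit clear
  return out.toByteArray();
}
// encodeVInt(128)   -> { (byte) 0x80, 0x01 }
// encodeVInt(16384) -> { (byte) 0x80, (byte) 0x80, 0x01 }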


@@ -1,110 +0,0 @@
package org.apache.lucene.index;
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
/**
* @author goller
*/
public class TestSegmentTermEnum extends TestCase
{
Directory dir = new RAMDirectory();
public void testTermEnum()
{
IndexWriter writer = null;
try {
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
// add 100 documents with term : aaa
// add 100 documents with terms: aaa bbb
// Therefore, term 'aaa' has document frequency of 200 and term 'bbb' 100
for (int i = 0; i < 100; i++) {
addDoc(writer, "aaa");
addDoc(writer, "aaa bbb");
}
writer.close();
}
catch (IOException e) {
e.printStackTrace();
}
try {
// verify document frequency of terms in an unoptimized index
verifyDocFreq();
// merge segments by optimizing the index
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
writer.optimize();
writer.close();
// verify document frequency of terms in an optimized index
verifyDocFreq();
}
catch (IOException e2) {
e2.printStackTrace();
}
}
private void verifyDocFreq()
throws IOException
{
IndexReader reader = IndexReader.open(dir);
TermEnum termEnum = null;
// create enumeration of all terms
termEnum = reader.terms();
// go to the first term (aaa)
termEnum.next();
// assert that term is 'aaa'
assertEquals("aaa", termEnum.term().text());
assertEquals(200, termEnum.docFreq());
// go to the second term (bbb)
termEnum.next();
// assert that term is 'bbb'
assertEquals("bbb", termEnum.term().text());
assertEquals(100, termEnum.docFreq());
termEnum.close();
// create enumeration of terms after term 'aaa', including 'aaa'
termEnum = reader.terms(new Term("content", "aaa"));
// assert that term is 'aaa'
assertEquals("aaa", termEnum.term().text());
assertEquals(200, termEnum.docFreq());
// go to term 'bbb'
termEnum.next();
// assert that term is 'bbb'
assertEquals("bbb", termEnum.term().text());
assertEquals(100, termEnum.docFreq());
termEnum.close();
}
private void addDoc(IndexWriter writer, String value)
{
Document doc = new Document();
doc.add(Field.UnStored("content", value));
try {
writer.addDocument(doc);
}
catch (IOException e) {
e.printStackTrace();
}
}
}


@@ -1,106 +0,0 @@
package org.apache.lucene.index;
import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
import java.util.Arrays;
public class TestTermVectorsReader extends TestCase {
private TermVectorsWriter writer = null;
//Must be lexicographically sorted; sorted in setUp rather than maintained by hand here
private String [] testFields = {"f1", "f2", "f3"};
private String [] testTerms = {"this", "is", "a", "test"};
private RAMDirectory dir = new RAMDirectory();
private String seg = "testSegment";
private FieldInfos fieldInfos = new FieldInfos();
public TestTermVectorsReader(String s) {
super(s);
}
protected void setUp() {
for (int i = 0; i < testFields.length; i++) {
fieldInfos.add(testFields[i], true, true);
}
try {
Arrays.sort(testTerms);
for (int j = 0; j < 5; j++) {
writer = new TermVectorsWriter(dir, seg, fieldInfos);
writer.openDocument();
for (int k = 0; k < testFields.length; k++) {
writer.openField(testFields[k]);
for (int i = 0; i < testTerms.length; i++) {
writer.addTerm(testTerms[i], i);
}
writer.closeField();
}
writer.closeDocument();
writer.close();
}
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
}
protected void tearDown() {
}
public void test() {
//Check to see the files were created properly in setup
assertTrue(writer.isDocumentOpen() == false);
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVD_EXTENSION));
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVX_EXTENSION));
}
public void testReader() {
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
assertTrue(reader != null);
TermFreqVector vector = reader.get(0, testFields[0]);
assertTrue(vector != null);
String [] terms = vector.getTerms();
assertTrue(terms != null);
assertTrue(terms.length == testTerms.length);
for (int i = 0; i < terms.length; i++) {
String term = terms[i];
//System.out.println("Term: " + term);
assertTrue(term.equals(testTerms[i]));
}
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
}
/**
* Make sure exceptions and bad params are handled appropriately
*/
public void testBadParams() {
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
assertTrue(reader != null);
//Bad document number, good field number
TermFreqVector vector = reader.get(50, testFields[0]);
assertTrue(false);
} catch (IOException e) {
assertTrue(true);
}
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
assertTrue(reader != null);
//good document number, bad field number
TermFreqVector vector = reader.get(0, "f50");
assertTrue(vector == null);
} catch (IOException e) {
assertTrue(false);
}
}
}


@@ -1,202 +0,0 @@
package org.apache.lucene.index;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import java.io.IOException;
public class TestTermVectorsWriter extends TestCase {
private String[] testTerms = {"this", "is", "a", "test"};
private String [] testFields = {"f1", "f2", "f3"};
private int[][] positions = new int[testTerms.length][];
private RAMDirectory dir = new RAMDirectory();
private String seg = "testSegment";
private FieldInfos fieldInfos = new FieldInfos();
public TestTermVectorsWriter(String s) {
super(s);
}
protected void setUp() {
for (int i = 0; i < testFields.length; i++) {
fieldInfos.add(testFields[i], true, true);
}
for (int i = 0; i < testTerms.length; i++) {
positions[i] = new int[5];
for (int j = 0; j < positions[i].length; j++) {
positions[i][j] = i * 100;
}
}
}
protected void tearDown() {
}
public void test() {
assertTrue(dir != null);
assertTrue(positions != null);
}
/*public void testWriteNoPositions() {
try {
TermVectorsWriter writer = new TermVectorsWriter(dir, seg, 50);
writer.openDocument();
assertTrue(writer.isDocumentOpen() == true);
writer.openField(0);
assertTrue(writer.isFieldOpen() == true);
for (int i = 0; i < testTerms.length; i++) {
writer.addTerm(testTerms[i], i);
}
writer.closeField();
writer.closeDocument();
writer.close();
assertTrue(writer.isDocumentOpen() == false);
//Check to see the files were created
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVD_EXTENSION));
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVX_EXTENSION));
//Now read it back in
TermVectorsReader reader = new TermVectorsReader(dir, seg);
assertTrue(reader != null);
checkTermVector(reader, 0, 0);
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
} */
public void testWriter() {
try {
TermVectorsWriter writer = new TermVectorsWriter(dir, seg, fieldInfos);
writer.openDocument();
assertTrue(writer.isDocumentOpen() == true);
writeField(writer, testFields[0]);
writer.closeDocument();
writer.close();
assertTrue(writer.isDocumentOpen() == false);
//Check to see the files were created
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVD_EXTENSION));
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVX_EXTENSION));
//Now read it back in
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
assertTrue(reader != null);
checkTermVector(reader, 0, testFields[0]);
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
}
private void checkTermVector(TermVectorsReader reader, int docNum, String field) throws IOException {
TermFreqVector vector = reader.get(docNum, field);
assertTrue(vector != null);
String[] terms = vector.getTerms();
assertTrue(terms != null);
assertTrue(terms.length == testTerms.length);
for (int i = 0; i < terms.length; i++) {
String term = terms[i];
assertTrue(term.equals(testTerms[i]));
}
}
/**
* Test one document, multiple fields
*/
public void testMultipleFields() {
try {
TermVectorsWriter writer = new TermVectorsWriter(dir, seg, fieldInfos);
writeDocument(writer, testFields.length);
writer.close();
assertTrue(writer.isDocumentOpen() == false);
//Check to see the files were created
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVD_EXTENSION));
assertTrue(dir.fileExists(seg + TermVectorsWriter.TVX_EXTENSION));
//Now read it back in
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
assertTrue(reader != null);
for (int j = 0; j < testFields.length; j++) {
checkTermVector(reader, 0, testFields[j]);
}
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
}
private void writeDocument(TermVectorsWriter writer, int numFields) throws IOException {
writer.openDocument();
assertTrue(writer.isDocumentOpen() == true);
for (int j = 0; j < numFields; j++) {
writeField(writer, testFields[j]);
}
writer.closeDocument();
assertTrue(writer.isDocumentOpen() == false);
}
/**
*
* @param writer The writer to write to
* @param f The field to write
* @throws IOException
*/
private void writeField(TermVectorsWriter writer, String f) throws IOException {
writer.openField(f);
assertTrue(writer.isFieldOpen() == true);
for (int i = 0; i < testTerms.length; i++) {
writer.addTerm(testTerms[i], i);
}
writer.closeField();
}
public void testMultipleDocuments() {
try {
TermVectorsWriter writer = new TermVectorsWriter(dir, seg, fieldInfos);
assertTrue(writer != null);
for (int i = 0; i < 10; i++) {
writeDocument(writer, testFields.length);
}
writer.close();
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
//Do some arbitrary tests
try {
TermVectorsReader reader = new TermVectorsReader(dir, seg, fieldInfos);
for (int i = 0; i < 10; i++) {
assertTrue(reader != null);
checkTermVector(reader, 5, testFields[0]);
checkTermVector(reader, 2, testFields[2]);
}
} catch (IOException e) {
e.printStackTrace();
assertTrue(false);
}
}
}


@@ -1,21 +0,0 @@
package org.apache.lucene.index.store;
import junit.framework.TestCase;
import org.apache.lucene.store.FSDirectory;
import java.io.IOException;
abstract public class FSDirectoryTestCase extends TestCase {
private FSDirectory directory;
protected final FSDirectory getDirectory() throws IOException {
return getDirectory(false);
}
protected final FSDirectory getDirectory(boolean create) throws IOException {
if (directory == null) {
directory = FSDirectory.getDirectory(System.getProperty("test.index.dir"), create);
}
return directory;
}
}


@@ -1,495 +0,0 @@
package org.apache.lucene.queryParser;
/**
* Copyright 2002-2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseTokenizer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.DateField;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RangeQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import java.io.IOException;
import java.io.Reader;
import java.text.DateFormat;
import java.util.Calendar;
/**
* Tests QueryParser.
*/
public class TestQueryParser extends TestCase {
public static Analyzer qpAnalyzer = new QPTestAnalyzer();
public static class QPTestFilter extends TokenFilter {
/**
* Filter which discards the token 'stop' and which expands the
* token 'phrase' into 'phrase1 phrase2'
*/
public QPTestFilter(TokenStream in) {
super(in);
}
boolean inPhrase = false;
int savedStart = 0, savedEnd = 0;
public Token next() throws IOException {
if (inPhrase) {
inPhrase = false;
return new Token("phrase2", savedStart, savedEnd);
} else
for (Token token = input.next(); token != null; token = input.next()) {
if (token.termText().equals("phrase")) {
inPhrase = true;
savedStart = token.startOffset();
savedEnd = token.endOffset();
return new Token("phrase1", savedStart, savedEnd);
} else if (!token.termText().equals("stop"))
return token;
}
return null;
}
}
public static class QPTestAnalyzer extends Analyzer {
/** Filters LowerCaseTokenizer with StopFilter. */
public final TokenStream tokenStream(String fieldName, Reader reader) {
return new QPTestFilter(new LowerCaseTokenizer(reader));
}
}
public static class QPTestParser extends QueryParser {
public QPTestParser(String f, Analyzer a) {
super(f, a);
}
protected Query getFuzzyQuery(String field, String termStr, float minSimilarity) throws ParseException {
throw new ParseException("Fuzzy queries not allowed");
}
protected Query getWildcardQuery(String field, String termStr) throws ParseException {
throw new ParseException("Wildcard queries not allowed");
}
}
private int originalMaxClauses;
public void setUp() {
originalMaxClauses = BooleanQuery.getMaxClauseCount();
}
public QueryParser getParser(Analyzer a) throws Exception {
if (a == null)
a = new SimpleAnalyzer();
QueryParser qp = new QueryParser("field", a);
qp.setOperator(QueryParser.DEFAULT_OPERATOR_OR);
return qp;
}
public Query getQuery(String query, Analyzer a) throws Exception {
return getParser(a).parse(query);
}
public void assertQueryEquals(String query, Analyzer a, String result)
throws Exception {
Query q = getQuery(query, a);
String s = q.toString("field");
if (!s.equals(result)) {
fail("Query /" + query + "/ yielded /" + s
+ "/, expecting /" + result + "/");
}
}
public void assertWildcardQueryEquals(String query, boolean lowercase, String result)
throws Exception {
QueryParser qp = getParser(null);
qp.setLowercaseWildcardTerms(lowercase);
Query q = qp.parse(query);
String s = q.toString("field");
if (!s.equals(result)) {
fail("WildcardQuery /" + query + "/ yielded /" + s
+ "/, expecting /" + result + "/");
}
}
public Query getQueryDOA(String query, Analyzer a)
throws Exception {
if (a == null)
a = new SimpleAnalyzer();
QueryParser qp = new QueryParser("field", a);
qp.setOperator(QueryParser.DEFAULT_OPERATOR_AND);
return qp.parse(query);
}
public void assertQueryEqualsDOA(String query, Analyzer a, String result)
throws Exception {
Query q = getQueryDOA(query, a);
String s = q.toString("field");
if (!s.equals(result)) {
fail("Query /" + query + "/ yielded /" + s
+ "/, expecting /" + result + "/");
}
}
public void testSimple() throws Exception {
assertQueryEquals("term term term", null, "term term term");
assertQueryEquals("türm term term", null, "türm term term");
assertQueryEquals("ümlaut", null, "ümlaut");
assertQueryEquals("a AND b", null, "+a +b");
assertQueryEquals("(a AND b)", null, "+a +b");
assertQueryEquals("c OR (a AND b)", null, "c (+a +b)");
assertQueryEquals("a AND NOT b", null, "+a -b");
assertQueryEquals("a AND -b", null, "+a -b");
assertQueryEquals("a AND !b", null, "+a -b");
assertQueryEquals("a && b", null, "+a +b");
assertQueryEquals("a && ! b", null, "+a -b");
assertQueryEquals("a OR b", null, "a b");
assertQueryEquals("a || b", null, "a b");
assertQueryEquals("a OR !b", null, "a -b");
assertQueryEquals("a OR ! b", null, "a -b");
assertQueryEquals("a OR -b", null, "a -b");
assertQueryEquals("+term -term term", null, "+term -term term");
assertQueryEquals("foo:term AND field:anotherTerm", null,
"+foo:term +anotherterm");
assertQueryEquals("term AND \"phrase phrase\"", null,
"+term +\"phrase phrase\"");
assertQueryEquals("\"hello there\"", null, "\"hello there\"");
assertTrue(getQuery("a AND b", null) instanceof BooleanQuery);
assertTrue(getQuery("hello", null) instanceof TermQuery);
assertTrue(getQuery("\"hello there\"", null) instanceof PhraseQuery);
assertQueryEquals("germ term^2.0", null, "germ term^2.0");
assertQueryEquals("(term)^2.0", null, "term^2.0");
assertQueryEquals("(germ term)^2.0", null, "(germ term)^2.0");
assertQueryEquals("term^2.0", null, "term^2.0");
assertQueryEquals("term^2", null, "term^2.0");
assertQueryEquals("\"germ term\"^2.0", null, "\"germ term\"^2.0");
assertQueryEquals("\"term germ\"^2", null, "\"term germ\"^2.0");
assertQueryEquals("(foo OR bar) AND (baz OR boo)", null,
"+(foo bar) +(baz boo)");
assertQueryEquals("((a OR b) AND NOT c) OR d", null,
"(+(a b) -c) d");
assertQueryEquals("+(apple \"steve jobs\") -(foo bar baz)", null,
"+(apple \"steve jobs\") -(foo bar baz)");
assertQueryEquals("+title:(dog OR cat) -author:\"bob dole\"", null,
"+(title:dog title:cat) -author:\"bob dole\"");
}
public void testPunct() throws Exception {
Analyzer a = new WhitespaceAnalyzer();
assertQueryEquals("a&b", a, "a&b");
assertQueryEquals("a&&b", a, "a&&b");
assertQueryEquals(".NET", a, ".NET");
}
public void testSlop() throws Exception {
assertQueryEquals("\"term germ\"~2", null, "\"term germ\"~2");
assertQueryEquals("\"term germ\"~2 flork", null, "\"term germ\"~2 flork");
assertQueryEquals("\"term\"~2", null, "term");
assertQueryEquals("\" \"~2 germ", null, "germ");
assertQueryEquals("\"term germ\"~2^2", null, "\"term germ\"~2^2.0");
}
public void testNumber() throws Exception {
// The numbers go away because SimpleAnalyzer ignores them
assertQueryEquals("3", null, "");
assertQueryEquals("term 1.0 1 2", null, "term");
assertQueryEquals("term term1 term2", null, "term term term");
Analyzer a = new StandardAnalyzer();
assertQueryEquals("3", a, "3");
assertQueryEquals("term 1.0 1 2", a, "term 1.0 1 2");
assertQueryEquals("term term1 term2", a, "term term1 term2");
}
public void testWildcard() throws Exception {
assertQueryEquals("term*", null, "term*");
assertQueryEquals("term*^2", null, "term*^2.0");
assertQueryEquals("term~", null, "term~0.5");
assertQueryEquals("term~0.7", null, "term~0.7");
assertQueryEquals("term~^2", null, "term^2.0~0.5");
assertQueryEquals("term^2~", null, "term^2.0~0.5");
assertQueryEquals("term*germ", null, "term*germ");
assertQueryEquals("term*germ^3", null, "term*germ^3.0");
assertTrue(getQuery("term*", null) instanceof PrefixQuery);
assertTrue(getQuery("term*^2", null) instanceof PrefixQuery);
assertTrue(getQuery("term~", null) instanceof FuzzyQuery);
assertTrue(getQuery("term~0.7", null) instanceof FuzzyQuery);
FuzzyQuery fq = (FuzzyQuery)getQuery("term~0.7", null);
assertEquals(0.7f, fq.getMinSimilarity(), 0.1f);
assertEquals(0, fq.getPrefixLength());
fq = (FuzzyQuery)getQuery("term~", null);
assertEquals(0.5f, fq.getMinSimilarity(), 0.1f);
assertEquals(0, fq.getPrefixLength());
try {
getQuery("term~1.1", null); // value > 1, throws exception
fail();
} catch(ParseException pe) {
// expected exception
}
assertTrue(getQuery("term*germ", null) instanceof WildcardQuery);
/* Tests to see that wild card terms are (or are not) properly
* lower-cased with proper parser configuration
*/
// First prefix queries:
assertWildcardQueryEquals("term*", true, "term*");
assertWildcardQueryEquals("Term*", true, "term*");
assertWildcardQueryEquals("TERM*", true, "term*");
assertWildcardQueryEquals("term*", false, "term*");
assertWildcardQueryEquals("Term*", false, "Term*");
assertWildcardQueryEquals("TERM*", false, "TERM*");
// Then 'full' wildcard queries:
assertWildcardQueryEquals("te?m", true, "te?m");
assertWildcardQueryEquals("Te?m", true, "te?m");
assertWildcardQueryEquals("TE?M", true, "te?m");
assertWildcardQueryEquals("Te?m*gerM", true, "te?m*germ");
assertWildcardQueryEquals("te?m", false, "te?m");
assertWildcardQueryEquals("Te?m", false, "Te?m");
assertWildcardQueryEquals("TE?M", false, "TE?M");
assertWildcardQueryEquals("Te?m*gerM", false, "Te?m*gerM");
}
public void testQPA() throws Exception {
assertQueryEquals("term term term", qpAnalyzer, "term term term");
assertQueryEquals("term +stop term", qpAnalyzer, "term term");
assertQueryEquals("term -stop term", qpAnalyzer, "term term");
assertQueryEquals("drop AND stop AND roll", qpAnalyzer, "+drop +roll");
assertQueryEquals("term phrase term", qpAnalyzer,
"term \"phrase1 phrase2\" term");
assertQueryEquals("term AND NOT phrase term", qpAnalyzer,
"+term -\"phrase1 phrase2\" term");
assertQueryEquals("stop", qpAnalyzer, "");
assertTrue(getQuery("term term term", qpAnalyzer) instanceof BooleanQuery);
assertTrue(getQuery("term +stop", qpAnalyzer) instanceof TermQuery);
}
public void testRange() throws Exception {
assertQueryEquals("[ a TO z]", null, "[a TO z]");
assertTrue(getQuery("[ a TO z]", null) instanceof RangeQuery);
assertQueryEquals("[ a TO z ]", null, "[a TO z]");
assertQueryEquals("{ a TO z}", null, "{a TO z}");
assertQueryEquals("{ a TO z }", null, "{a TO z}");
assertQueryEquals("{ a TO z }^2.0", null, "{a TO z}^2.0");
assertQueryEquals("[ a TO z] OR bar", null, "[a TO z] bar");
assertQueryEquals("[ a TO z] AND bar", null, "+[a TO z] +bar");
assertQueryEquals("( bar blar { a TO z}) ", null, "bar blar {a TO z}");
assertQueryEquals("gack ( bar blar { a TO z}) ", null, "gack (bar blar {a TO z})");
}
public String getDate(String s) throws Exception {
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
return DateField.dateToString(df.parse(s));
}
public String getLocalizedDate(int year, int month, int day) {
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
Calendar calendar = Calendar.getInstance();
calendar.set(year, month, day);
return df.format(calendar.getTime());
}
public void testDateRange() throws Exception {
String startDate = getLocalizedDate(2002, 1, 1);
String endDate = getLocalizedDate(2002, 1, 4);
assertQueryEquals("[ " + startDate + " TO " + endDate + "]", null,
"[" + getDate(startDate) + " TO " + getDate(endDate) + "]");
assertQueryEquals("{ " + startDate + " " + endDate + " }", null,
"{" + getDate(startDate) + " TO " + getDate(endDate) + "}");
}
public void testEscaped() throws Exception {
Analyzer a = new WhitespaceAnalyzer();
/*assertQueryEquals("\\[brackets", a, "\\[brackets");
assertQueryEquals("\\[brackets", null, "brackets");
assertQueryEquals("\\\\", a, "\\\\");
assertQueryEquals("\\+blah", a, "\\+blah");
assertQueryEquals("\\(blah", a, "\\(blah");
assertQueryEquals("\\-blah", a, "\\-blah");
assertQueryEquals("\\!blah", a, "\\!blah");
assertQueryEquals("\\{blah", a, "\\{blah");
assertQueryEquals("\\}blah", a, "\\}blah");
assertQueryEquals("\\:blah", a, "\\:blah");
assertQueryEquals("\\^blah", a, "\\^blah");
assertQueryEquals("\\[blah", a, "\\[blah");
assertQueryEquals("\\]blah", a, "\\]blah");
assertQueryEquals("\\\"blah", a, "\\\"blah");
assertQueryEquals("\\(blah", a, "\\(blah");
assertQueryEquals("\\)blah", a, "\\)blah");
assertQueryEquals("\\~blah", a, "\\~blah");
assertQueryEquals("\\*blah", a, "\\*blah");
assertQueryEquals("\\?blah", a, "\\?blah");
//assertQueryEquals("foo \\&\\& bar", a, "foo \\&\\& bar");
//assertQueryEquals("foo \\|| bar", a, "foo \\|| bar");
//assertQueryEquals("foo \\AND bar", a, "foo \\AND bar");*/
assertQueryEquals("a\\-b:c", a, "a-b:c");
assertQueryEquals("a\\+b:c", a, "a+b:c");
assertQueryEquals("a\\:b:c", a, "a:b:c");
assertQueryEquals("a\\\\b:c", a, "a\\b:c");
assertQueryEquals("a:b\\-c", a, "a:b-c");
assertQueryEquals("a:b\\+c", a, "a:b+c");
assertQueryEquals("a:b\\:c", a, "a:b:c");
assertQueryEquals("a:b\\\\c", a, "a:b\\c");
assertQueryEquals("a:b\\-c*", a, "a:b-c*");
assertQueryEquals("a:b\\+c*", a, "a:b+c*");
assertQueryEquals("a:b\\:c*", a, "a:b:c*");
assertQueryEquals("a:b\\\\c*", a, "a:b\\c*");
assertQueryEquals("a:b\\-?c", a, "a:b-?c");
assertQueryEquals("a:b\\+?c", a, "a:b+?c");
assertQueryEquals("a:b\\:?c", a, "a:b:?c");
assertQueryEquals("a:b\\\\?c", a, "a:b\\?c");
assertQueryEquals("a:b\\-c~", a, "a:b-c~0.5");
assertQueryEquals("a:b\\+c~", a, "a:b+c~0.5");
assertQueryEquals("a:b\\:c~", a, "a:b:c~0.5");
assertQueryEquals("a:b\\\\c~", a, "a:b\\c~0.5");
assertQueryEquals("[ a\\- TO a\\+ ]", null, "[a- TO a+]");
assertQueryEquals("[ a\\: TO a\\~ ]", null, "[a: TO a~]");
assertQueryEquals("[ a\\\\ TO a\\* ]", null, "[a\\ TO a*]");
}
public void testTabNewlineCarriageReturn()
throws Exception {
assertQueryEqualsDOA("+weltbank +worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("+weltbank\n+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \n+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \n +worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("+weltbank\r+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \r+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \r +worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("+weltbank\r\n+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \r\n+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \r\n +worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \r \n +worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("+weltbank\t+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \t+worlbank", null,
"+weltbank +worlbank");
assertQueryEqualsDOA("weltbank \t +worlbank", null,
"+weltbank +worlbank");
}
public void testSimpleDAO()
throws Exception {
assertQueryEqualsDOA("term term term", null, "+term +term +term");
assertQueryEqualsDOA("term +term term", null, "+term +term +term");
assertQueryEqualsDOA("term term +term", null, "+term +term +term");
assertQueryEqualsDOA("term +term +term", null, "+term +term +term");
assertQueryEqualsDOA("-term term term", null, "-term +term +term");
}
public void testBoost()
throws Exception {
StandardAnalyzer oneStopAnalyzer = new StandardAnalyzer(new String[]{"on"});
QueryParser qp = new QueryParser("field", oneStopAnalyzer);
Query q = qp.parse("on^1.0");
assertNotNull(q);
q = qp.parse("\"hello\"^2.0");
assertNotNull(q);
assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
q = qp.parse("hello^2.0");
assertNotNull(q);
assertEquals(q.getBoost(), (float) 2.0, (float) 0.5);
q = qp.parse("\"on\"^1.0");
assertNotNull(q);
q = QueryParser.parse("the^3", "field", new StandardAnalyzer());
assertNotNull(q);
}
public void testException() throws Exception {
try {
assertQueryEquals("\"some phrase", null, "abc");
fail("ParseException expected, not thrown");
} catch (ParseException expected) {
}
}
public void testCustomQueryParserWildcard() {
try {
new QPTestParser("contents", new WhitespaceAnalyzer()).parse("a?t");
} catch (ParseException expected) {
return;
}
fail("Wildcard queries should not be allowed");
}
public void testCustomQueryParserFuzzy() throws Exception {
try {
new QPTestParser("contents", new WhitespaceAnalyzer()).parse("xunit~");
} catch (ParseException expected) {
return;
}
fail("Fuzzy queries should not be allowed");
}
public void testBooleanQuery() throws Exception {
BooleanQuery.setMaxClauseCount(2);
try {
QueryParser.parse("one two three", "field", new WhitespaceAnalyzer());
fail("ParseException expected due to too many boolean clauses");
} catch (ParseException expected) {
// too many boolean clauses, so ParseException is expected
}
}
public void tearDown() {
BooleanQuery.setMaxClauseCount(originalMaxClauses);
}
}


@@ -1,65 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* 20 May 2004: Factored out of spans tests. Please leave this comment
until this class is eventually also used by tests in the search package.
*/
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Hits;
import junit.framework.TestCase;
import java.io.IOException;
import java.util.Set;
import java.util.TreeSet;
public class CheckHits {
public static void checkHits(
Query query,
String defaultFieldName,
Searcher searcher,
int[] results,
TestCase testCase)
throws IOException {
Hits hits = searcher.search(query);
Set correct = new TreeSet();
for (int i = 0; i < results.length; i++) {
correct.add(new Integer(results[i]));
}
Set actual = new TreeSet();
for (int i = 0; i < hits.length(); i++) {
actual.add(new Integer(hits.id(i)));
}
testCase.assertEquals(query.toString(defaultFieldName), correct, actual);
}
public static void printDocNrs(Hits hits) throws IOException {
System.out.print("new int[] {");
for (int i = 0; i < hits.length(); i++) {
System.out.print(hits.id(i));
if (i != hits.length()-1)
System.out.print(", ");
}
System.out.println("}");
}
}
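A typical call from a test looks like this (query and searcher are assumed to exist; the expected document numbers are illustrative):

CheckHits.checkHits(query, "field", searcher, new int[] { 0, 2, 5 }, this);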


@@ -1,37 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import java.util.BitSet;
public class MockFilter extends Filter {
private boolean wasCalled;
public BitSet bits(IndexReader reader) {
wasCalled = true;
return new BitSet();
}
public void clear() {
wasCalled = false;
}
public boolean wasCalled() {
return wasCalled;
}
}


@@ -1,143 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermEnum;
import java.io.IOException;
import java.io.Serializable;
/**
* An example Comparable for use with the custom sort tests.
* It implements a comparable for "id" sort of values which
* consist of an alphanumeric part and a numeric part, such as:
*
* <p>ABC-123, A-1, A-7, A-100, B-99999</p>
*
* <p>Such values cannot be sorted as strings, since A-100 needs
* to come after A-7.</p>
*
* <p>It could be argued that the "ids" should be rewritten as
* A-0001, A-0100, etc. so they will sort as strings. That is
* a valid alternate way to solve it - but
* this is only supposed to be a simple test case.</p>
*
* <p>Created: Apr 21, 2004 5:34:47 PM</p>
*
* @author Tim Jones
* @version $Id$
* @since 1.4
*/
public class SampleComparable
implements Comparable, Serializable {
String string_part;
Integer int_part;
public SampleComparable (String s) {
int i = s.indexOf ("-");
string_part = s.substring (0, i);
int_part = new Integer (s.substring (i + 1));
}
public int compareTo (Object o) {
SampleComparable otherid = (SampleComparable) o;
int i = string_part.compareTo (otherid.string_part);
if (i == 0) return int_part.compareTo (otherid.int_part);
return i;
}
public static SortComparatorSource getComparatorSource () {
return new SortComparatorSource () {
public ScoreDocComparator newComparator (final IndexReader reader, String fieldname)
throws IOException {
final String field = fieldname.intern ();
final TermEnum enumerator = reader.terms (new Term (fieldname, ""));
try {
return new ScoreDocComparator () {
protected Comparable[] cachedValues = fillCache (reader, enumerator, field);
public int compare (ScoreDoc i, ScoreDoc j) {
return cachedValues[i.doc].compareTo (cachedValues[j.doc]);
}
public Comparable sortValue (ScoreDoc i) {
return cachedValues[i.doc];
}
public int sortType () {
return SortField.CUSTOM;
}
};
} finally {
enumerator.close ();
}
}
/**
* Returns an array of objects which represent the natural order
* of the term values in the given field.
*
* @param reader Terms are in this index.
* @param enumerator Use this to get the term values and TermDocs.
* @param fieldname Comparables should be for this field.
* @return Array of objects representing natural order of terms in field.
* @throws IOException If an error occurs reading the index.
*/
protected Comparable[] fillCache (IndexReader reader, TermEnum enumerator, String fieldname)
throws IOException {
final String field = fieldname.intern ();
Comparable[] retArray = new Comparable[reader.maxDoc ()];
if (retArray.length > 0) {
TermDocs termDocs = reader.termDocs ();
try {
if (enumerator.term () == null) {
throw new RuntimeException ("no terms in field " + field);
}
do {
Term term = enumerator.term ();
if (term.field () != field) break;
Comparable termval = getComparable (term.text ());
termDocs.seek (enumerator);
while (termDocs.next ()) {
retArray[termDocs.doc ()] = termval;
}
} while (enumerator.next ());
} finally {
termDocs.close ();
}
}
return retArray;
}
Comparable getComparable (String termtext) {
return new SampleComparable (termtext);
}
};
}
public static SortComparator getComparator() {
return new SortComparator() {
protected Comparable getComparable (String termtext) {
return new SampleComparable (termtext);
}
};
}
}
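A quick illustration of the ordering this gives (assumed usage, not part of the original file): numeric parts compare numerically, so "A-7" sorts before "A-100", which a plain string comparison gets wrong.

SampleComparable[] ids = {
  new SampleComparable("A-100"),
  new SampleComparable("A-7"),
  new SampleComparable("ABC-123"),
};
java.util.Arrays.sort(ids); // order: A-7, A-100, ABC-123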


@@ -1,104 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.BooleanQuery;
import java.io.IOException;
/**
* @author schnee
* @version $Id$
**/
public class TestBooleanPrefixQuery extends TestCase {
public static void main(String[] args) {
TestRunner.run(suite());
}
public static Test suite() {
return new TestSuite(TestBooleanPrefixQuery.class);
}
public TestBooleanPrefixQuery(String name) {
super(name);
}
public void testMethod() {
RAMDirectory directory = new RAMDirectory();
String[] categories = new String[]{"food",
"foodanddrink",
"foodanddrinkandgoodtimes",
"food and drink"};
Query rw1 = null;
Query rw2 = null;
try {
IndexWriter writer = new IndexWriter(directory, new
WhitespaceAnalyzer(), true);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(Field.Keyword("category", categories[i]));
writer.addDocument(doc);
}
writer.close();
IndexReader reader = IndexReader.open(directory);
PrefixQuery query = new PrefixQuery(new Term("category", "foo"));
rw1 = query.rewrite(reader);
BooleanQuery bq = new BooleanQuery();
bq.add(query, true, false);
rw2 = bq.rewrite(reader);
} catch (IOException e) {
fail(e.getMessage());
}
BooleanQuery bq1 = null;
if (rw1 instanceof BooleanQuery) {
bq1 = (BooleanQuery) rw1;
}
BooleanQuery bq2 = null;
if (rw2 instanceof BooleanQuery) {
bq2 = (BooleanQuery) rw2;
} else {
fail("Rewrite");
}
assertEquals("Number of Clauses Mismatch", bq1.getClauses().length,
bq2.getClauses().length);
}
}

@@ -1,48 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
public class TestCachingWrapperFilter extends TestCase {
public void testCachingWorks() throws Exception {
Directory dir = new RAMDirectory();
IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(), true);
writer.close();
IndexReader reader = IndexReader.open(dir);
MockFilter filter = new MockFilter();
CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
// first time, nested filter is called
cacher.bits(reader);
assertTrue("first time", filter.wasCalled());
// second time, nested filter should not be called
filter.clear();
cacher.bits(reader);
assertFalse("second time", filter.wasCalled());
reader.close();
}
}

@@ -1,163 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.DateField;
import java.io.IOException;
import junit.framework.TestCase;
/**
* DateFilter JUnit tests.
*
* @author Otis Gospodnetic
* @version $Revision$
*/
public class TestDateFilter
extends TestCase
{
public TestDateFilter(String name)
{
super(name);
}
/**
*
*/
public static void testBefore()
throws IOException
{
// create an index
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
long now = System.currentTimeMillis();
Document doc = new Document();
// add time that is in the past
doc.add(Field.Keyword("datefield", DateField.timeToString(now - 1000)));
doc.add(Field.Text("body", "Today is a very sunny day in New York City"));
writer.addDocument(doc);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
// filter that should preserve matches
DateFilter df1 = DateFilter.Before("datefield", now);
// filter that should discard matches
DateFilter df2 = DateFilter.Before("datefield", now - 999999);
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exist
Query query2 = new TermQuery(new Term("body", "sunny"));
Hits result;
// ensure that queries return expected results without DateFilter first
result = searcher.search(query1);
assertEquals(0, result.length());
result = searcher.search(query2);
assertEquals(1, result.length());
// run queries with DateFilter
result = searcher.search(query1, df1);
assertEquals(0, result.length());
result = searcher.search(query1, df2);
assertEquals(0, result.length());
result = searcher.search(query2, df1);
assertEquals(1, result.length());
result = searcher.search(query2, df2);
assertEquals(0, result.length());
}
/**
*
*/
public static void testAfter()
throws IOException
{
// create an index
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
long now = System.currentTimeMillis();
Document doc = new Document();
// add time that is in the future
doc.add(Field.Keyword("datefield", DateField.timeToString(now + 888888)));
doc.add(Field.Text("body", "Today is a very sunny day in New York City"));
writer.addDocument(doc);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
// filter that should preserve matches
DateFilter df1 = DateFilter.After("datefield", now);
// filter that should discard matches
DateFilter df2 = DateFilter.After("datefield", now + 999999);
// search something that doesn't exist with DateFilter
Query query1 = new TermQuery(new Term("body", "NoMatchForThis"));
// search for something that does exist
Query query2 = new TermQuery(new Term("body", "sunny"));
Hits result;
// ensure that queries return expected results without DateFilter first
result = searcher.search(query1);
assertEquals(0, result.length());
result = searcher.search(query2);
assertEquals(1, result.length());
// run queries with DateFilter
result = searcher.search(query1, df1);
assertEquals(0, result.length());
result = searcher.search(query1, df2);
assertEquals(0, result.length());
result = searcher.search(query2, df1);
assertEquals(1, result.length());
result = searcher.search(query2, df2);
assertEquals(0, result.length());
}
}

@@ -1,83 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
/** Document boost unit test.
*
* @author Doug Cutting
* @version $Revision$
*/
public class TestDocBoost extends TestCase {
public TestDocBoost(String name) {
super(name);
}
public void testDocBoost() throws Exception {
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
Field f1 = Field.Text("field", "word");
Field f2 = Field.Text("field", "word");
f2.setBoost(2.0f);
Document d1 = new Document();
Document d2 = new Document();
Document d3 = new Document();
Document d4 = new Document();
d3.setBoost(3.0f);
d4.setBoost(2.0f);
d1.add(f1); // boost = 1
d2.add(f2); // boost = 2
d3.add(f1); // boost = 3
d4.add(f2); // boost = 4
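// note: the effective boost is presumably the product of document boost
// and field boost, which yields the values 1, 2, 3 and 4 noted above.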
writer.addDocument(d1);
writer.addDocument(d2);
writer.addDocument(d3);
writer.addDocument(d4);
writer.optimize();
writer.close();
final float[] scores = new float[4];
new IndexSearcher(store).search
(new TermQuery(new Term("field", "word")),
new HitCollector() {
public final void collect(int doc, float score) {
scores[doc] = score;
}
});
float lastScore = 0.0f;
for (int i = 0; i < 4; i++) {
assertTrue(scores[i] > lastScore);
lastScore = scores[i];
}
}
}

@@ -1,132 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import java.util.BitSet;
import java.io.IOException;
/**
* FilteredQuery JUnit tests.
*
* <p>Created: Apr 21, 2004 1:21:46 PM
*
* @author Tim Jones
* @version $Id$
* @since 1.4
*/
public class TestFilteredQuery
extends TestCase {
private IndexSearcher searcher;
private RAMDirectory directory;
private Query query;
private Filter filter;
public void setUp()
throws Exception {
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter (directory, new WhitespaceAnalyzer(), true);
Document doc = new Document();
doc.add (Field.Text ("field", "one two three four five"));
doc.add (Field.Text ("sorter", "b"));
writer.addDocument (doc);
doc = new Document();
doc.add (Field.Text ("field", "one two three four"));
doc.add (Field.Text ("sorter", "d"));
writer.addDocument (doc);
doc = new Document();
doc.add (Field.Text ("field", "one two three y"));
doc.add (Field.Text ("sorter", "a"));
writer.addDocument (doc);
doc = new Document();
doc.add (Field.Text ("field", "one two x"));
doc.add (Field.Text ("sorter", "c"));
writer.addDocument (doc);
writer.optimize ();
writer.close ();
searcher = new IndexSearcher (directory);
query = new TermQuery (new Term ("field", "three"));
filter = new Filter() {
public BitSet bits (IndexReader reader) throws IOException {
BitSet bitset = new BitSet(5);
bitset.set (1);
bitset.set (3);
return bitset;
}
};
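// note: this hard-coded filter admits only documents 1 and 3, so every
// search below is effectively intersected with that pair.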
}
public void tearDown()
throws Exception {
searcher.close();
directory.close();
}
public void testFilteredQuery()
throws Exception {
Query filteredquery = new FilteredQuery (query, filter);
Hits hits = searcher.search (filteredquery);
assertEquals (1, hits.length());
assertEquals (1, hits.id(0));
hits = searcher.search (filteredquery, new Sort("sorter"));
assertEquals (1, hits.length());
assertEquals (1, hits.id(0));
filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "one")), filter);
hits = searcher.search (filteredquery);
assertEquals (2, hits.length());
filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "x")), filter);
hits = searcher.search (filteredquery);
assertEquals (1, hits.length());
assertEquals (3, hits.id(0));
filteredquery = new FilteredQuery (new TermQuery (new Term ("field", "y")), filter);
hits = searcher.search (filteredquery);
assertEquals (0, hits.length());
}
/**
* This tests FilteredQuery's rewrite correctness
*/
public void testRangeQuery() throws Exception {
RangeQuery rq = new RangeQuery(
new Term("sorter", "b"), new Term("sorter", "d"), true);
Query filteredquery = new FilteredQuery(rq, filter);
Hits hits = searcher.search(filteredquery);
assertEquals(2, hits.length());
}
}

@@ -1,137 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
/**
* Tests {@link FuzzyQuery}.
*
* @author Daniel Naber
*/
public class TestFuzzyQuery extends TestCase {
public void testDefaultFuzziness() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
addDoc("aaaaa", writer);
addDoc("aaaab", writer);
addDoc("aaabb", writer);
addDoc("aabbb", writer);
addDoc("abbbb", writer);
addDoc("bbbbb", writer);
addDoc("ddddd", writer);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
FuzzyQuery query = new FuzzyQuery(new Term("field", "aaaaa"));
Hits hits = searcher.search(query);
assertEquals(3, hits.length());
// not similar enough:
query = new FuzzyQuery(new Term("field", "xxxxx"));
hits = searcher.search(query);
assertEquals(0, hits.length());
query = new FuzzyQuery(new Term("field", "aaccc")); // edit distance to "aaaaa" = 3
hits = searcher.search(query);
assertEquals(0, hits.length());
// query identical to a word in the index:
query = new FuzzyQuery(new Term("field", "aaaaa"));
hits = searcher.search(query);
assertEquals(3, hits.length());
assertEquals(hits.doc(0).get("field"), ("aaaaa"));
// default allows for up to two edits:
assertEquals(hits.doc(1).get("field"), ("aaaab"));
assertEquals(hits.doc(2).get("field"), ("aaabb"));
// query similar to a word in the index:
query = new FuzzyQuery(new Term("field", "aaaac"));
hits = searcher.search(query);
assertEquals(3, hits.length());
assertEquals(hits.doc(0).get("field"), ("aaaaa"));
assertEquals(hits.doc(1).get("field"), ("aaaab"));
assertEquals(hits.doc(2).get("field"), ("aaabb"));
query = new FuzzyQuery(new Term("field", "ddddX"));
hits = searcher.search(query);
assertEquals(1, hits.length());
assertEquals(hits.doc(0).get("field"), ("ddddd"));
// different field = no match:
query = new FuzzyQuery(new Term("anotherfield", "ddddX"));
hits = searcher.search(query);
assertEquals(0, hits.length());
searcher.close();
directory.close();
}
public void testDefaultFuzzinessLong() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
addDoc("aaaaaaa", writer);
addDoc("segment", writer);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
FuzzyQuery query;
// not similar enough:
query = new FuzzyQuery(new Term("field", "xxxxx"));
Hits hits = searcher.search(query);
assertEquals(0, hits.length());
// edit distance to "aaaaaaa" = 3; this matches because the string is longer than
// in testDefaultFuzziness so a bigger difference is allowed:
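// note: with the assumed default minimum similarity of 0.5, a term of
// length n tolerates roughly n/2 edits, since similarity is computed as
// 1 - editDistance / min(queryLength, termLength).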
query = new FuzzyQuery(new Term("field", "aaaaccc"));
hits = searcher.search(query);
assertEquals(1, hits.length());
assertEquals(hits.doc(0).get("field"), ("aaaaaaa"));
// no match, more than half of the characters are wrong:
query = new FuzzyQuery(new Term("field", "aaacccc"));
hits = searcher.search(query);
assertEquals(0, hits.length());
// "student" and "stellent" are indeed similar to "segment" by default:
query = new FuzzyQuery(new Term("field", "student"));
hits = searcher.search(query);
assertEquals(1, hits.length());
query = new FuzzyQuery(new Term("field", "stellent"));
hits = searcher.search(query);
assertEquals(1, hits.length());
searcher.close();
directory.close();
}
private void addDoc(String text, IndexWriter writer) throws IOException {
Document doc = new Document();
doc.add(Field.Text("field", text));
writer.addDocument(doc);
}
}

@@ -1,206 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import junit.framework.TestCase;
import java.io.IOException;
/**
* Tests {@link MultiSearcher} class.
*
* @version $Id$
*/
public class TestMultiSearcher extends TestCase
{
public TestMultiSearcher(String name)
{
super(name);
}
/**
* Returns a new instance of the concrete MultiSearcher class
* used in this test.
*/
protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers) throws IOException {
return new MultiSearcher(searchers);
}
public void testEmptyIndex()
throws Exception
{
// creating two directories for indices
Directory indexStoreA = new RAMDirectory();
Directory indexStoreB = new RAMDirectory();
// creating a document to store
Document lDoc = new Document();
lDoc.add(Field.Text("fulltext", "Once upon a time....."));
lDoc.add(Field.Keyword("id", "doc1"));
lDoc.add(Field.Keyword("handle", "1"));
// creating a document to store
Document lDoc2 = new Document();
lDoc2.add(Field.Text("fulltext", "in a galaxy far far away....."));
lDoc2.add(Field.Keyword("id", "doc2"));
lDoc2.add(Field.Keyword("handle", "1"));
// creating a document to store
Document lDoc3 = new Document();
lDoc3.add(Field.Text("fulltext", "a bizarre bug manifested itself...."));
lDoc3.add(Field.Keyword("id", "doc3"));
lDoc3.add(Field.Keyword("handle", "1"));
// creating an index writer for the first index
IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(), true);
// creating an index writer for the second index, but writing nothing
IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), true);
//--------------------------------------------------------------------
// scenario 1
//--------------------------------------------------------------------
// writing the documents to the first index
writerA.addDocument(lDoc);
writerA.addDocument(lDoc2);
writerA.addDocument(lDoc3);
writerA.optimize();
writerA.close();
// closing the second index
writerB.close();
// creating the query
Query query = QueryParser.parse("handle:1", "fulltext", new StandardAnalyzer());
// building the searchables
Searcher[] searchers = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
searchers[0] = new IndexSearcher(indexStoreB);
searchers[1] = new IndexSearcher(indexStoreA);
// creating the multiSearcher
Searcher mSearcher = getMultiSearcherInstance(searchers);
// performing the search
Hits hits = mSearcher.search(query);
assertEquals(3, hits.length());
try {
// iterating over the hit documents
for (int i = 0; i < hits.length(); i++) {
Document d = hits.doc(i);
}
}
catch (ArrayIndexOutOfBoundsException e)
{
fail("ArrayIndexOutOfBoundsException thrown: " + e.getMessage());
e.printStackTrace();
} finally{
mSearcher.close();
}
//--------------------------------------------------------------------
// scenario 2
//--------------------------------------------------------------------
// adding one document to the empty index
writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), false);
writerB.addDocument(lDoc);
writerB.optimize();
writerB.close();
// building the searchables
Searcher[] searchers2 = new Searcher[2];
// VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index
searchers2[0] = new IndexSearcher(indexStoreB);
searchers2[1] = new IndexSearcher(indexStoreA);
// creating the multiSearcher
Searcher mSearcher2 = getMultiSearcherInstance(searchers2);
// performing the same search
Hits hits2 = mSearcher2.search(query);
assertEquals(4, hits2.length());
try {
// iterating over the hit documents
for (int i = 0; i < hits2.length(); i++) {
// no exception should happen at this point
Document d = hits2.doc(i);
}
}
catch (Exception e)
{
fail("Exception thrown: " + e.getMessage());
e.printStackTrace();
} finally{
mSearcher2.close();
}
//--------------------------------------------------------------------
// scenario 3
//--------------------------------------------------------------------
// deleting the document just added; this will cause a different exception to take place
Term term = new Term("id", "doc1");
IndexReader readerB = IndexReader.open(indexStoreB);
readerB.delete(term);
readerB.close();
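// note: delete() only marks the document as deleted; the optimize()
// below presumably forces a segment merge that removes it physically.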
// optimizing the index with the writer
writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), false);
writerB.optimize();
writerB.close();
// building the searchables
Searcher[] searchers3 = new Searcher[2];
searchers3[0] = new IndexSearcher(indexStoreB);
searchers3[1] = new IndexSearcher(indexStoreA);
// creating the multiSearcher
Searcher mSearcher3 = getMultiSearcherInstance(searchers3);
// performing the same search
Hits hits3 = mSearcher3.search(query);
assertEquals(3, hits3.length());
try {
// iterating over the hit documents
for (int i = 0; i < hits3.length(); i++) {
Document d = hits3.doc(i);
}
}
catch (IOException e)
{
fail("IOException thrown: " + e.getMessage());
e.printStackTrace();
} finally{
mSearcher3.close();
}
}
}

@@ -1,58 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import java.util.Vector;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
/** Similarity unit test.
*
* @author Doug Cutting
* @version $Revision$
*/
public class TestNot extends TestCase {
public TestNot(String name) {
super(name);
}
public void testNot() throws Exception {
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
Document d1 = new Document();
d1.add(Field.Text("field", "a b"));
writer.addDocument(d1);
writer.optimize();
writer.close();
Searcher searcher = new IndexSearcher(store);
Query query = QueryParser.parse("a NOT b", "field", new SimpleAnalyzer());
//System.out.println(query);
Hits hits = searcher.search(query);
assertEquals(0, hits.length());
}
}

@@ -1,35 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
/**
* Unit tests for the ParallelMultiSearcher
*/
public class TestParallelMultiSearcher extends TestMultiSearcher {
public TestParallelMultiSearcher(String name) {
super(name);
}
protected MultiSearcher getMultiSearcherInstance(Searcher[] searchers)
throws IOException {
return new ParallelMultiSearcher(searchers);
}
}

@@ -1,104 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import junit.framework.TestCase;
import java.io.IOException;
import java.util.LinkedList;
/**
* This class tests PhrasePrefixQuery class.
*
* @author Otis Gospodnetic
* @version $Id$
*/
public class TestPhrasePrefixQuery
extends TestCase
{
public TestPhrasePrefixQuery(String name)
{
super(name);
}
/**
*
*/
public void testPhrasePrefix()
throws IOException
{
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
Document doc1 = new Document();
Document doc2 = new Document();
Document doc3 = new Document();
Document doc4 = new Document();
Document doc5 = new Document();
doc1.add(Field.Text("body", "blueberry pie"));
doc2.add(Field.Text("body", "blueberry strudel"));
doc3.add(Field.Text("body", "blueberry pizza"));
doc4.add(Field.Text("body", "blueberry chewing gum"));
doc5.add(Field.Text("body", "piccadilly circus"));
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.addDocument(doc4);
writer.addDocument(doc5);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(indexStore);
PhrasePrefixQuery query1 = new PhrasePrefixQuery();
PhrasePrefixQuery query2 = new PhrasePrefixQuery();
query1.add(new Term("body", "blueberry"));
query2.add(new Term("body", "strawberry"));
LinkedList termsWithPrefix = new LinkedList();
IndexReader ir = IndexReader.open(indexStore);
// this TermEnum gives "piccadilly", "pie" and "pizza".
String prefix = "pi";
TermEnum te = ir.terms(new Term("body", prefix + "*"));
do {
if (te.term().text().startsWith(prefix))
{
termsWithPrefix.add(te.term());
}
} while (te.next());
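// note: add(Term[]) presumably places all of these terms at the same
// phrase position, so query1 effectively matches
// "blueberry (piccadilly|pie|pizza)".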
query1.add((Term[])termsWithPrefix.toArray(new Term[0]));
query2.add((Term[])termsWithPrefix.toArray(new Term[0]));
Hits result;
result = searcher.search(query1);
assertEquals(2, result.length());
result = searcher.search(query2);
assertEquals(0, result.length());
}
}

@@ -1,257 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
/**
* Tests {@link PhraseQuery}.
*
* @see TestPositionIncrement
* @author Erik Hatcher
*/
public class TestPhraseQuery extends TestCase {
private IndexSearcher searcher;
private PhraseQuery query;
private RAMDirectory directory;
public void setUp() throws Exception {
directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
Document doc = new Document();
doc.add(Field.Text("field", "one two three four five"));
writer.addDocument(doc);
writer.optimize();
writer.close();
searcher = new IndexSearcher(directory);
query = new PhraseQuery();
}
public void tearDown() throws Exception {
searcher.close();
directory.close();
}
public void testNotCloseEnough() throws Exception {
query.setSlop(2);
query.add(new Term("field", "one"));
query.add(new Term("field", "five"));
Hits hits = searcher.search(query);
assertEquals(0, hits.length());
}
public void testBarelyCloseEnough() throws Exception {
query.setSlop(3);
query.add(new Term("field", "one"));
query.add(new Term("field", "five"));
Hits hits = searcher.search(query);
assertEquals(1, hits.length());
}
/**
* Ensures slop of 0 works for exact matches, but not reversed
*/
public void testExact() throws Exception {
// slop is zero by default
query.add(new Term("field", "four"));
query.add(new Term("field", "five"));
Hits hits = searcher.search(query);
assertEquals("exact match", 1, hits.length());
query = new PhraseQuery();
query.add(new Term("field", "two"));
query.add(new Term("field", "one"));
hits = searcher.search(query);
assertEquals("reverse not exact", 0, hits.length());
}
public void testSlop1() throws Exception {
// Ensures slop of 1 works with terms in order.
query.setSlop(1);
query.add(new Term("field", "one"));
query.add(new Term("field", "two"));
Hits hits = searcher.search(query);
assertEquals("in order", 1, hits.length());
// Ensures slop of 1 does not work for phrases out of order;
// must be at least 2.
query = new PhraseQuery();
query.setSlop(1);
query.add(new Term("field", "two"));
query.add(new Term("field", "one"));
hits = searcher.search(query);
assertEquals("reversed, slop not 2 or more", 0, hits.length());
}
/**
* As long as slop is at least 2, terms can be reversed
*/
public void testOrderDoesntMatter() throws Exception {
query.setSlop(2); // must be at least two for reverse order match
query.add(new Term("field", "two"));
query.add(new Term("field", "one"));
Hits hits = searcher.search(query);
assertEquals("just sloppy enough", 1, hits.length());
query = new PhraseQuery();
query.setSlop(2);
query.add(new Term("field", "three"));
query.add(new Term("field", "one"));
hits = searcher.search(query);
assertEquals("not sloppy enough", 0, hits.length());
}
/**
* slop is the total number of positional moves allowed
* to line up a phrase
*/
public void testMultipleTerms() throws Exception {
query.setSlop(2);
query.add(new Term("field", "one"));
query.add(new Term("field", "three"));
query.add(new Term("field", "five"));
Hits hits = searcher.search(query);
assertEquals("two total moves", 1, hits.length());
query = new PhraseQuery();
query.setSlop(5); // it takes six moves to match this phrase
query.add(new Term("field", "five"));
query.add(new Term("field", "three"));
query.add(new Term("field", "one"));
hits = searcher.search(query);
assertEquals("slop of 5 not close enough", 0, hits.length());
query.setSlop(6);
hits = searcher.search(query);
assertEquals("slop of 6 just right", 1, hits.length());
}
public void testPhraseQueryWithStopAnalyzer() throws Exception {
RAMDirectory directory = new RAMDirectory();
StopAnalyzer stopAnalyzer = new StopAnalyzer();
IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true);
Document doc = new Document();
doc.add(Field.Text("field", "the stop words are here"));
writer.addDocument(doc);
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
// valid exact phrase query
PhraseQuery query = new PhraseQuery();
query.add(new Term("field","stop"));
query.add(new Term("field","words"));
Hits hits = searcher.search(query);
assertEquals(1, hits.length());
// currently StopAnalyzer does not leave "holes", so this matches.
query = new PhraseQuery();
query.add(new Term("field", "words"));
query.add(new Term("field", "here"));
hits = searcher.search(query);
assertEquals(1, hits.length());
searcher.close();
}
public void testPhraseQueryInConjunctionScorer() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
Document doc = new Document();
doc.add(new Field("source", "marketing info", true, true, true));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("contents", "foobar", true, true, true));
doc.add(new Field("source", "marketing info", true, true, true));
writer.addDocument(doc);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(directory);
PhraseQuery phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("source", "marketing"));
phraseQuery.add(new Term("source", "info"));
Hits hits = searcher.search(phraseQuery);
assertEquals(2, hits.length());
TermQuery termQuery = new TermQuery(new Term("contents","foobar"));
BooleanQuery booleanQuery = new BooleanQuery();
booleanQuery.add(termQuery, true, false);
booleanQuery.add(phraseQuery, true, false);
hits = searcher.search(booleanQuery);
assertEquals(1, hits.length());
searcher.close();
writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
doc = new Document();
doc.add(new Field("contents", "map entry woo", true, true, true));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("contents", "woo map entry", true, true, true));
writer.addDocument(doc);
doc = new Document();
doc.add(new Field("contents", "map foobarword entry woo", true, true, true));
writer.addDocument(doc);
writer.optimize();
writer.close();
searcher = new IndexSearcher(directory);
termQuery = new TermQuery(new Term("contents","woo"));
phraseQuery = new PhraseQuery();
phraseQuery.add(new Term("contents","map"));
phraseQuery.add(new Term("contents","entry"));
hits = searcher.search(termQuery);
assertEquals(3, hits.length());
hits = searcher.search(phraseQuery);
assertEquals(2, hits.length());
booleanQuery = new BooleanQuery();
booleanQuery.add(termQuery, true, false);
booleanQuery.add(phraseQuery, true, false);
hits = searcher.search(booleanQuery);
assertEquals(2, hits.length());
booleanQuery = new BooleanQuery();
booleanQuery.add(phraseQuery, true, false);
booleanQuery.add(termQuery, true, false);
hits = searcher.search(booleanQuery);
assertEquals(2, hits.length());
searcher.close();
directory.close();
}
}

@@ -1,135 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.io.Reader;
import java.io.IOException;
import java.io.StringReader;
import junit.framework.TestCase;
/**
* Term position unit test.
*
* @author Doug Cutting
* @version $Revision$
*/
public class TestPositionIncrement extends TestCase {
public void testSetPosition() throws Exception {
Analyzer analyzer = new Analyzer() {
public TokenStream tokenStream(String fieldName, Reader reader) {
return new TokenStream() {
private final String[] TOKENS = {"1", "2", "3", "4", "5"};
private final int[] INCREMENTS = {1, 2, 1, 0, 1};
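// note: with these increments the tokens presumably land at positions
// 1->0, 2->2, 3->3, 4->3 (sharing a position with 3), 5->4, which is
// what the phrase-query assertions below rely on.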
private int i = 0;
public Token next() throws IOException {
if (i == TOKENS.length)
return null;
Token t = new Token(TOKENS[i], i, i);
t.setPositionIncrement(INCREMENTS[i]);
i++;
return t;
}
};
}
};
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, analyzer, true);
Document d = new Document();
d.add(Field.Text("field", "bogus"));
writer.addDocument(d);
writer.optimize();
writer.close();
IndexSearcher searcher = new IndexSearcher(store);
PhraseQuery q;
Hits hits;
q = new PhraseQuery();
q.add(new Term("field", "1"));
q.add(new Term("field", "2"));
hits = searcher.search(q);
assertEquals(0, hits.length());
q = new PhraseQuery();
q.add(new Term("field", "2"));
q.add(new Term("field", "3"));
hits = searcher.search(q);
assertEquals(1, hits.length());
q = new PhraseQuery();
q.add(new Term("field", "3"));
q.add(new Term("field", "4"));
hits = searcher.search(q);
assertEquals(0, hits.length());
q = new PhraseQuery();
q.add(new Term("field", "2"));
q.add(new Term("field", "4"));
hits = searcher.search(q);
assertEquals(1, hits.length());
q = new PhraseQuery();
q.add(new Term("field", "3"));
q.add(new Term("field", "5"));
hits = searcher.search(q);
assertEquals(1, hits.length());
q = new PhraseQuery();
q.add(new Term("field", "4"));
q.add(new Term("field", "5"));
hits = searcher.search(q);
assertEquals(1, hits.length());
q = new PhraseQuery();
q.add(new Term("field", "2"));
q.add(new Term("field", "5"));
hits = searcher.search(q);
assertEquals(0, hits.length());
}
/**
* Basic analyzer behavior should be to keep sequential terms in one
* increment from one another.
*/
public void testIncrementingPositions() throws Exception {
Analyzer analyzer = new WhitespaceAnalyzer();
TokenStream ts = analyzer.tokenStream("field",
new StringReader("one two three four five"));
while (true) {
Token token = ts.next();
if (token == null) break;
assertEquals(token.termText(), 1, token.getPositionIncrement());
}
}
}

@@ -1,56 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
/**
* Tests {@link PrefixQuery} class.
*
* @author Erik Hatcher
*/
public class TestPrefixQuery extends TestCase {
public void testPrefixQuery() throws Exception {
RAMDirectory directory = new RAMDirectory();
String[] categories = new String[] {"/Computers",
"/Computers/Mac",
"/Computers/Windows"};
IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true);
for (int i = 0; i < categories.length; i++) {
Document doc = new Document();
doc.add(Field.Keyword("category", categories[i]));
writer.addDocument(doc);
}
writer.close();
PrefixQuery query = new PrefixQuery(new Term("category", "/Computers"));
IndexSearcher searcher = new IndexSearcher(directory);
Hits hits = searcher.search(query);
assertEquals("All documents in /Computers category and below", 3, hits.length());
query = new PrefixQuery(new Term("category", "/Computers/Mac"));
hits = searcher.search(query);
assertEquals("One in /Computers/Mac", 1, hits.length());
}
}

@@ -1,66 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
public class TestQueryTermVector extends TestCase {
public TestQueryTermVector(String s) {
super(s);
}
protected void setUp() {
}
protected void tearDown() {
}
public void testConstructor() {
String [] queryTerm = {"foo", "bar", "foo", "again", "foo", "bar", "go", "go", "go"};
//Items are sorted lexicographically
String [] gold = {"again", "bar", "foo", "go"};
int [] goldFreqs = {1, 2, 3, 3};
QueryTermVector result = new QueryTermVector(queryTerm);
assertTrue(result != null);
String [] terms = result.getTerms();
assertTrue(terms.length == 4);
int [] freq = result.getTermFrequencies();
assertTrue(freq.length == 4);
checkGold(terms, gold, freq, goldFreqs);
result = new QueryTermVector(null);
assertTrue(result.getTerms().length == 0);
result = new QueryTermVector("foo bar foo again foo bar go go go", new WhitespaceAnalyzer());
assertTrue(result != null);
terms = result.getTerms();
assertTrue(terms.length == 4);
freq = result.getTermFrequencies();
assertTrue(freq.length == 4);
checkGold(terms, gold, freq, goldFreqs);
}
private void checkGold(String[] terms, String[] gold, int[] freq, int[] goldFreqs) {
for (int i = 0; i < terms.length; i++) {
assertTrue(terms[i].equals(gold[i]));
assertTrue(freq[i] == goldFreqs[i]);
}
}
}

@@ -1,96 +0,0 @@
package org.apache.lucene.search;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import junit.framework.TestCase;
import java.io.IOException;
/**
* @author goller
*/
public class TestRangeQuery extends TestCase {
private int docCount = 0;
private RAMDirectory dir;
public void setUp() {
dir = new RAMDirectory();
}
public void testExclusive() throws Exception {
Query query = new RangeQuery(new Term("content", "A"),
new Term("content", "C"),
false);
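// note: with inclusive set to false both endpoints are excluded, so of
// A, B, C, D only B should fall inside the range (A, C).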
initializeIndex(new String[] {"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
Hits hits = searcher.search(query);
assertEquals("A,B,C,D, only B in range", 1, hits.length());
searcher.close();
initializeIndex(new String[] {"A", "B", "D"});
searcher = new IndexSearcher(dir);
hits = searcher.search(query);
assertEquals("A,B,D, only B in range", 1, hits.length());
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
hits = searcher.search(query);
assertEquals("C added, still only B in range", 1, hits.length());
searcher.close();
}
public void testInclusive() throws Exception {
Query query = new RangeQuery(new Term("content", "A"),
new Term("content", "C"),
true);
initializeIndex(new String[]{"A", "B", "C", "D"});
IndexSearcher searcher = new IndexSearcher(dir);
Hits hits = searcher.search(query);
assertEquals("A,B,C,D - A,B,C in range", 3, hits.length());
searcher.close();
initializeIndex(new String[]{"A", "B", "D"});
searcher = new IndexSearcher(dir);
hits = searcher.search(query);
assertEquals("A,B,D - A and B in range", 2, hits.length());
searcher.close();
addDoc("C");
searcher = new IndexSearcher(dir);
hits = searcher.search(query);
assertEquals("C added - A, B, C in range", 3, hits.length());
searcher.close();
}
private void initializeIndex(String[] values) throws IOException {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
for (int i = 0; i < values.length; i++) {
insertDoc(writer, values[i]);
}
writer.close();
}
private void addDoc(String content) throws IOException {
IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
insertDoc(writer, content);
writer.close();
}
private void insertDoc(IndexWriter writer, String content) throws IOException {
Document doc = new Document();
doc.add(Field.Keyword("id", "id" + docCount));
doc.add(Field.UnStored("content", content));
writer.addDocument(doc);
docCount++;
}
}

@@ -1,109 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import java.rmi.Naming;
import java.rmi.registry.LocateRegistry;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
/**
* @version $Id$
*/
public class TestRemoteSearchable extends TestCase {
public TestRemoteSearchable(String name) {
super(name);
}
private static Searchable getRemote() throws Exception {
try {
return lookupRemote();
} catch (Throwable e) {
startServer();
return lookupRemote();
}
}
private static Searchable lookupRemote() throws Exception {
return (Searchable)Naming.lookup("//localhost/Searchable");
}
private static void startServer() throws Exception {
// construct an index
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore,new SimpleAnalyzer(),true);
Document doc = new Document();
doc.add(Field.Text("test", "test text"));
writer.addDocument(doc);
writer.optimize();
writer.close();
// publish it
LocateRegistry.createRegistry(1099);
Searchable local = new IndexSearcher(indexStore);
RemoteSearchable impl = new RemoteSearchable(local);
Naming.rebind("//localhost/Searchable", impl);
}
private static void search(Query query) throws Exception {
// try to search the published index
Searchable[] searchables = { getRemote() };
Searcher searcher = new MultiSearcher(searchables);
Hits result = searcher.search(query);
assertEquals(1, result.length());
assertEquals("test text", result.doc(0).get("test"));
}
public void testTermQuery() throws Exception {
search(new TermQuery(new Term("test", "test")));
}
public void testBooleanQuery() throws Exception {
BooleanQuery query = new BooleanQuery();
query.add(new TermQuery(new Term("test", "test")), true, false);
search(query);
}
public void testPhraseQuery() throws Exception {
PhraseQuery query = new PhraseQuery();
query.add(new Term("test", "test"));
query.add(new Term("test", "text"));
search(query);
}
// Tests bug fix at http://nagoya.apache.org/bugzilla/show_bug.cgi?id=20290
public void testQueryFilter() throws Exception {
// try to search the published index
Searchable[] searchables = { getRemote() };
Searcher searcher = new MultiSearcher(searchables);
Hits hits = searcher.search(
new TermQuery(new Term("test", "text")),
new QueryFilter(new TermQuery(new Term("test", "test"))));
Hits nohits = searcher.search(
new TermQuery(new Term("test", "text")),
new QueryFilter(new TermQuery(new Term("test", "non-existent-term"))));
assertEquals(0, nohits.length());
}
}

@@ -1,80 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
/** Document boost unit test.
*
* @author Doug Cutting
* @version $Revision$
*/
public class TestSetNorm extends TestCase {
public TestSetNorm(String name) {
super(name);
}
public void testSetNorm() throws Exception {
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
// add the same document four times
Field f1 = Field.Text("field", "word");
Document d1 = new Document();
d1.add(f1);
writer.addDocument(d1);
writer.addDocument(d1);
writer.addDocument(d1);
writer.addDocument(d1);
writer.close();
// reset the boost of each instance of this document
IndexReader reader = IndexReader.open(store);
reader.setNorm(0, "field", 1.0f);
reader.setNorm(1, "field", 2.0f);
reader.setNorm(2, "field", 4.0f);
reader.setNorm(3, "field", 16.0f);
reader.close();
// check that searches are ordered by this boost
final float[] scores = new float[4];
new IndexSearcher(store).search
(new TermQuery(new Term("field", "word")),
new HitCollector() {
public final void collect(int doc, float score) {
scores[doc] = score;
}
});
float lastScore = 0.0f;
for (int i = 0; i < 4; i++) {
assertTrue(scores[i] > lastScore);
lastScore = scores[i];
}
}
}

@@ -1,121 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import java.util.Collection;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
/** Similarity unit test.
*
* @author Doug Cutting
* @version $Revision$
*/
public class TestSimilarity extends TestCase {
public TestSimilarity(String name) {
super(name);
}
public static class SimpleSimilarity extends Similarity {
public float lengthNorm(String field, int numTerms) { return 1.0f; }
public float queryNorm(float sumOfSquaredWeights) { return 1.0f; }
public float tf(float freq) { return freq; }
public float sloppyFreq(int distance) { return 2.0f; }
public float idf(Collection terms, Searcher searcher) { return 1.0f; }
public float idf(int docFreq, int numDocs) { return 1.0f; }
public float coord(int overlap, int maxOverlap) { return 1.0f; }
}
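// note: with every factor above fixed at 1.0 (and sloppyFreq at 2.0),
// scores should reduce to plain term-frequency counts, which is what
// the HitCollector assertions below expect.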
public void testSimilarity() throws Exception {
RAMDirectory store = new RAMDirectory();
IndexWriter writer = new IndexWriter(store, new SimpleAnalyzer(), true);
writer.setSimilarity(new SimpleSimilarity());
Document d1 = new Document();
d1.add(Field.Text("field", "a c"));
Document d2 = new Document();
d2.add(Field.Text("field", "a b c"));
writer.addDocument(d1);
writer.addDocument(d2);
writer.optimize();
writer.close();
final float[] scores = new float[4];
Searcher searcher = new IndexSearcher(store);
searcher.setSimilarity(new SimpleSimilarity());
Term a = new Term("field", "a");
Term b = new Term("field", "b");
Term c = new Term("field", "c");
searcher.search
(new TermQuery(b),
new HitCollector() {
public final void collect(int doc, float score) {
assertTrue(score == 1.0f);
}
});
BooleanQuery bq = new BooleanQuery();
bq.add(new TermQuery(a), false, false);
bq.add(new TermQuery(b), false, false);
//System.out.println(bq.toString("field"));
searcher.search
(bq,
new HitCollector() {
public final void collect(int doc, float score) {
//System.out.println("Doc=" + doc + " score=" + score);
assertTrue(score == (float)doc+1);
}
});
PhraseQuery pq = new PhraseQuery();
pq.add(a);
pq.add(c);
//System.out.println(pq.toString("field"));
searcher.search
(pq,
new HitCollector() {
public final void collect(int doc, float score) {
//System.out.println("Doc=" + doc + " score=" + score);
assertTrue(score == 1.0f);
}
});
pq.setSlop(2);
//System.out.println(pq.toString("field"));
searcher.search
(pq,
new HitCollector() {
public final void collect(int doc, float score) {
//System.out.println("Doc=" + doc + " score=" + score);
assertTrue(score == 2.0f);
}
});
}
}

@@ -1,588 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.*;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.rmi.Naming;
import java.rmi.registry.LocateRegistry;
import java.rmi.registry.Registry;
import java.io.IOException;
import java.io.Serializable;
import java.util.regex.Pattern;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import junit.framework.TestCase;
import junit.framework.Test;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
/**
* Unit tests for sorting code.
*
* <p>Created: Feb 17, 2004 4:55:10 PM
*
* @author Tim Jones (Nacimiento Software)
* @since lucene 1.4
* @version $Id$
*/
public class TestSort
extends TestCase
implements Serializable {
private Searcher full;
private Searcher searchX;
private Searcher searchY;
private Query queryX;
private Query queryY;
private Query queryA;
private Query queryF;
private Sort sort;
public TestSort (String name) {
super (name);
}
public static void main (String[] argv) {
if (argv == null || argv.length < 1)
TestRunner.run (suite());
else if ("server".equals (argv[0])) {
TestSort test = new TestSort (null);
try {
test.startServer();
Thread.sleep (500000);
} catch (Exception e) {
System.out.println (e);
e.printStackTrace();
}
}
}
public static Test suite() {
return new TestSuite (TestSort.class);
}
// document data:
// the tracer field is used to determine which document was hit
// the contents field is used to search and sort by relevance
// the int field to sort by int
// the float field to sort by float
// the string field to sort by string
private String[][] data = new String[][] {
// tracer contents int float string custom
{ "A", "x a", "5", "4f", "c", "A-3" },
{ "B", "y a", "5", "3.4028235E38", "i", "B-10" },
{ "C", "x a b c", "2147483647", "1.0", "j", "A-2" },
{ "D", "y a b c", "-1", "0.0f", "a", "C-0" },
{ "E", "x a b c d", "5", "2f", "h", "B-8" },
{ "F", "y a b c d", "2", "3.14159f", "g", "B-1" },
{ "G", "x a b c d", "3", "-1.0", "f", "C-100" },
{ "H", "y a b c d", "0", "1.4E-45", "e", "C-88" },
{ "I", "x a b c d e f", "-2147483648", "1.0e+0", "d", "A-10" },
{ "J", "y a b c d e f", "4", ".5", "b", "C-7" },
{ "Z", "f", null, null, null, null }
};
// create an index of all the documents, or just the x, or just the y documents
private Searcher getIndex (boolean even, boolean odd)
throws IOException {
RAMDirectory indexStore = new RAMDirectory ();
IndexWriter writer = new IndexWriter (indexStore, new SimpleAnalyzer(), true);
for (int i=0; i<data.length; ++i) {
if (((i%2)==0 && even) || ((i%2)==1 && odd)) {
Document doc = new Document(); // store, index, token
doc.add (new Field ("tracer", data[i][0], true, false, false));
doc.add (new Field ("contents", data[i][1], false, true, true));
if (data[i][2] != null) doc.add (new Field ("int", data[i][2], false, true, false));
if (data[i][3] != null) doc.add (new Field ("float", data[i][3], false, true, false));
if (data[i][4] != null) doc.add (new Field ("string", data[i][4], false, true, false));
if (data[i][5] != null) doc.add (new Field ("custom", data[i][5], false, true, false));
writer.addDocument (doc);
}
}
writer.optimize ();
writer.close ();
return new IndexSearcher (indexStore);
}
private Searcher getFullIndex()
throws IOException {
return getIndex (true, true);
}
private Searcher getXIndex()
throws IOException {
return getIndex (true, false);
}
private Searcher getYIndex()
throws IOException {
return getIndex (false, true);
}
private Searcher getEmptyIndex()
throws IOException {
return getIndex (false, false);
}
public void setUp() throws Exception {
full = getFullIndex();
searchX = getXIndex();
searchY = getYIndex();
queryX = new TermQuery (new Term ("contents", "x"));
queryY = new TermQuery (new Term ("contents", "y"));
queryA = new TermQuery (new Term ("contents", "a"));
queryF = new TermQuery (new Term ("contents", "f"));
sort = new Sort();
}
// test the sorts by score and document number
public void testBuiltInSorts() throws Exception {
sort = new Sort();
assertMatches (full, queryX, sort, "ACEGI");
assertMatches (full, queryY, sort, "BDFHJ");
sort.setSort(SortField.FIELD_DOC);
assertMatches (full, queryX, sort, "ACEGI");
assertMatches (full, queryY, sort, "BDFHJ");
}
// test sorts where the type of field is specified
public void testTypedSort() throws Exception {
sort.setSort (new SortField[] { new SortField ("int", SortField.INT), SortField.FIELD_DOC });
assertMatches (full, queryX, sort, "IGAEC");
assertMatches (full, queryY, sort, "DHFJB");
sort.setSort (new SortField[] { new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC });
assertMatches (full, queryX, sort, "GCIEA");
assertMatches (full, queryY, sort, "DHJFB");
sort.setSort (new SortField[] { new SortField ("string", SortField.STRING), SortField.FIELD_DOC });
assertMatches (full, queryX, sort, "AIGEC");
assertMatches (full, queryY, sort, "DJHFB");
}
// test sorts when there's nothing in the index
public void testEmptyIndex() throws Exception {
Searcher empty = getEmptyIndex();
sort = new Sort();
assertMatches (empty, queryX, sort, "");
sort.setSort(SortField.FIELD_DOC);
assertMatches (empty, queryX, sort, "");
sort.setSort (new SortField[] { new SortField ("int", SortField.INT), SortField.FIELD_DOC });
assertMatches (empty, queryX, sort, "");
sort.setSort (new SortField[] { new SortField ("string", SortField.STRING, true), SortField.FIELD_DOC });
assertMatches (empty, queryX, sort, "");
sort.setSort (new SortField[] { new SortField ("float", SortField.FLOAT), new SortField ("string", SortField.STRING) });
assertMatches (empty, queryX, sort, "");
}
// test sorts where the type of field is determined dynamically
public void testAutoSort() throws Exception {
sort.setSort("int");
assertMatches (full, queryX, sort, "IGAEC");
assertMatches (full, queryY, sort, "DHFJB");
sort.setSort("float");
assertMatches (full, queryX, sort, "GCIEA");
assertMatches (full, queryY, sort, "DHJFB");
sort.setSort("string");
assertMatches (full, queryX, sort, "AIGEC");
assertMatches (full, queryY, sort, "DJHFB");
}
// test sorts in reverse
public void testReverseSort() throws Exception {
sort.setSort (new SortField[] { new SortField (null, SortField.SCORE, true), SortField.FIELD_DOC });
assertMatches (full, queryX, sort, "IEGCA");
assertMatches (full, queryY, sort, "JFHDB");
sort.setSort (new SortField (null, SortField.DOC, true));
assertMatches (full, queryX, sort, "IGECA");
assertMatches (full, queryY, sort, "JHFDB");
sort.setSort ("int", true);
assertMatches (full, queryX, sort, "CAEGI");
assertMatches (full, queryY, sort, "BJFHD");
sort.setSort ("float", true);
assertMatches (full, queryX, sort, "AECIG");
assertMatches (full, queryY, sort, "BFJHD");
sort.setSort ("string", true);
assertMatches (full, queryX, sort, "CEGIA");
assertMatches (full, queryY, sort, "BFHJD");
}
// test sorting when the sort field is empty (undefined) for some of the documents
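// queryF matches I (string "d", int -2147483648), J ("b", 4) and Z, which
// has no values at all; judging from the expected orders below, a missing
// string sorts first ascending, while a missing numeric behaves like 0.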
public void testEmptyFieldSort() throws Exception {
sort.setSort ("string");
assertMatches (full, queryF, sort, "ZJI");
sort.setSort ("string", true);
assertMatches (full, queryF, sort, "IJZ");
sort.setSort ("int");
assertMatches (full, queryF, sort, "IZJ");
sort.setSort ("int", true);
assertMatches (full, queryF, sort, "JZI");
sort.setSort ("float");
assertMatches (full, queryF, sort, "ZJI");
sort.setSort ("float", true);
assertMatches (full, queryF, sort, "IJZ");
}
// test sorts using a series of fields
public void testSortCombos() throws Exception {
sort.setSort (new String[] {"int","float"});
assertMatches (full, queryX, sort, "IGEAC");
sort.setSort (new SortField[] { new SortField ("int", true), new SortField (null, SortField.DOC, true) });
assertMatches (full, queryX, sort, "CEAGI");
sort.setSort (new String[] {"float","string"});
assertMatches (full, queryX, sort, "GICEA");
}
// test using a Locale for sorting strings
public void testLocaleSort() throws Exception {
sort.setSort (new SortField[] { new SortField ("string", Locale.US) });
assertMatches (full, queryX, sort, "AIGEC");
assertMatches (full, queryY, sort, "DJHFB");
sort.setSort (new SortField[] { new SortField ("string", Locale.US, true) });
assertMatches (full, queryX, sort, "CEGIA");
assertMatches (full, queryY, sort, "BFHJD");
}
// test a custom sort function
public void testCustomSorts() throws Exception {
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource()));
assertMatches (full, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true));
assertMatches (full, queryY, sort, "HJDBF");
SortComparator custom = SampleComparable.getComparator();
sort.setSort (new SortField ("custom", custom));
assertMatches (full, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", custom, true));
assertMatches (full, queryY, sort, "HJDBF");
}
// test a variety of sorts using more than one searcher
public void testMultiSort() throws Exception {
MultiSearcher searcher = new MultiSearcher (new Searchable[] { searchX, searchY });
runMultiSorts (searcher);
}
// test a variety of sorts using a parallel multisearcher
public void testParallelMultiSort() throws Exception {
Searcher searcher = new ParallelMultiSearcher (new Searchable[] { searchX, searchY });
runMultiSorts (searcher);
}
// test a variety of sorts using a remote searcher
public void testRemoteSort() throws Exception {
Searchable searcher = getRemote();
MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });
runMultiSorts (multi);
}
// test custom search when remote
public void testRemoteCustomSort() throws Exception {
Searchable searcher = getRemote();
MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource()));
assertMatches (multi, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", SampleComparable.getComparatorSource(), true));
assertMatches (multi, queryY, sort, "HJDBF");
SortComparator custom = SampleComparable.getComparator();
sort.setSort (new SortField ("custom", custom));
assertMatches (multi, queryX, sort, "CAIEG");
sort.setSort (new SortField ("custom", custom, true));
assertMatches (multi, queryY, sort, "HJDBF");
}
// test that the relevancy scores are the same even if
// hits are sorted
public void testNormalizedScores() throws Exception {
// capture relevancy scores
HashMap scoresX = getScores (full.search (queryX));
HashMap scoresY = getScores (full.search (queryY));
HashMap scoresA = getScores (full.search (queryA));
// we'll test searching locally, remote and multi
// note: the multi test depends on each separate index containing
// the same documents as our local index, so the computed normalization
// will be the same. so we make a multi searcher over two equal document
// sets - not realistic, but necessary for testing.
MultiSearcher remote = new MultiSearcher (new Searchable[] { getRemote() });
MultiSearcher multi = new MultiSearcher (new Searchable[] { full, full });
// change sorting and make sure relevancy stays the same
sort = new Sort();
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort(SortField.FIELD_DOC);
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort ("int");
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort ("float");
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort ("string");
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort (new String[] {"int","float"});
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort (new SortField[] { new SortField ("int", true), new SortField (null, SortField.DOC, true) });
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
sort.setSort (new String[] {"float","string"});
assertSameValues (scoresX, getScores(full.search(queryX,sort)));
assertSameValues (scoresX, getScores(remote.search(queryX,sort)));
assertSameValues (scoresX, getScores(multi.search(queryX,sort)));
assertSameValues (scoresY, getScores(full.search(queryY,sort)));
assertSameValues (scoresY, getScores(remote.search(queryY,sort)));
assertSameValues (scoresY, getScores(multi.search(queryY,sort)));
assertSameValues (scoresA, getScores(full.search(queryA,sort)));
assertSameValues (scoresA, getScores(remote.search(queryA,sort)));
//assertSameValues (scoresA, getScores(multi.search(queryA,sort)));
}
// runs a variety of sorts useful for multisearchers
private void runMultiSorts (Searcher multi) throws Exception {
sort.setSort (SortField.FIELD_DOC);
assertMatchesPattern (multi, queryA, sort, "[AB]{2}[CD]{2}[EF]{2}[GH]{2}[IJ]{2}");
sort.setSort (new SortField ("int", SortField.INT));
assertMatchesPattern (multi, queryA, sort, "IDHFGJ[ABE]{3}C");
sort.setSort (new SortField[] {new SortField ("int", SortField.INT), SortField.FIELD_DOC});
assertMatchesPattern (multi, queryA, sort, "IDHFGJ[AB]{2}EC");
sort.setSort ("int");
assertMatchesPattern (multi, queryA, sort, "IDHFGJ[AB]{2}EC");
sort.setSort (new SortField[] {new SortField ("float", SortField.FLOAT), SortField.FIELD_DOC});
assertMatchesPattern (multi, queryA, sort, "GDHJ[CI]{2}EFAB");
sort.setSort ("float");
assertMatchesPattern (multi, queryA, sort, "GDHJ[CI]{2}EFAB");
sort.setSort ("string");
assertMatches (multi, queryA, sort, "DJAIHGFEBC");
sort.setSort ("int", true);
assertMatchesPattern (multi, queryA, sort, "C[AB]{2}EJGFHDI");
sort.setSort ("float", true);
assertMatchesPattern (multi, queryA, sort, "BAFE[IC]{2}JHDG");
sort.setSort ("string", true);
assertMatches (multi, queryA, sort, "CBEFGHIAJD");
sort.setSort (new SortField[] { new SortField ("string", Locale.US) });
assertMatches (multi, queryA, sort, "DJAIHGFEBC");
sort.setSort (new SortField[] { new SortField ("string", Locale.US, true) });
assertMatches (multi, queryA, sort, "CBEFGHIAJD");
sort.setSort (new String[] {"int","float"});
assertMatches (multi, queryA, sort, "IDHFGJEABC");
sort.setSort (new String[] {"float","string"});
assertMatches (multi, queryA, sort, "GDHJICEFAB");
sort.setSort ("int");
assertMatches (multi, queryF, sort, "IZJ");
sort.setSort ("int", true);
assertMatches (multi, queryF, sort, "JZI");
sort.setSort ("float");
assertMatches (multi, queryF, sort, "ZJI");
sort.setSort ("string");
assertMatches (multi, queryF, sort, "ZJI");
sort.setSort ("string", true);
assertMatches (multi, queryF, sort, "IJZ");
}
// make sure the documents returned by the search match the expected list
private void assertMatches (Searcher searcher, Query query, Sort sort, String expectedResult)
throws IOException {
Hits result = searcher.search (query, sort);
StringBuffer buff = new StringBuffer(10);
int n = result.length();
for (int i=0; i<n; ++i) {
Document doc = result.doc(i);
String[] v = doc.getValues("tracer");
for (int j=0; j<v.length; ++j) {
buff.append (v[j]);
}
}
assertEquals (expectedResult, buff.toString());
}
// make sure the documents returned by the search match the expected list pattern
private void assertMatchesPattern (Searcher searcher, Query query, Sort sort, String pattern)
throws IOException {
Hits result = searcher.search (query, sort);
StringBuffer buff = new StringBuffer(10);
int n = result.length();
for (int i=0; i<n; ++i) {
Document doc = result.doc(i);
String[] v = doc.getValues("tracer");
for (int j=0; j<v.length; ++j) {
buff.append (v[j]);
}
}
// System.out.println ("matching \""+buff+"\" against pattern \""+pattern+"\"");
assertTrue (Pattern.compile(pattern).matcher(buff.toString()).matches());
}
private HashMap getScores (Hits hits)
throws IOException {
HashMap scoreMap = new HashMap();
int n = hits.length();
for (int i=0; i<n; ++i) {
Document doc = hits.doc(i);
String[] v = doc.getValues("tracer");
assertEquals (1, v.length);
scoreMap.put (v[0], new Float(hits.score(i)));
}
return scoreMap;
}
// make sure all the values in the maps match
private void assertSameValues (HashMap m1, HashMap m2) {
int n = m1.size();
int m = m2.size();
assertEquals (n, m);
Iterator iter = m1.keySet().iterator();
while (iter.hasNext()) {
Object key = iter.next();
assertEquals (m1.get(key), m2.get(key));
}
}
private Searchable getRemote () throws Exception {
try {
return lookupRemote ();
} catch (Throwable e) {
startServer ();
return lookupRemote ();
}
}
private Searchable lookupRemote () throws Exception {
return (Searchable) Naming.lookup ("//localhost/SortedSearchable");
}
private void startServer () throws Exception {
// construct an index
Searcher local = getFullIndex();
// local.search (queryA, new Sort());
// publish it
Registry reg = LocateRegistry.createRegistry (1099);
RemoteSearchable impl = new RemoteSearchable (local);
Naming.rebind ("//localhost/SortedSearchable", impl);
}
}

View File

@ -1,223 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.*;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.English;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
public class TestTermVectors extends TestCase {
private IndexSearcher searcher;
private RAMDirectory directory = new RAMDirectory();
public TestTermVectors(String s) {
super(s);
}
public void setUp() throws Exception {
IndexWriter writer
= new IndexWriter(directory, new SimpleAnalyzer(), true);
//writer.setUseCompoundFile(true);
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
doc.add(Field.Text("field", English.intToEnglish(i), true));
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory);
}
protected void tearDown() {
}
public void test() {
assertTrue(searcher != null);
}
public void testTermVectors() {
Query query = new TermQuery(new Term("field", "seventy"));
try {
Hits hits = searcher.search(query);
assertEquals(100, hits.length());
for (int i = 0; i < hits.length(); i++)
{
TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(i));
assertTrue(vector != null);
assertTrue(vector.length == 1);
//assertTrue();
}
TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(50));
//System.out.println("Explain: " + searcher.explain(query, hits.id(50)));
//System.out.println("Vector: " + vector[0].toString());
} catch (IOException e) {
fail(e.toString());
}
}
public void testTermPositionVectors() {
Query query = new TermQuery(new Term("field", "fifty"));
try {
Hits hits = searcher.search(query);
assertEquals(100, hits.length());
for (int i = 0; i < hits.length(); i++)
{
TermFreqVector [] vector = searcher.reader.getTermFreqVectors(hits.id(i));
assertTrue(vector != null);
assertTrue(vector.length == 1);
//assertTrue();
}
} catch (IOException e) {
fail(e.toString());
}
}
public void testKnownSetOfDocuments() {
String [] termArray = {"eating", "chocolate", "in", "a", "computer", "lab", "grows", "old", "colored",
"with", "an"};
String test1 = "eating chocolate in a computer lab"; //6 terms
String test2 = "computer in a computer lab"; //5 terms
String test3 = "a chocolate lab grows old"; //5 terms
String test4 = "eating chocolate with a chocolate lab in an old chocolate colored computer lab"; //13 terms
Map test4Map = new HashMap();
test4Map.put("chocolate", new Integer(3));
test4Map.put("lab", new Integer(2));
test4Map.put("eating", new Integer(1));
test4Map.put("computer", new Integer(1));
test4Map.put("with", new Integer(1));
test4Map.put("a", new Integer(1));
test4Map.put("colored", new Integer(1));
test4Map.put("in", new Integer(1));
test4Map.put("an", new Integer(1));
test4Map.put("computer", new Integer(1));
test4Map.put("old", new Integer(1));
Document testDoc1 = new Document();
setupDoc(testDoc1, test1);
Document testDoc2 = new Document();
setupDoc(testDoc2, test2);
Document testDoc3 = new Document();
setupDoc(testDoc3, test3);
Document testDoc4 = new Document();
setupDoc(testDoc4, test4);
Directory dir = new RAMDirectory();
try {
IndexWriter writer = new IndexWriter(dir, new SimpleAnalyzer(), true);
assertTrue(writer != null);
writer.addDocument(testDoc1);
writer.addDocument(testDoc2);
writer.addDocument(testDoc3);
writer.addDocument(testDoc4);
writer.close();
IndexSearcher knownSearcher = new IndexSearcher(dir);
TermEnum termEnum = knownSearcher.reader.terms();
TermDocs termDocs = knownSearcher.reader.termDocs();
//System.out.println("Terms: " + termEnum.size() + " Orig Len: " + termArray.length);
Similarity sim = knownSearcher.getSimilarity();
while (termEnum.next())
{
Term term = termEnum.term();
//System.out.println("Term: " + term);
termDocs.seek(term);
while (termDocs.next())
{
int docId = termDocs.doc();
int freq = termDocs.freq();
//System.out.println("Doc Id: " + docId + " freq " + freq);
TermFreqVector vector = knownSearcher.reader.getTermFreqVector(docId, "field");
float tf = sim.tf(freq);
float idf = sim.idf(term, knownSearcher);
//float qNorm = sim.queryNorm()
//This is fine since we don't have stop words
float lNorm = sim.lengthNorm("field", vector.getTerms().length);
//float coord = sim.coord()
//System.out.println("TF: " + tf + " IDF: " + idf + " LenNorm: " + lNorm);
assertTrue(vector != null);
String[] vTerms = vector.getTerms();
int [] freqs = vector.getTermFrequencies();
for (int i = 0; i < vTerms.length; i++)
{
if (term.text().equals(vTerms[i]))
{
assertTrue(freqs[i] == freq);
}
}
}
//System.out.println("--------");
}
Query query = new TermQuery(new Term("field", "chocolate"));
Hits hits = knownSearcher.search(query);
//doc 3 should be the first hit b/c it is the shortest match
assertTrue(hits.length() == 3);
float score = hits.score(0);
/*System.out.println("Hit 0: " + hits.id(0) + " Score: " + hits.score(0) + " String: " + hits.doc(0).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(0)));
System.out.println("Hit 1: " + hits.id(1) + " Score: " + hits.score(1) + " String: " + hits.doc(1).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(1)));
System.out.println("Hit 2: " + hits.id(2) + " Score: " + hits.score(2) + " String: " + hits.doc(2).toString());
System.out.println("Explain: " + knownSearcher.explain(query, hits.id(2)));*/
assertTrue(testDoc3.toString().equals(hits.doc(0).toString()));
assertTrue(testDoc4.toString().equals(hits.doc(1).toString()));
assertTrue(testDoc1.toString().equals(hits.doc(2).toString()));
TermFreqVector vector = knownSearcher.reader.getTermFreqVector(hits.id(1), "field");
assertTrue(vector != null);
//System.out.println("Vector: " + vector);
String[] terms = vector.getTerms();
int [] freqs = vector.getTermFrequencies();
assertTrue(terms != null && terms.length == 10);
for (int i = 0; i < terms.length; i++) {
String term = terms[i];
//System.out.println("Term: " + term);
int freq = freqs[i];
assertTrue(test4.indexOf(term) != -1);
Integer freqInt = (Integer)test4Map.get(term);
assertTrue(freqInt != null);
assertTrue(freqInt.intValue() == freq);
}
knownSearcher.close();
} catch (IOException e) {
e.printStackTrace();
fail(e.toString());
}
}
private void setupDoc(Document doc, String text)
{
doc.add(Field.Text("field", text, true));
//System.out.println("Document: " + doc);
}
}

View File

@ -1,133 +0,0 @@
package org.apache.lucene.search;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import junit.framework.TestCase;
import java.io.IOException;
/**
* TestWildcard tests the '*' and '?' wildcard characters.
*
* @author Otis Gospodnetic
*/
public class TestWildcard
extends TestCase
{
/**
* Creates a new <code>TestWildcard</code> instance.
*
* @param name the name of the test
*/
public TestWildcard(String name)
{
super(name);
}
/**
* Tests Wildcard queries with an asterisk.
*
*/
public void testAsterisk()
throws IOException
{
RAMDirectory indexStore = getIndexStore("body", new String[]
{ "metal", "metals" }
);
IndexSearcher searcher = new IndexSearcher(indexStore);
Query query1 = new TermQuery(new Term("body", "metal"));
Query query2 = new WildcardQuery(new Term("body", "metal*"));
Query query3 = new WildcardQuery(new Term("body", "m*tal"));
Query query4 = new WildcardQuery(new Term("body", "m*tal*"));
Query query5 = new WildcardQuery(new Term("body", "m*tals"));
BooleanQuery query6 = new BooleanQuery();
query6.add(query5, false, false);
BooleanQuery query7 = new BooleanQuery();
query7.add(query3, false, false);
query7.add(query5, false, false);
// Queries do not automatically lower-case search terms:
Query query8 = new WildcardQuery(new Term("body", "M*tal*"));
assertMatches(searcher, query1, 1);
assertMatches(searcher, query2, 2);
assertMatches(searcher, query3, 1);
assertMatches(searcher, query4, 2);
assertMatches(searcher, query5, 1);
assertMatches(searcher, query6, 1);
assertMatches(searcher, query7, 2);
assertMatches(searcher, query8, 0);
}
/**
* Tests Wildcard queries with a question mark.
*
* @exception IOException if an error occurs
*/
public void testQuestionmark()
throws IOException
{
RAMDirectory indexStore = getIndexStore("body", new String[]
{ "metal", "metals", "mXtals", "mXtXls" }
);
IndexSearcher searcher = new IndexSearcher(indexStore);
Query query1 = new WildcardQuery(new Term("body", "m?tal"));
Query query2 = new WildcardQuery(new Term("body", "metal?"));
Query query3 = new WildcardQuery(new Term("body", "metals?"));
Query query4 = new WildcardQuery(new Term("body", "m?t?ls"));
Query query5 = new WildcardQuery(new Term("body", "M?t?ls"));
assertMatches(searcher, query1, 1);
assertMatches(searcher, query2, 2);
assertMatches(searcher, query3, 1);
assertMatches(searcher, query4, 3);
assertMatches(searcher, query5, 0);
}
private RAMDirectory getIndexStore(String field, String[] contents)
throws IOException
{
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new SimpleAnalyzer(), true);
for (int i = 0; i < contents.length; ++i) {
Document doc = new Document();
doc.add(Field.Text(field, contents[i]));
writer.addDocument(doc);
}
writer.optimize();
writer.close();
return indexStore;
}
private void assertMatches(IndexSearcher searcher, Query q, int expectedMatches)
throws IOException
{
Hits result = searcher.search(q);
assertEquals(expectedMatches, result.length());
}
}

View File

@ -1,268 +0,0 @@
package org.apache.lucene.search.spans;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import java.io.IOException;
import org.apache.lucene.util.English;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.search.*;
/**
* Tests basic search capabilities.
*
* <p>Uses a collection of 1000 documents, each the English rendition of its
* document number. For example, the document numbered 333 has text "three
* hundred thirty-three".
*
* <p>Each test is a single query whose hits are checked to ensure that all
* and only the correct documents are returned, providing end-to-end testing
* of the indexing and search code.
*
* @author Doug Cutting
*/
public class TestBasics extends TestCase {
private IndexSearcher searcher;
public void setUp() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer
= new IndexWriter(directory, new SimpleAnalyzer(), true);
//writer.infoStream = System.out;
for (int i = 0; i < 1000; i++) {
Document doc = new Document();
doc.add(Field.Text("field", English.intToEnglish(i)));
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory);
}
public void testTerm() throws Exception {
Query query = new TermQuery(new Term("field", "seventy"));
checkHits(query, new int[]
{70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 170, 171, 172, 173, 174, 175,
176, 177, 178, 179, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279,
370, 371, 372, 373, 374, 375, 376, 377, 378, 379, 470, 471, 472, 473,
474, 475, 476, 477, 478, 479, 570, 571, 572, 573, 574, 575, 576, 577,
578, 579, 670, 671, 672, 673, 674, 675, 676, 677, 678, 679, 770, 771,
772, 773, 774, 775, 776, 777, 778, 779, 870, 871, 872, 873, 874, 875,
876, 877, 878, 879, 970, 971, 972, 973, 974, 975, 976, 977, 978, 979});
}
public void testTerm2() throws Exception {
Query query = new TermQuery(new Term("field", "seventish"));
checkHits(query, new int[] {});
}
public void testPhrase() throws Exception {
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "seventy"));
query.add(new Term("field", "seven"));
checkHits(query, new int[]
{77, 177, 277, 377, 477, 577, 677, 777, 877, 977});
}
public void testPhrase2() throws Exception {
PhraseQuery query = new PhraseQuery();
query.add(new Term("field", "seventish"));
query.add(new Term("field", "sevenon"));
checkHits(query, new int[] {});
}
public void testBoolean() throws Exception {
BooleanQuery query = new BooleanQuery();
query.add(new TermQuery(new Term("field", "seventy")), true, false);
query.add(new TermQuery(new Term("field", "seven")), true, false);
checkHits(query, new int[]
{77, 777, 177, 277, 377, 477, 577, 677, 770, 771, 772, 773, 774, 775,
776, 778, 779, 877, 977});
}
public void testBoolean2() throws Exception {
BooleanQuery query = new BooleanQuery();
query.add(new TermQuery(new Term("field", "sevento")), true, false);
query.add(new TermQuery(new Term("field", "sevenly")), true, false);
checkHits(query, new int[] {});
}
public void testSpanNearExact() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "seventy"));
SpanTermQuery term2 = new SpanTermQuery(new Term("field", "seven"));
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
0, true);
checkHits(query, new int[]
{77, 177, 277, 377, 477, 577, 677, 777, 877, 977});
assertTrue(searcher.explain(query, 77).getValue() > 0.0f);
assertTrue(searcher.explain(query, 977).getValue() > 0.0f);
}
public void testSpanNearUnordered() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "nine"));
SpanTermQuery term2 = new SpanTermQuery(new Term("field", "six"));
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
4, false);
checkHits(query, new int[]
{609, 629, 639, 649, 659, 669, 679, 689, 699,
906, 926, 936, 946, 956, 966, 976, 986, 996});
}
public void testSpanNearOrdered() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "nine"));
SpanTermQuery term2 = new SpanTermQuery(new Term("field", "six"));
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {term1, term2},
4, true);
checkHits(query, new int[]
{906, 926, 936, 946, 956, 966, 976, 986, 996});
}
public void testSpanNot() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "eight"));
SpanTermQuery term2 = new SpanTermQuery(new Term("field", "one"));
SpanNearQuery near = new SpanNearQuery(new SpanQuery[] {term1, term2},
4, true);
SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
SpanNotQuery query = new SpanNotQuery(near, term3);
checkHits(query, new int[]
{801, 821, 831, 851, 861, 871, 881, 891});
assertTrue(searcher.explain(query, 801).getValue() > 0.0f);
assertTrue(searcher.explain(query, 891).getValue() > 0.0f);
}
public void testSpanFirst() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "five"));
SpanFirstQuery query = new SpanFirstQuery(term1, 1);
checkHits(query, new int[]
{5, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513,
514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 527,
528, 529, 530, 531, 532, 533, 534, 535, 536, 537, 538, 539, 540, 541,
542, 543, 544, 545, 546, 547, 548, 549, 550, 551, 552, 553, 554, 555,
556, 557, 558, 559, 560, 561, 562, 563, 564, 565, 566, 567, 568, 569,
570, 571, 572, 573, 574, 575, 576, 577, 578, 579, 580, 581, 582, 583,
584, 585, 586, 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597,
598, 599});
assertTrue(searcher.explain(query, 5).getValue() > 0.0f);
assertTrue(searcher.explain(query, 599).getValue() > 0.0f);
}
public void testSpanOr() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "thirty"));
SpanTermQuery term2 = new SpanTermQuery(new Term("field", "three"));
SpanNearQuery near1 = new SpanNearQuery(new SpanQuery[] {term1, term2},
0, true);
SpanTermQuery term3 = new SpanTermQuery(new Term("field", "forty"));
SpanTermQuery term4 = new SpanTermQuery(new Term("field", "seven"));
SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
0, true);
SpanOrQuery query = new SpanOrQuery(new SpanQuery[] {near1, near2});
checkHits(query, new int[]
{33, 47, 133, 147, 233, 247, 333, 347, 433, 447, 533, 547, 633, 647, 733,
747, 833, 847, 933, 947});
assertTrue(searcher.explain(query, 33).getValue() > 0.0f);
assertTrue(searcher.explain(query, 947).getValue() > 0.0f);
}
public void testSpanExactNested() throws Exception {
SpanTermQuery term1 = new SpanTermQuery(new Term("field", "three"));
SpanTermQuery term2 = new SpanTermQuery(new Term("field", "hundred"));
SpanNearQuery near1 = new SpanNearQuery(new SpanQuery[] {term1, term2},
0, true);
SpanTermQuery term3 = new SpanTermQuery(new Term("field", "thirty"));
SpanTermQuery term4 = new SpanTermQuery(new Term("field", "three"));
SpanNearQuery near2 = new SpanNearQuery(new SpanQuery[] {term3, term4},
0, true);
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {near1, near2},
0, true);
checkHits(query, new int[] {333});
assertTrue(searcher.explain(query, 333).getValue() > 0.0f);
}
public void testSpanNearOr() throws Exception {
SpanTermQuery t1 = new SpanTermQuery(new Term("field","six"));
SpanTermQuery t3 = new SpanTermQuery(new Term("field","seven"));
SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
SpanOrQuery to1 = new SpanOrQuery(new SpanQuery[] {t1, t3});
SpanOrQuery to2 = new SpanOrQuery(new SpanQuery[] {t5, t6});
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
10, true);
checkHits(query, new int[]
{606, 607, 626, 627, 636, 637, 646, 647,
656, 657, 666, 667, 676, 677, 686, 687, 696, 697,
706, 707, 726, 727, 736, 737, 746, 747,
756, 757, 766, 767, 776, 777, 786, 787, 796, 797});
}
public void testSpanComplex1() throws Exception {
SpanTermQuery t1 = new SpanTermQuery(new Term("field","six"));
SpanTermQuery t2 = new SpanTermQuery(new Term("field","hundred"));
SpanNearQuery tt1 = new SpanNearQuery(new SpanQuery[] {t1, t2}, 0,true);
SpanTermQuery t3 = new SpanTermQuery(new Term("field","seven"));
SpanTermQuery t4 = new SpanTermQuery(new Term("field","hundred"));
SpanNearQuery tt2 = new SpanNearQuery(new SpanQuery[] {t3, t4}, 0,true);
SpanTermQuery t5 = new SpanTermQuery(new Term("field","seven"));
SpanTermQuery t6 = new SpanTermQuery(new Term("field","six"));
SpanOrQuery to1 = new SpanOrQuery(new SpanQuery[] {tt1, tt2});
SpanOrQuery to2 = new SpanOrQuery(new SpanQuery[] {t5, t6});
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {to1, to2},
100, true);
checkHits(query, new int[]
{606, 607, 626, 627, 636, 637, 646, 647,
656, 657, 666, 667, 676, 677, 686, 687, 696, 697,
706, 707, 726, 727, 736, 737, 746, 747,
756, 757, 766, 767, 776, 777, 786, 787, 796, 797});
}
private void checkHits(Query query, int[] results) throws IOException {
CheckHits.checkHits(query, "field", searcher, results, this);
}
}

View File

@ -1,97 +0,0 @@
package org.apache.lucene.search.spans;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import junit.framework.TestCase;
import java.io.IOException;
import java.util.Set;
import java.util.TreeSet;
public class TestSpans extends TestCase {
private IndexSearcher searcher;
public static final String field = "field";
public void setUp() throws Exception {
RAMDirectory directory = new RAMDirectory();
IndexWriter writer= new IndexWriter(directory, new WhitespaceAnalyzer(), true);
for (int i = 0; i < docFields.length; i++) {
Document doc = new Document();
doc.add(Field.Text(field, docFields[i]));
writer.addDocument(doc);
}
writer.close();
searcher = new IndexSearcher(directory);
}
private String[] docFields = {
"w1 w2 w3 w4 w5",
"w1 w3 w2 w3",
"w1 xx w2 yy w3",
"w1 w3 xx w2 yy w3",
""
};
public SpanTermQuery makeSpanTermQuery(String text) {
return new SpanTermQuery(new Term(field, text));
}
private void checkHits(Query query, int[] results) throws IOException {
CheckHits.checkHits(query, field, searcher, results, this);
}
public void orderedSlopTest3(int slop, int[] expectedDocs) throws IOException {
SpanTermQuery w1 = makeSpanTermQuery("w1");
SpanTermQuery w2 = makeSpanTermQuery("w2");
SpanTermQuery w3 = makeSpanTermQuery("w3");
boolean ordered = true;
SpanNearQuery snq = new SpanNearQuery( new SpanQuery[]{w1,w2,w3}, slop, ordered);
checkHits(snq, expectedDocs);
}
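// For example, doc 1 ("w1 w3 w2 w3") first matches at slop 1: w1@0, w2@2,
// w3@3 is four positions covering three terms, i.e. one position of slack.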
public void testSpanNearOrdered01() throws Exception {
orderedSlopTest3(0, new int[] {0});
}
public void testSpanNearOrdered02() throws Exception {
orderedSlopTest3(1, new int[] {0,1});
}
public void testSpanNearOrdered03() throws Exception {
orderedSlopTest3(2, new int[] {0,1,2});
}
public void testSpanNearOrdered04() throws Exception {
orderedSlopTest3(3, new int[] {0,1,2,3});
}
public void testSpanNearOrdered05() throws Exception {
orderedSlopTest3(4, new int[] {0,1,2,3});
}
}

View File

@ -1,102 +0,0 @@
package org.apache.lucene.util;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class English {
public static String intToEnglish(int i) {
StringBuffer result = new StringBuffer();
intToEnglish(i, result);
return result.toString();
}
public static void intToEnglish(int i, StringBuffer result) {
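// Recursively peels off billions, millions, thousands and hundreds, then
// renders the remaining 0-99 value; e.g. intToEnglish(333) yields
// "three hundred thirty-three " (note the trailing space).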
if (i == 0) {
result.append("zero");
return;
}
if (i < 0) {
result.append("minus ");
i = -i;
}
if (i >= 1000000000) { // billions
intToEnglish(i/1000000000, result);
result.append("billion, ");
i = i%1000000000;
}
if (i >= 1000000) { // millions
intToEnglish(i/1000000, result);
result.append("million, ");
i = i%1000000;
}
if (i >= 1000) { // thousands
intToEnglish(i/1000, result);
result.append("thousand, ");
i = i%1000;
}
if (i >= 100) { // hundreds
intToEnglish(i/100, result);
result.append("hundred ");
i = i%100;
}
if (i >= 20) {
switch (i/10) {
case 9 : result.append("ninety"); break;
case 8 : result.append("eighty"); break;
case 7 : result.append("seventy"); break;
case 6 : result.append("sixty"); break;
case 5 : result.append("fifty"); break;
case 4 : result.append("forty"); break;
case 3 : result.append("thirty"); break;
case 2 : result.append("twenty"); break;
}
i = i%10;
if (i == 0)
result.append(" ");
else
result.append("-");
}
switch (i) {
case 19 : result.append("nineteen "); break;
case 18 : result.append("eighteen "); break;
case 17 : result.append("seventeen "); break;
case 16 : result.append("sixteen "); break;
case 15 : result.append("fifteen "); break;
case 14 : result.append("fourteen "); break;
case 13 : result.append("thirteen "); break;
case 12 : result.append("twelve "); break;
case 11 : result.append("eleven "); break;
case 10 : result.append("ten "); break;
case 9 : result.append("nine "); break;
case 8 : result.append("eight "); break;
case 7 : result.append("seven "); break;
case 6 : result.append("six "); break;
case 5 : result.append("five "); break;
case 4 : result.append("four "); break;
case 3 : result.append("three "); break;
case 2 : result.append("two "); break;
case 1 : result.append("one "); break;
case 0 : result.append(""); break;
}
}
public static void main(String[] args) {
System.out.println(intToEnglish(Integer.parseInt(args[0])));
}
}

View File

@ -1,50 +0,0 @@
package org.apache.lucene.util;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
public class StringHelperTest extends TestCase {
public StringHelperTest(String s) {
super(s);
}
protected void setUp() {
}
protected void tearDown() {
}
public void testStringDifference() {
String test1 = "test";
String test2 = "testing";
int result = StringHelper.stringDifference(test1, test2);
assertTrue(result == 4);
test2 = "foo";
result = StringHelper.stringDifference(test1, test2);
assertTrue(result == 0);
test2 = "test";
result = StringHelper.stringDifference(test1, test2);
assertTrue(result == 4);
}
}

View File

@ -1,177 +0,0 @@
package org.apache.lucene.util;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import junit.framework.TestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
/**
* <code>TestBitVector</code> tests the <code>BitVector</code>, obviously.
*
* @author "Peter Mularien" <pmularien@deploy.com>
* @version $Id$
*/
public class TestBitVector extends TestCase
{
public TestBitVector(String s) {
super(s);
}
/**
* Test the default constructor on BitVectors of various sizes.
* @throws Exception
*/
public void testConstructSize() throws Exception {
doTestConstructOfSize(8);
doTestConstructOfSize(20);
doTestConstructOfSize(100);
doTestConstructOfSize(1000);
}
private void doTestConstructOfSize(int n) {
BitVector bv = new BitVector(n);
assertEquals(n,bv.size());
}
/**
* Test the get() and set() methods on BitVectors of various sizes.
* @throws Exception
*/
public void testGetSet() throws Exception {
doTestGetSetVectorOfSize(8);
doTestGetSetVectorOfSize(20);
doTestGetSetVectorOfSize(100);
doTestGetSetVectorOfSize(1000);
}
private void doTestGetSetVectorOfSize(int n) {
BitVector bv = new BitVector(n);
for(int i=0;i<bv.size();i++) {
// ensure a set bit can be read back
assertFalse(bv.get(i));
bv.set(i);
assertTrue(bv.get(i));
}
}
/**
* Test the clear() method on BitVectors of various sizes.
* @throws Exception
*/
public void testClear() throws Exception {
doTestClearVectorOfSize(8);
doTestClearVectorOfSize(20);
doTestClearVectorOfSize(100);
doTestClearVectorOfSize(1000);
}
private void doTestClearVectorOfSize(int n) {
BitVector bv = new BitVector(n);
for(int i=0;i<bv.size();i++) {
// ensure a set bit is cleared
assertFalse(bv.get(i));
bv.set(i);
assertTrue(bv.get(i));
bv.clear(i);
assertFalse(bv.get(i));
}
}
/**
* Test the count() method on BitVectors of various sizes.
* @throws Exception
*/
public void testCount() throws Exception {
doTestCountVectorOfSize(8);
doTestCountVectorOfSize(20);
doTestCountVectorOfSize(100);
doTestCountVectorOfSize(1000);
}
private void doTestCountVectorOfSize(int n) {
BitVector bv = new BitVector(n);
// test count when incrementally setting bits
for(int i=0;i<bv.size();i++) {
assertFalse(bv.get(i));
assertEquals(i,bv.count());
bv.set(i);
assertTrue(bv.get(i));
assertEquals(i+1,bv.count());
}
bv = new BitVector(n);
// test count when setting then clearing bits
for(int i=0;i<bv.size();i++) {
assertFalse(bv.get(i));
assertEquals(0,bv.count());
bv.set(i);
assertTrue(bv.get(i));
assertEquals(1,bv.count());
bv.clear(i);
assertFalse(bv.get(i));
assertEquals(0,bv.count());
}
}
/**
* Test writing and construction to/from Directory.
* @throws Exception
*/
public void testWriteRead() throws Exception {
doTestWriteRead(8);
doTestWriteRead(20);
doTestWriteRead(100);
doTestWriteRead(1000);
}
private void doTestWriteRead(int n) throws Exception {
Directory d = new RAMDirectory();
BitVector bv = new BitVector(n);
// test count when incrementally setting bits
for(int i=0;i<bv.size();i++) {
assertFalse(bv.get(i));
assertEquals(i,bv.count());
bv.set(i);
assertTrue(bv.get(i));
assertEquals(i+1,bv.count());
bv.write(d, "TESTBV");
BitVector compare = new BitVector(d, "TESTBV");
// compare bit vectors with bits set incrementally
assertTrue(doCompare(bv,compare));
}
}
/**
* Compare two BitVectors.
* This should really be an equals method on the BitVector itself.
* @param bv One bit vector
* @param compare The second to compare
*/
private boolean doCompare(BitVector bv, BitVector compare) {
boolean equal = true;
for(int i=0;i<bv.size();i++) {
// bits must be equal
if(bv.get(i)!=compare.get(i)) {
equal = false;
break;
}
}
return equal;
}
}
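As the comment on doCompare suggests, this comparison arguably belongs on BitVector itself. A minimal sketch of such a method, using only the size() and get(int) accessors exercised above (the name equalsBits is hypothetical, not part of this change):
// Hypothetical BitVector instance method, mirroring doCompare above;
// not part of this change.
public boolean equalsBits(BitVector other) {
if (other == null || other.size() != size())
return false;
for (int i = 0; i < size(); i++) {
if (get(i) != other.get(i)) // any differing bit means not equal
return false;
}
return true;
}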

View File

@ -1,112 +0,0 @@
package org.apache.lucene.util;
/**
* Copyright 2004 The Apache Software Foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.util.Date;
import java.util.Random;
import junit.framework.TestCase;
public class TestPriorityQueue
extends TestCase
{
public TestPriorityQueue(String name)
{
super(name);
}
private static class IntegerQueue
extends PriorityQueue
{
public IntegerQueue(int count)
{
super();
initialize(count);
}
protected boolean lessThan(Object a, Object b)
{
return ((Integer) a).intValue() < ((Integer) b).intValue();
}
}
public void testPQ()
throws Exception
{
testPQ(10000);
}
public static void testPQ(int count)
{
PriorityQueue pq = new IntegerQueue(count);
Random gen = new Random();
int sum = 0, sum2 = 0;
Date start = new Date();
for (int i = 0; i < count; i++)
{
int next = gen.nextInt();
sum += next;
pq.put(new Integer(next));
}
// Date end = new Date();
// System.out.print(((float)(end.getTime()-start.getTime()) / count) * 1000);
// System.out.println(" microseconds/put");
// start = new Date();
int last = Integer.MIN_VALUE;
for (int i = 0; i < count; i++)
{
Integer next = (Integer)pq.pop();
assertTrue(next.intValue() >= last);
last = next.intValue();
sum2 += last;
}
assertEquals(sum, sum2);
// end = new Date();
// System.out.print(((float)(end.getTime()-start.getTime()) / count) * 1000);
// System.out.println(" microseconds/pop");
}
public void testClear()
{
PriorityQueue pq = new IntegerQueue(3);
pq.put(new Integer(2));
pq.put(new Integer(3));
pq.put(new Integer(1));
assertEquals(3, pq.size());
pq.clear();
assertEquals(0, pq.size());
}
public void testFixedSize(){
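// insert(), unlike put(), honors the fixed capacity: once full it evicts
// the least element, so the queue ends up holding 3, 5 and 7, with 3
// (the least of those) on top.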
PriorityQueue pq = new IntegerQueue(3);
pq.insert(new Integer(2));
pq.insert(new Integer(3));
pq.insert(new Integer(1));
pq.insert(new Integer(5));
pq.insert(new Integer(7));
pq.insert(new Integer(1));
assertEquals(3, pq.size());
assertEquals(3, ((Integer) pq.top()).intValue());
}
}

View File

@ -16,8 +16,7 @@ package org.apache.lucene;
* limitations under the License.
*/
import java.io.IOException;
import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
@ -31,7 +30,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import junit.framework.TestCase;
import java.io.IOException;
/**
* A very simple demo used in the API documentation (src/java/overview.html).
@ -62,7 +61,8 @@ public class TestDemo extends TestCase {
// Now search the index:
IndexSearcher isearcher = new IndexSearcher(directory);
// Parse a simple query that searches for "text":
Query query = QueryParser.parse("text", "fieldname", analyzer);
QueryParser parser = new QueryParser("fieldname", analyzer);
Query query = parser.parse("text");
Hits hits = isearcher.search(query);
assertEquals(1, hits.length());
// Iterate through the results:
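The hunk above and the one that follows apply the same replacement: the removed static QueryParser.parse(query, field, analyzer) call gives way to a QueryParser instance bound to a default field and an analyzer. Schematically, with the names taken from the diff:
// Removed form:
Query query = QueryParser.parse("text", "fieldname", analyzer);
// Replacement: construct the parser once, then parse per query string.
QueryParser parser = new QueryParser("fieldname", analyzer);
Query query = parser.parse("text");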

View File

@ -113,9 +113,9 @@ class ThreadSafetyTest {
private void searchFor(int n, Searcher searcher)
throws Exception {
System.out.println("Searching for " + n);
QueryParser parser = new QueryParser("contents", ANALYZER);
Hits hits =
searcher.search(QueryParser.parse(English.intToEnglish(n), "contents",
ANALYZER));
searcher.search(parser.parse(English.intToEnglish(n)));
System.out.println("Search for " + n + ": total=" + hits.length());
for (int j = 0; j < Math.min(3, hits.length()); j++) {
System.out.println("Hit for " + n + ": " + hits.doc(j).get("id"));

View File

@ -91,7 +91,7 @@ public class TestBinaryDocument extends TestCase
assertTrue(stringFldCompressedTest.equals(binaryValCompressed));
/** delete the document from index */
reader.delete(0);
reader.deleteDocument(0);
assertEquals(0, reader.numDocs());
reader.close();

View File

@ -16,14 +16,7 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.io.File;
import java.io.IOException;
import java.util.EmptyStackException;
import java.util.Random;
import java.util.Stack;
import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
@ -33,6 +26,12 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;
import java.io.File;
import java.io.IOException;
import java.util.EmptyStackException;
import java.util.Random;
import java.util.Stack;
/**
* Tests for the "IndexModifier" class, including accesses from two threads at the
* same time.
@ -56,7 +55,7 @@ public class TestIndexModifier extends TestCase {
i.optimize();
assertEquals(2, i.docCount());
i.flush();
i.delete(0);
i.deleteDocument(0);
assertEquals(1, i.docCount());
i.flush();
assertEquals(1, i.docCount());
@ -64,7 +63,7 @@ public class TestIndexModifier extends TestCase {
i.addDocument(getDoc());
i.flush();
assertEquals(3, i.docCount());
i.delete(allDocTerm);
i.deleteDocuments(allDocTerm);
assertEquals(0, i.docCount());
i.optimize();
assertEquals(0, i.docCount());
@ -88,7 +87,7 @@ public class TestIndexModifier extends TestCase {
assertFalse(i.getUseCompoundFile());
// test setting properties when internally the reader is opened:
i.delete(allDocTerm);
i.deleteDocuments(allDocTerm);
i.setMaxBufferedDocs(100);
i.setMergeFactor(25);
i.setMaxFieldLength(250000);
@ -241,7 +240,7 @@ class IndexThread extends Thread {
continue;
}
Term delTerm = new Term("id", new Integer(delId).toString());
int delCount = index.delete(delTerm);
int delCount = index.deleteDocuments(delTerm);
if (delCount != 1) {
throw new RuntimeException("Internal error: " + threadNumber + " deleted " + delCount +
" documents, term=" + delTerm);

View File

@ -217,7 +217,7 @@ public class TestIndexReader extends TestCase
// DELETE DOCUMENTS CONTAINING TERM: aaa
int deleted = 0;
reader = IndexReader.open(dir);
deleted = reader.delete(searchTerm);
deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
@ -290,7 +290,7 @@ public class TestIndexReader extends TestCase
// NOTE: the reader was created when only "aaa" documents were in
int deleted = 0;
try {
deleted = reader.delete(searchTerm);
deleted = reader.deleteDocuments(searchTerm);
fail("Delete allowed on an index reader with stale segment information");
} catch (IOException e) {
/* success */
@ -305,7 +305,7 @@ public class TestIndexReader extends TestCase
assertTermDocsCount("first reader", reader, searchTerm, 100);
assertTermDocsCount("first reader", reader, searchTerm2, 100);
deleted = reader.delete(searchTerm);
deleted = reader.deleteDocuments(searchTerm);
assertEquals("deleted count", 100, deleted);
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm2));
@ -384,13 +384,13 @@ public class TestIndexReader extends TestCase
writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
IndexReader reader = IndexReader.open(dir);
try {
reader.delete(0);
reader.deleteDocument(0);
fail("expected lock");
} catch(IOException e) {
// expected exception
}
IndexReader.unlock(dir); // this should not be done in the real world!
reader.delete(0);
reader.deleteDocument(0);
reader.close();
writer.close();
}
@ -402,8 +402,8 @@ public class TestIndexReader extends TestCase
addDocumentWithFields(writer);
writer.close();
IndexReader reader = IndexReader.open(dir);
reader.delete(0);
reader.delete(1);
reader.deleteDocument(0);
reader.deleteDocument(1);
reader.undeleteAll();
reader.close();
reader = IndexReader.open(dir);
@ -463,7 +463,7 @@ public class TestIndexReader extends TestCase
// delete documents containing term: aaa
// when the reader is closed, the segment info is updated and
// the first reader is now stale
reader2.delete(searchTerm1);
reader2.deleteDocuments(searchTerm1);
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm1));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm2));
assertEquals("after delete 1", 100, reader2.docFreq(searchTerm3));
@ -484,7 +484,7 @@ public class TestIndexReader extends TestCase
// ATTEMPT TO DELETE FROM STALE READER
// delete documents containing term: bbb
try {
reader1.delete(searchTerm2);
reader1.deleteDocuments(searchTerm2);
fail("Delete allowed from a stale index reader");
} catch (IOException e) {
/* success */
@ -500,7 +500,7 @@ public class TestIndexReader extends TestCase
assertTermDocsCount("reopened", reader1, searchTerm2, 100);
assertTermDocsCount("reopened", reader1, searchTerm3, 100);
reader1.delete(searchTerm2);
reader1.deleteDocuments(searchTerm2);
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm1));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm2));
assertEquals("deleted 2", 100, reader1.docFreq(searchTerm3));

View File

@ -39,7 +39,7 @@ public class TestIndexWriter extends TestCase
// delete 40 documents
reader = IndexReader.open(dir);
for (i = 0; i < 40; i++) {
reader.delete(i);
reader.deleteDocument(i);
}
reader.close();
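One detail behind the loop above: deleteDocument(int) only marks documents as deleted, so numDocs() drops immediately while maxDoc() is unchanged until the deleted slots are merged away, for instance by optimize(). A brief sketch, assuming an existing non-empty Directory:

import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.Directory;

class DeleteCountSketch {
    static void show(Directory dir) throws IOException {
        IndexReader reader = IndexReader.open(dir);
        int before = reader.numDocs();
        reader.deleteDocument(0);
        System.out.println(reader.numDocs() + " live of " + before); // one fewer
        System.out.println(reader.maxDoc() + " slots"); // unchanged until merge
        reader.close();
    }
}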

View File

@ -75,7 +75,7 @@ public class TestMultiReader extends TestCase {
MultiReader reader = new MultiReader(dir, sis, false, readers);
assertTrue(reader != null);
assertEquals( 2, reader.numDocs() );
reader.delete(0);
reader.deleteDocument(0);
assertEquals( 1, reader.numDocs() );
reader.undeleteAll();
assertEquals( 2, reader.numDocs() );

Some files were not shown because too many files have changed in this diff.