mirror of https://github.com/apache/lucene.git
- Applied a patch from Grant Ingersoll
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@150189 13f79535-47bb-0310-9956-ffa450edef68
commit eb0c2853bb
parent f37a66bb2f
--- a/TestStopAnalyzer.java
+++ b/TestStopAnalyzer.java
@@ -1,60 +1,76 @@
 package org.apache.lucene.analysis;
 
+/**
+ * Copyright 2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
 import junit.framework.TestCase;
 
 import java.io.StringReader;
-import java.util.ArrayList;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.PhraseQuery;
-import org.apache.lucene.search.Hits;
+import java.io.IOException;
+import java.util.Set;
+import java.util.HashSet;
 
 public class TestStopAnalyzer extends TestCase {
-  private StopAnalyzer stopAnalyzer = new StopAnalyzer();
+  private StopAnalyzer stop = new StopAnalyzer();
 
-  public Token[] tokensFromAnalyzer(Analyzer analyzer, String text)
-      throws Exception {
-    TokenStream stream =
-      analyzer.tokenStream("contents", new StringReader(text));
-    ArrayList tokenList = new ArrayList();
-    while (true) {
-      Token token = stream.next();
-      if (token == null) break;
-
-      tokenList.add(token);
-    }
-
-    return (Token[]) tokenList.toArray(new Token[0]);
+  private Set inValidTokens = new HashSet();
+  public TestStopAnalyzer(String s) {
+    super(s);
   }
 
+  protected void setUp() {
+    for (int i = 0; i < StopAnalyzer.ENGLISH_STOP_WORDS.length; i++) {
+      inValidTokens.add(StopAnalyzer.ENGLISH_STOP_WORDS[i]);
+    }
+  }
+
-  public void testPhraseQuery() throws Exception {
-    RAMDirectory directory = new RAMDirectory();
-    IndexWriter writer = new IndexWriter(directory, stopAnalyzer, true);
-    Document doc = new Document();
-    doc.add(Field.Text("field", "the stop words are here"));
-    writer.addDocument(doc);
-    writer.close();
-
-    IndexSearcher searcher = new IndexSearcher(directory);
-
-    // valid exact phrase query
-    PhraseQuery query = new PhraseQuery();
-    query.add(new Term("field","stop"));
-    query.add(new Term("field","words"));
-    Hits hits = searcher.search(query);
-    assertEquals(1, hits.length());
-
-    // currently StopAnalyzer does not leave "holes", so this matches.
-    query = new PhraseQuery();
-    query.add(new Term("field", "words"));
-    query.add(new Term("field", "here"));
-    hits = searcher.search(query);
-    assertEquals(1, hits.length());
-
-    searcher.close();
+  public void testDefaults() {
+    assertTrue(stop != null);
+    StringReader reader = new StringReader("This is a test of the english stop analyzer");
+    TokenStream stream = stop.tokenStream("test", reader);
+    assertTrue(stream != null);
+    Token token = null;
+    try {
+      while ((token = stream.next()) != null)
+      {
+        assertTrue(inValidTokens.contains(token.termText()) == false);
+      }
+    } catch (IOException e) {
+      assertTrue(false);
+    }
+  }
+
+  public void testStopList() {
+    Set stopWordsSet = new HashSet();
+    stopWordsSet.add("good");
+    stopWordsSet.add("test");
+    stopWordsSet.add("analyzer");
+    StopAnalyzer newStop = new StopAnalyzer((String[]) stopWordsSet.toArray(new String[3]));
+    StringReader reader = new StringReader("This is a good test of the english stop analyzer");
+    TokenStream stream = newStop.tokenStream("test", reader);
+    assertTrue(stream != null);
+    Token token = null;
+    try {
+      while ((token = stream.next()) != null)
+      {
+        String text = token.termText();
+        assertTrue(stopWordsSet.contains(text) == false);
+      }
+    } catch (IOException e) {
+      assertTrue(false);
+    }
   }
 }
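For context, the rewritten test drives StopAnalyzer through the plain analysis API (Analyzer.tokenStream, TokenStream.next, Token.termText) instead of building a RAMDirectory index and checking PhraseQuery hits. Below is a minimal standalone sketch of that same pattern against the Lucene API of this era; the class name StopAnalyzerDemo and its two-word stop list are illustrative and not part of this commit.

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.StopAnalyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

public class StopAnalyzerDemo {
  public static void main(String[] args) throws IOException {
    // Passing a String[] replaces the default ENGLISH_STOP_WORDS entirely,
    // so only "good" and "test" are filtered in this example.
    StopAnalyzer analyzer = new StopAnalyzer(new String[] {"good", "test"});
    TokenStream stream =
        analyzer.tokenStream("contents", new StringReader("This is a good test"));
    // StopAnalyzer lowercases tokens and then drops stop words, so this
    // prints "this", "is", "a"; "good" and "test" never come off the stream.
    Token token;
    while ((token = stream.next()) != null) {
      System.out.println(token.termText());
    }
  }
}

Because a custom stop list replaces the defaults rather than extending them, testStopList in the patch only needs to assert that its own three words are absent from the output.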