mirror of https://github.com/apache/lucene.git
LUCENE-1092: fix KeywordAnalyzer.reusableTokenStream so it can successfully be reused
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@605149 13f79535-47bb-0310-9956-ffa450edef68
parent 10c1ec3a66
commit 905674805c
KeywordAnalyzer.java

@@ -17,6 +17,7 @@ package org.apache.lucene.analysis;
  * limitations under the License.
  */
 
+import java.io.IOException;
 import java.io.Reader;
 
 /**
@@ -29,12 +30,13 @@ public class KeywordAnalyzer extends Analyzer {
     return new KeywordTokenizer(reader);
   }
   public TokenStream reusableTokenStream(String fieldName,
-                                         final Reader reader) {
+                                         final Reader reader) throws IOException {
     Tokenizer tokenizer = (Tokenizer) getPreviousTokenStream();
     if (tokenizer == null) {
       tokenizer = new KeywordTokenizer(reader);
       setPreviousTokenStream(tokenizer);
-    }
+    } else
+      tokenizer.reset(reader);
     return tokenizer;
   }
 }
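The else branch is the substance of the fix: Analyzer.getPreviousTokenStream/setPreviousTokenStream cache one TokenStream per thread, so every call after the first returns the same KeywordTokenizer, which must be pointed at the new Reader before it is handed back. A minimal usage sketch of the now-working reuse path (standalone illustration, not part of the commit; the class name ReuseDemo is invented):

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.KeywordAnalyzer;
    import org.apache.lucene.analysis.TokenStream;

    public class ReuseDemo {
      public static void main(String[] args) throws IOException {
        KeywordAnalyzer analyzer = new KeywordAnalyzer();

        // First call creates the KeywordTokenizer and caches it per thread.
        TokenStream ts = analyzer.reusableTokenStream("partnum", new StringReader("Q36"));
        System.out.println(ts.next().termText());   // prints Q36

        // Second call returns the cached tokenizer. Before this fix it was
        // still marked done and emitted nothing; with the reset it sees the
        // new Reader and emits the new value.
        ts = analyzer.reusableTokenStream("partnum", new StringReader("Q37"));
        System.out.println(ts.next().termText());   // prints Q37
      }
    }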
KeywordTokenizer.java

@@ -55,4 +55,9 @@ public class KeywordTokenizer extends Tokenizer {
     }
     return null;
   }
+
+  public void reset(Reader input) throws IOException {
+    super.reset(input);
+    this.done = false;
+  }
 }
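KeywordTokenizer's only per-stream state is its done flag, so reuse just needs the Reader swapped and the flag cleared. The same contract applies to any tokenizer cached this way; a hypothetical subclass (not part of the commit, all names invented) showing the pattern:

    import java.io.IOException;
    import java.io.Reader;

    import org.apache.lucene.analysis.Token;
    import org.apache.lucene.analysis.Tokenizer;

    // Invented example: emits a single placeholder token per stream.
    class OneShotTokenizer extends Tokenizer {
      private boolean done;                 // per-stream state

      OneShotTokenizer(Reader input) {
        super(input);
      }

      public Token next() throws IOException {
        if (done) return null;
        done = true;
        return new Token("stub", 0, 4);     // placeholder token (illustrative)
      }

      public void reset(Reader input) throws IOException {
        super.reset(input);                 // Tokenizer swaps in the new Reader
        done = false;                       // clear all per-stream state
      }
    }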
TestKeywordAnalyzer.java

@@ -18,7 +18,10 @@ package org.apache.lucene.analysis;
  */
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermDocs;
 import org.apache.lucene.store.RAMDirectory;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -61,4 +64,22 @@ public class TestKeywordAnalyzer extends LuceneTestCase {
                  "+partnum:Q36 +space", query.toString("description"));
     assertEquals("doc found!", 1, hits.length());
   }
+
+  public void testMutipleDocument() throws Exception {
+    RAMDirectory dir = new RAMDirectory();
+    IndexWriter writer = new IndexWriter(dir,new KeywordAnalyzer(), true);
+    Document doc = new Document();
+    doc.add(new Field("partnum", "Q36", Field.Store.YES, Field.Index.TOKENIZED));
+    writer.addDocument(doc);
+    doc = new Document();
+    doc.add(new Field("partnum", "Q37", Field.Store.YES, Field.Index.TOKENIZED));
+    writer.addDocument(doc);
+    writer.close();
+
+    IndexReader reader = IndexReader.open(dir);
+    TermDocs td = reader.termDocs(new Term("partnum", "Q36"));
+    assertTrue(td.next());
+    td = reader.termDocs(new Term("partnum", "Q37"));
+    assertTrue(td.next());
+  }
 }
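A note on the test's shape (annotation, not part of the commit): indexing two documents is what exercises the fix, since the first addDocument creates the thread's KeywordTokenizer and only the second retrieves it from the per-thread cache. Before this change the cached tokenizer was still marked done, so "Q37" was never indexed and the final assertTrue(td.next()) failed.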