mirror of https://github.com/apache/lucene.git
LUCENE-1819: MatchAllDocsQuery.toString(field) should produce output that is parsable by the QueryParser.
git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@805624 13f79535-47bb-0310-9956-ffa450edef68
parent f720852e37
commit 7700fa3626
@@ -490,6 +490,9 @@ Bug fixes
 23. LUCENE-1801: Changed all Tokenizers or TokenStreams in core/contrib)
     that are the source of Tokens to always call
     AttributeSource.clearAttributes() first. (Uwe Schindler)
 
+24. LUCENE-1819: MatchAllDocsQuery.toString(field) should produce output
+    that is parsable by the QueryParser. (John Wang, Mark Miller)
+
 New features
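Entry 23 above (LUCENE-1801) is unchanged context that happens to sit next to the new entry; for readers unfamiliar with the attribute API it refers to, the following is a minimal, hypothetical sketch of the pattern, written against the 2.9-era TokenStream/TermAttribute API. The class name SingleValueTokenStream and its single-token behavior are purely illustrative and are not code from this commit.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

// Illustrative only: a Token-producing stream that follows the LUCENE-1801 rule of
// calling clearAttributes() before populating attributes for the next token.
public final class SingleValueTokenStream extends TokenStream {
  private final TermAttribute termAtt = (TermAttribute) addAttribute(TermAttribute.class);
  private final String value;
  private boolean done = false;

  public SingleValueTokenStream(String value) {
    this.value = value;
  }

  public boolean incrementToken() throws IOException {
    if (done) {
      return false;
    }
    clearAttributes();            // reset all attribute state first (LUCENE-1801)
    termAtt.setTermBuffer(value); // then set the term text for this token
    done = true;
    return true;
  }
}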
@@ -151,7 +151,7 @@ public class MatchAllDocsQuery extends Query {
 
   public String toString(String field) {
     StringBuffer buffer = new StringBuffer();
-    buffer.append("MatchAllDocsQuery");
+    buffer.append("*:*");
     buffer.append(ToStringUtils.boost(getBoost()));
     return buffer.toString();
   }
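To make the intent of this one-line change concrete, here is a small, hypothetical round-trip sketch using the same 2.9-era QueryParser and StandardAnalyzer APIs as the test below; the class name, the default field "content", and the 2.3f boost are illustrative assumptions, not part of the commit.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class MatchAllToStringRoundTrip {
  public static void main(String[] args) throws Exception {
    MatchAllDocsQuery maq = new MatchAllDocsQuery();
    maq.setBoost(2.3f);

    // After this change toString() yields "*:*^2.3"; before it yielded "MatchAllDocsQuery^2.3".
    String text = maq.toString();

    // The classic QueryParser treats "*:*" as a match-all query, so the string round-trips.
    QueryParser qp = new QueryParser("content", new StandardAnalyzer(Version.LUCENE_CURRENT));
    Query roundTripped = qp.parse(text);
    System.out.println(roundTripped);  // expected to print "*:*^2.3" again
  }
}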
@@ -18,26 +18,30 @@ package org.apache.lucene.search;
 
 import java.io.IOException;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.queryParser.QueryParser;
 import org.apache.lucene.store.RAMDirectory;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.Version;
 
 /**
  * Tests MatchAllDocsQuery.
  *
  */
 public class TestMatchAllDocsQuery extends LuceneTestCase {
 
-  public void testQuery() throws IOException {
+  private Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);
+
+  public void testQuery() throws Exception {
 
     RAMDirectory dir = new RAMDirectory();
-    IndexWriter iw = new IndexWriter(dir, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
+    IndexWriter iw = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
     iw.setMaxBufferedDocs(2); // force multi-segment
     addDoc("one", iw, 1f);
     addDoc("two", iw, 20f);
@@ -95,6 +99,18 @@ public class TestMatchAllDocsQuery extends LuceneTestCase {
     hits = is.search(new MatchAllDocsQuery(), null, 1000).scoreDocs;
     assertEquals(2, hits.length);
+
+    // test parsable toString()
+    QueryParser qp = new QueryParser("key", analyzer);
+    hits = is.search(qp.parse(new MatchAllDocsQuery().toString()), null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+
+    // test parsable toString() with non default boost
+    Query maq = new MatchAllDocsQuery();
+    maq.setBoost(2.3f);
+    Query pq = qp.parse(maq.toString());
+    hits = is.search(pq, null, 1000).scoreDocs;
+    assertEquals(2, hits.length);
+
     is.close();
     ir.close();
     dir.close();
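For contrast, a hypothetical sketch of what the classic QueryParser does with the pre-change output; the default field "key" mirrors the test above, while the class name is illustrative. The bare word is analyzed into an ordinary term query on the default field, which matches none of the documents in the test index, which is why the old toString() could not round-trip.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.Version;

public class OldToStringNotParsable {
  public static void main(String[] args) throws Exception {
    QueryParser qp = new QueryParser("key", new StandardAnalyzer(Version.LUCENE_CURRENT));

    // The pre-change output was the literal class name, which the parser treats as a plain term.
    Query q = qp.parse("MatchAllDocsQuery");
    System.out.println(q);  // roughly key:matchalldocsquery -- a TermQuery, not a match-all query
  }
}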