mirror of https://github.com/apache/lucene.git
code snippet improvements: fix out of date api usage and add syntax highlighting
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1384034 13f79535-47bb-0310-9956-ffa450edef68
parent df5e4e7eb6
commit 647b509169
@@ -141,7 +141,7 @@ This decision matrix should help you:
 </tr>
 </table>
 <h3>Examples</h3>
-<pre>
+<pre class="prettyprint">
 public void testHyphenationCompoundWordsDE() throws Exception {
   String[] dict = { "Rind", "Fleisch", "Draht", "Schere", "Gesetz",
       "Aufgabe", "Überwachung" };
@@ -34,10 +34,12 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 To use this with LowerCaseTokenizer, for example, you'd write an
 analyzer like this:
 <P>
-<PRE>
+<PRE class="prettyprint">
 class MyAnalyzer extends Analyzer {
-  public final TokenStream tokenStream(String fieldName, Reader reader) {
-    return new PorterStemFilter(new LowerCaseTokenizer(reader));
+  {@literal @Override}
+  protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
+    Tokenizer source = new LowerCaseTokenizer(version, reader);
+    return new TokenStreamComponents(source, new PorterStemFilter(source));
   }
 }
 </PRE>
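For context, here is how the rewritten analyzer would be consumed under the 4.x TokenStream contract (reset, incrementToken, end, close). This is a minimal sketch, not part of the commit: MyAnalyzer and the "text" field name come from the snippet above, and the input string is illustrative.
<pre class="prettyprint">
Analyzer analyzer = new MyAnalyzer();
TokenStream ts = analyzer.tokenStream("text", new StringReader("Flowers flowing"));
CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
ts.reset();                              // required before the first incrementToken()
while (ts.incrementToken()) {
  System.out.println(term.toString());   // lowercased, Porter-stemmed terms
}
ts.end();
ts.close();
</pre>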
@@ -29,7 +29,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 * In order to increase search efficiency, this filter puts hyphenated words broken into two lines back together.
 * This filter should be used on indexing time only.
 * Example field definition in schema.xml:
-* <pre>
+* <pre class="prettyprint">
 * <fieldtype name="text" class="solr.TextField" positionIncrementGap="100">
 *   <analyzer type="index">
 *     <tokenizer class="solr.WhitespaceTokenizerFactory"/>

@@ -31,13 +31,15 @@ import java.util.Map;
  *
  * <p>Example usage:
  *
- * <pre>
- *   Map analyzerPerField = new HashMap();
- *   analyzerPerField.put("firstname", new KeywordAnalyzer());
- *   analyzerPerField.put("lastname", new KeywordAnalyzer());
+ * <pre class="prettyprint">
+ * {@code
+ * Map<String,Analyzer> analyzerPerField = new HashMap<String,Analyzer>();
+ * analyzerPerField.put("firstname", new KeywordAnalyzer());
+ * analyzerPerField.put("lastname", new KeywordAnalyzer());
  *
- *   PerFieldAnalyzerWrapper aWrapper =
- *     new PerFieldAnalyzerWrapper(new StandardAnalyzer(), analyzerPerField);
+ * PerFieldAnalyzerWrapper aWrapper =
+ *   new PerFieldAnalyzerWrapper(new StandardAnalyzer(version), analyzerPerField);
+ * }
  * </pre>
  *
  * <p>In this example, StandardAnalyzer will be used for all fields except "firstname"
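As a hedged follow-on (not in the commit): the wrapper is handed to IndexWriterConfig like any other Analyzer; version stands for the application's Version constant and directory for an open Directory.
<pre class="prettyprint">
IndexWriterConfig conf = new IndexWriterConfig(version, aWrapper);
IndexWriter writer = new IndexWriter(directory, conf);
// "firstname" and "lastname" now go through KeywordAnalyzer,
// every other field through StandardAnalyzer.
</pre>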
@@ -36,24 +36,24 @@ import org.apache.lucene.util.AttributeSource;
 * It is also useful for doing things like entity extraction or proper noun analysis as
 * part of the analysis workflow and saving off those tokens for use in another field.
 *
-* <pre>
-TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader1));
+* <pre class="prettyprint">
+TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(version, reader1));
 TeeSinkTokenFilter.SinkTokenStream sink1 = source1.newSinkTokenStream();
 TeeSinkTokenFilter.SinkTokenStream sink2 = source1.newSinkTokenStream();
 
-TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(reader2));
+TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(version, reader2));
 source2.addSinkTokenStream(sink1);
 source2.addSinkTokenStream(sink2);
 
-TokenStream final1 = new LowerCaseFilter(source1);
+TokenStream final1 = new LowerCaseFilter(version, source1);
 TokenStream final2 = source2;
 TokenStream final3 = new EntityDetect(sink1);
 TokenStream final4 = new URLDetect(sink2);
 
-d.add(new Field("f1", final1));
-d.add(new Field("f2", final2));
-d.add(new Field("f3", final3));
-d.add(new Field("f4", final4));
+d.add(new TextField("f1", final1, Field.Store.NO));
+d.add(new TextField("f2", final2, Field.Store.NO));
+d.add(new TextField("f3", final3, Field.Store.NO));
+d.add(new TextField("f4", final4, Field.Store.NO));
 * </pre>
 * In this example, <code>sink1</code> and <code>sink2</code> will both get tokens from both
 * <code>reader1</code> and <code>reader2</code> after whitespace tokenizer

@@ -63,9 +63,9 @@ d.add(new Field("f4", final4));
 * add another sink and then pass all tokens to the sinks at once using {@link #consumeAllTokens}.
 * This TokenFilter is exhausted after this. In the above example, change
 * the example above to:
-* <pre>
+* <pre class="prettyprint">
 ...
-TokenStream final1 = new LowerCaseFilter(source1.newSinkTokenStream());
+TokenStream final1 = new LowerCaseFilter(version, source1.newSinkTokenStream());
 TokenStream final2 = source2.newSinkTokenStream();
 sink1.consumeAllTokens();
 sink2.consumeAllTokens();

@@ -53,17 +53,17 @@
 <pre class="prettyprint">
   // "fa" Locale is not supported by Sun JDK 1.4 or 1.5
   Collator collator = Collator.getInstance(new Locale("ar"));
-  CollationKeyAnalyzer analyzer = new CollationKeyAnalyzer(Version.LUCENE_40, collator);
+  CollationKeyAnalyzer analyzer = new CollationKeyAnalyzer(version, collator);
   RAMDirectory ramDir = new RAMDirectory();
-  IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_40, analyzer));
+  IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(version, analyzer));
   Document doc = new Document();
-  doc.add(new Field("content", "\u0633\u0627\u0628",
-          Field.Store.YES, Field.Index.ANALYZED));
+  doc.add(new TextField("content", "\u0633\u0627\u0628", Field.Store.YES));
   writer.addDocument(doc);
   writer.close();
-  IndexSearcher is = new IndexSearcher(ramDir, true);
+  IndexReader ir = DirectoryReader.open(ramDir);
+  IndexSearcher is = new IndexSearcher(ir);
 
-  QueryParser aqp = new QueryParser(Version.LUCENE_40, "content", analyzer);
+  QueryParser aqp = new QueryParser(version, "content", analyzer);
   aqp.setAnalyzeRangeTerms(true);
 
   // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -79,20 +79,21 @@
 <h3>Danish Sorting</h3>
 <pre class="prettyprint">
   Analyzer analyzer
-      = new CollationKeyAnalyzer(Version.LUCENE_40, Collator.getInstance(new Locale("da", "dk")));
+      = new CollationKeyAnalyzer(version, Collator.getInstance(new Locale("da", "dk")));
   RAMDirectory indexStore = new RAMDirectory();
-  IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(Version.LUCENE_40, analyzer));
+  IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(version, analyzer));
   String[] tracer = new String[] { "A", "B", "C", "D", "E" };
   String[] data = new String[] { "HAT", "HUT", "H\u00C5T", "H\u00D8T", "HOT" };
   String[] sortedTracerOrder = new String[] { "A", "E", "B", "D", "C" };
   for (int i = 0 ; i < data.length ; ++i) {
     Document doc = new Document();
-    doc.add(new Field("tracer", tracer[i], Field.Store.YES, Field.Index.NO));
-    doc.add(new Field("contents", data[i], Field.Store.NO, Field.Index.ANALYZED));
+    doc.add(new StoredField("tracer", tracer[i]));
+    doc.add(new TextField("contents", data[i], Field.Store.NO));
     writer.addDocument(doc);
   }
   writer.close();
-  IndexSearcher searcher = new IndexSearcher(indexStore, true);
+  IndexReader ir = DirectoryReader.open(indexStore);
+  IndexSearcher searcher = new IndexSearcher(ir);
   Sort sort = new Sort();
   sort.setSort(new SortField("contents", SortField.STRING));
   Query query = new MatchAllDocsQuery();
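The Danish hunk stops right after the query is built. The remaining steps its surrounding javadoc implies, sketched here (not part of the commit), sort by the collation-keyed field and read back the stored tracer:
<pre class="prettyprint">
ScoreDoc[] result = searcher.search(query, null, 1000, sort).scoreDocs;
for (int i = 0; i < result.length; ++i) {
  Document doc = searcher.doc(result[i].doc);
  System.out.println(doc.get("tracer"));  // expected order: "A", "E", "B", "D", "C"
}
</pre>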
@@ -107,15 +108,16 @@
 <pre class="prettyprint">
   Collator collator = Collator.getInstance(new Locale("tr", "TR"));
   collator.setStrength(Collator.PRIMARY);
-  Analyzer analyzer = new CollationKeyAnalyzer(Version.LUCENE_40, collator);
+  Analyzer analyzer = new CollationKeyAnalyzer(version, collator);
   RAMDirectory ramDir = new RAMDirectory();
-  IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_40, analyzer));
+  IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(version, analyzer));
   Document doc = new Document();
-  doc.add(new Field("contents", "DIGY", Field.Store.NO, Field.Index.ANALYZED));
+  doc.add(new TextField("contents", "DIGY", Field.Store.NO));
   writer.addDocument(doc);
   writer.close();
-  IndexSearcher is = new IndexSearcher(ramDir, true);
-  QueryParser parser = new QueryParser(Version.LUCENE_40, "contents", analyzer);
+  IndexReader ir = DirectoryReader.open(ramDir);
+  IndexSearcher is = new IndexSearcher(ir);
+  QueryParser parser = new QueryParser(version, "contents", analyzer);
   Query query = parser.parse("d\u0131gy"); // U+0131: dotless i
   ScoreDoc[] result = is.search(query, null, 1000).scoreDocs;
   assertEquals("The index Term should be included.", 1, result.length);

@@ -34,10 +34,11 @@ For benchmarking TREC collections with TREC QRels, take a look at the
 <p>
 Here is a sample code used to run the TREC 2006 queries 701-850 on the .Gov2 collection:
 
-<pre>
+<pre class="prettyprint">
   File topicsFile = new File("topics-701-850.txt");
   File qrelsFile = new File("qrels-701-850.txt");
-  Searcher searcher = new IndexSearcher("index");
+  IndexReader ir = DirectoryReader.open(directory);
+  IndexSearcher searcher = new IndexSearcher(ir);
 
   int maxResults = 1000;
   String docNameField = "docname";
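The TREC sample is cut off after the field setup. A sketch of how it continues using the benchmark quality package's own classes (TrecTopicsReader, TrecJudge, SimpleQQParser, QualityBenchmark); the exact wiring below is an assumption, not text from the commit:
<pre class="prettyprint">
PrintWriter logger = new PrintWriter(System.out, true);
TrecTopicsReader qReader = new TrecTopicsReader();
QualityQuery[] qqs = qReader.readQueries(new BufferedReader(new FileReader(topicsFile)));
Judge judge = new TrecJudge(new BufferedReader(new FileReader(qrelsFile)));
judge.validateData(qqs, logger);
QualityQueryParser qqParser = new SimpleQQParser("title", "body");
QualityBenchmark qrun = new QualityBenchmark(qqs, qqParser, searcher, docNameField);
QualityStats[] stats = qrun.execute(judge, null, logger);  // null: no submission report
</pre>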
@@ -47,21 +47,17 @@ import org.apache.lucene.util.NumericUtils;
  *
  * <p>Here's an example usage, for an <code>int</code> field:
  *
- * <pre>
- *  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value));
- *  field.setOmitNorms(true);
- *  field.setIndexOptions(IndexOptions.DOCS_ONLY);
+ * <pre class="prettyprint">
+ *  Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value), IntField.TYPE_NOT_STORED);
  *  document.add(field);
  * </pre>
  *
  * <p>For optimal performance, re-use the TokenStream and Field instance
  * for more than one document:
  *
- * <pre>
+ * <pre class="prettyprint">
  *  NumericTokenStream stream = new NumericTokenStream(precisionStep);
- *  Field field = new Field(name, stream);
- *  field.setOmitNorms(true);
- *  field.setIndexOptions(IndexOptions.DOCS_ONLY);
+ *  Field field = new Field(name, stream, IntField.TYPE_NOT_STORED);
  *  Document document = new Document();
  *  document.add(field);
  *
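The reuse advice above continues with a per-document loop; a minimal sketch (assumed, not from the commit: writer is an open IndexWriter and values supplies one int per document):
<pre class="prettyprint">
NumericTokenStream stream = new NumericTokenStream(precisionStep);
Field field = new Field(name, stream, IntField.TYPE_NOT_STORED);
Document document = new Document();
document.add(field);
for (int value : values) {
  stream.setIntValue(value);   // mutate the shared stream, no per-doc allocation
  writer.addDocument(document);
}
</pre>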
@@ -79,28 +79,28 @@ import org.apache.lucene.util.BytesRef;
 <p>Typical Token reuse patterns:
 <ul>
 <li> Copying text from a string (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
-<pre>
+<pre class="prettyprint">
   return reusableToken.reinit(string, startOffset, endOffset[, type]);
 </pre>
 </li>
 <li> Copying some text from a string (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
-<pre>
+<pre class="prettyprint">
   return reusableToken.reinit(string, 0, string.length(), startOffset, endOffset[, type]);
 </pre>
 </li>
 </li>
 <li> Copying text from char[] buffer (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
-<pre>
+<pre class="prettyprint">
   return reusableToken.reinit(buffer, 0, buffer.length, startOffset, endOffset[, type]);
 </pre>
 </li>
 <li> Copying some text from a char[] buffer (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
-<pre>
+<pre class="prettyprint">
   return reusableToken.reinit(buffer, start, end - start, startOffset, endOffset[, type]);
 </pre>
 </li>
 <li> Copying from one Token to another (type is reset to {@link #DEFAULT_TYPE} if not specified):<br/>
-<pre>
+<pre class="prettyprint">
   return reusableToken.reinit(source.buffer(), 0, source.length(), source.startOffset(), source.endOffset()[, source.type()]);
 </pre>
 </li>

@@ -25,7 +25,7 @@ import org.apache.lucene.index.DocValues;
 * of a per-document byte value for scoring, sorting or value retrieval. Here's an
 * example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new ByteDocValuesField(name, (byte) 22));
 * </pre>
 *

@@ -30,7 +30,7 @@ import org.apache.lucene.util.BytesRef;
 * are (mostly) unique it's better to use {@link
 * StraightBytesDocValuesField}. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new DerefBytesDocValuesField(name, new BytesRef("hello")));
 * </pre>
 *

@@ -25,7 +25,7 @@ import org.apache.lucene.index.DocValues;
 * of a per-document double value for scoring, sorting or value retrieval. Here's an
 * example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new DoubleDocValuesField(name, 22.0));
 * </pre>
 *

@@ -29,14 +29,14 @@ import org.apache.lucene.util.NumericUtils;
 * This class provides a {@link Field} that enables indexing of double values
 * for efficient range filtering and sorting. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new DoubleField(name, 6.0, Field.Store.NO));
 * </pre>
 *
 * For optimal performance, re-use the <code>DoubleField</code> and
 * {@link Document} instance for more than one document:
 *
-* <pre>
+* <pre class="prettyprint">
 *   DoubleField field = new DoubleField(name, 0.0, Field.Store.NO);
 *   Document document = new Document();
 *   document.add(field);
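The DoubleField reuse pattern ends the same way; a sketch of the loop the javadoc is driving at (assumed: writer is an open IndexWriter and values supplies one double per document):
<pre class="prettyprint">
DoubleField field = new DoubleField(name, 0.0, Field.Store.NO);
Document document = new Document();
document.add(field);
for (double value : values) {
  field.setDoubleValue(value);  // mutate the single Field instance
  writer.addDocument(document);
}
</pre>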
@@ -25,7 +25,7 @@ import org.apache.lucene.index.DocValues;
 * of a per-document float value for scoring, sorting or value retrieval. Here's an
 * example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new FloatDocValuesField(name, 22f));
 * </pre>
 *

@@ -29,14 +29,14 @@ import org.apache.lucene.util.NumericUtils;
 * This class provides a {@link Field} that enables indexing of float values
 * for efficient range filtering and sorting. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new FloatField(name, 6.0F, Field.Store.NO));
 * </pre>
 *
 * For optimal performance, re-use the <code>FloatField</code> and
 * {@link Document} instance for more than one document:
 *
-* <pre>
+* <pre class="prettyprint">
 *   FloatField field = new FloatField(name, 0.0F, Field.Store.NO);
 *   Document document = new Document();
 *   document.add(field);

@@ -25,7 +25,7 @@ import org.apache.lucene.index.DocValues;
 * of a per-document int value for scoring, sorting or value retrieval. Here's an
 * example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new IntDocValuesField(name, 22));
 * </pre>
 *

@@ -29,14 +29,14 @@ import org.apache.lucene.util.NumericUtils;
 * This class provides a {@link Field} that enables indexing of integer values
 * for efficient range filtering and sorting. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new IntField(name, 6, Field.Store.NO));
 * </pre>
 *
 * For optimal performance, re-use the <code>IntField</code> and
 * {@link Document} instance for more than one document:
 *
-* <pre>
+* <pre class="prettyprint">
 *   IntField field = new IntField(name, 6, Field.Store.NO);
 *   Document document = new Document();
 *   document.add(field);

@@ -25,7 +25,7 @@ import org.apache.lucene.index.DocValues;
 * of a per-document long value for scoring, sorting or value retrieval. Here's an
 * example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new LongDocValuesField(name, 22L));
 * </pre>
 *

@@ -29,14 +29,14 @@ import org.apache.lucene.util.NumericUtils;
 * This class provides a {@link Field} that enables indexing of long values
 * for efficient range filtering and sorting. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new LongField(name, 6L, Field.Store.NO));
 * </pre>
 *
 * For optimal performance, re-use the <code>LongField</code> and
 * {@link Document} instance for more than one document:
 *
-* <pre>
+* <pre class="prettyprint">
 *   LongField field = new LongField(name, 0L, Field.Store.NO);
 *   Document document = new Document();
 *   document.add(field);

@@ -28,7 +28,7 @@ import org.apache.lucene.index.AtomicReader; // javadocs
 * in RAM (when loaded via {@link AtomicReader#docValues})
 * using packed ints. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new PackedLongDocValuesField(name, 22L));
 * </pre>
 *

@@ -25,7 +25,7 @@ import org.apache.lucene.index.DocValues;
 * of a per-document short value for scoring, sorting or value retrieval. Here's an
 * example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new ShortDocValuesField(name, (short) 22));
 * </pre>
 *

@@ -26,7 +26,7 @@ import org.apache.lucene.util.BytesRef;
 * of a per-document {@link BytesRef} value, indexed for
 * sorting. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new SortedBytesDocValuesField(name, new BytesRef("hello")));
 * </pre>
 *

@@ -29,7 +29,7 @@ import org.apache.lucene.util.BytesRef;
 * field. If values may be shared it's better to use {@link
 * DerefBytesDocValuesField}. Here's an example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   document.add(new StraightBytesDocValuesField(name, new BytesRef("hello")));
 * </pre>
 *

@@ -210,7 +210,7 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
 * Returns true if verbosing is enabled. This method is usually used in
 * conjunction with {@link #message(String)}, like that:
 *
-* <pre>
+* <pre class="prettyprint">
 * if (verbose()) {
 *   message("your message");
 * }

@@ -796,7 +796,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit {
 * docs in the IndexWriter instance) then you can do
 * something like this:</p>
 *
-* <pre>
+* <pre class="prettyprint">
 * try {
 *   writer.close();
 * } finally {
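The try/finally in the IndexWriter hunk is truncated; the pattern its javadoc describes ends by freeing the write lock when close() fails. A sketch (directory is the index Directory):
<pre class="prettyprint">
try {
  writer.close();
} finally {
  if (IndexWriter.isLocked(directory)) {
    IndexWriter.unlock(directory);
  }
}
</pre>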
@@ -39,7 +39,7 @@ import org.apache.lucene.util.Version;
 * All setter methods return {@link IndexWriterConfig} to allow chaining
 * settings conveniently, for example:
 *
-* <pre>
+* <pre class="prettyprint">
 * IndexWriterConfig conf = new IndexWriterConfig(analyzer);
 * conf.setter1().setter2();
 * </pre>
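A concrete chaining sketch in place of the placeholder setter1()/setter2(); the setter choice here is illustrative, not from the commit:
<pre class="prettyprint">
IndexWriterConfig conf = new IndexWriterConfig(version, analyzer)
    .setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND)
    .setRAMBufferSizeMB(64.0);
IndexWriter writer = new IndexWriter(directory, conf);
</pre>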
@@ -156,7 +156,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
 
 /** Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
  * make it possible to do:
- * <pre>for (BooleanClause clause : booleanQuery) {}</pre>
+ * <pre class="prettyprint">for (BooleanClause clause : booleanQuery) {}</pre>
  */
 public final Iterator<BooleanClause> iterator() { return clauses().iterator(); }
 

@@ -80,8 +80,8 @@ import org.apache.lucene.index.IndexReaderContext;
 * a simple example showing how to collect docIDs into a
 * BitSet:</p>
 *
-* <pre>
-* Searcher searcher = new IndexSearcher(indexReader);
+* <pre class="prettyprint">
+* IndexSearcher searcher = new IndexSearcher(indexReader);
 * final BitSet bits = new BitSet(indexReader.maxDoc());
 * searcher.search(query, new Collector() {
 *   private int docBase;
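The anonymous Collector above is cut off after its first field; per the Collector contract the rest looks roughly like this (a sketch completing the javadoc's own example):
<pre class="prettyprint">
searcher.search(query, new Collector() {
  private int docBase;

  public void setScorer(Scorer scorer) {
    // scores are not needed to collect into a BitSet
  }

  public boolean acceptsDocsOutOfOrder() {
    return true;  // order does not matter for bit-setting
  }

  public void collect(int doc) {
    bits.set(doc + docBase);  // rebase the segment-relative docID
  }

  public void setNextReader(AtomicReaderContext context) {
    this.docBase = context.docBase;
  }
});
</pre>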
@@ -72,7 +72,7 @@ public abstract class DocIdSetIterator {
  * <p>
  * When <code> target > current</code> it behaves as if written:
  *
- * <pre>
+ * <pre class="prettyprint">
  * int advance(int target) {
  *   int doc;
  *   while ((doc = nextDoc()) < target) {
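The advance() equivalence is truncated mid-loop; it completes as:
<pre class="prettyprint">
int advance(int target) {
  int doc;
  while ((doc = nextDoc()) < target) {
    // iterate until the current doc reaches or passes target
  }
  return doc;
}
</pre>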
@@ -30,8 +30,8 @@ final class HitQueue extends PriorityQueue<ScoreDoc> {
  * <b>NOTE:</b> in case <code>prePopulate</code> is true, you should pop
  * elements from the queue using the following code example:
  *
- * <pre>
- * PriorityQueue pq = new HitQueue(10, true); // pre-populate.
+ * <pre class="prettyprint">
+ * PriorityQueue<ScoreDoc> pq = new HitQueue(10, true); // pre-populate.
  * ScoreDoc top = pq.top();
  *
  * // Add/Update one element.
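The HitQueue pop pattern is cut short above; a sketch of the rest, following the javadoc's reasoning (first discard the sentinel entries, then pop the truly inserted ones):
<pre class="prettyprint">
PriorityQueue<ScoreDoc> pq = new HitQueue(10, true); // pre-populate.
ScoreDoc top = pq.top();

// Add/update one element.
top.score = 1.0f;
top.doc = 0;
top = pq.updateTop();
int totalHits = 1;

// Discard the sentinels (pq.size() - totalHits of them) ...
for (int i = pq.size() - totalHits; i > 0; i--) { pq.pop(); }
// ... then pop the real hits, best last.
ScoreDoc[] results = new ScoreDoc[totalHits];
for (int i = totalHits - 1; i >= 0; i--) { results[i] = pq.pop(); }
</pre>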
@@ -29,7 +29,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
 *
 * <p> Typical usage looks like this:
 *
-* <pre>
+* <pre class="prettyprint">
 *   ... open your own writer ...
 *
 *   NRTManager manager = new NRTManager(writer);

@@ -45,7 +45,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
 *
 * Then, for each incoming query, do this:
 *
-* <pre>
+* <pre class="prettyprint">
 *   // For each incoming query:
 *   IndexSearcher searcher = manager.get();
 *   try {

@@ -58,7 +58,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
 * You should make changes using the <code>NRTManager</code>; if you later need to obtain
 * a searcher reflecting those changes:
 *
-* <pre>
+* <pre class="prettyprint">
 *   // ... or updateDocument, deleteDocuments, etc:
 *   long gen = manager.addDocument(...);
 *

@@ -73,7 +73,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
 *
 *
 * When you are done be sure to close both the manager and the reopen thread:
-* <pre>
+* <pre class="prettyprint">
 *   reopenThread.close();
 *   manager.close();
 * </pre>

@@ -34,7 +34,7 @@ import org.apache.lucene.util.NumericUtils; // for javadocs
 * <p>You create a new NumericRangeFilter with the static
 * factory methods, eg:
 *
-* <pre>
+* <pre class="prettyprint">
 * Filter f = NumericRangeFilter.newFloatRange("weight", 0.03f, 0.10f, true, true);
 * </pre>
 *

@@ -49,7 +49,7 @@ import org.apache.lucene.index.Term; // for javadocs
 * <p>You create a new NumericRangeQuery with the static
 * factory methods, eg:
 *
-* <pre>
+* <pre class="prettyprint">
 * Query q = NumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
 * </pre>
 *

@@ -35,7 +35,7 @@ import org.apache.lucene.util.IOUtils;
 *
 * Use it like this:
 *
-* <pre>
+* <pre class="prettyprint">
 *   SearcherLifetimeManager mgr = new SearcherLifetimeManager();
 * </pre>
 *

@@ -44,7 +44,7 @@ import org.apache.lucene.util.IOUtils;
 * using {@link SearcherManager} or {@link NRTManager}), and
 * then record this searcher:
 *
-* <pre>
+* <pre class="prettyprint">
 *   // Record the current searcher, and save the returned
 *   // token into user's search results (eg as a hidden
 *   // HTML form field):

@@ -55,7 +55,7 @@ import org.apache.lucene.util.IOUtils;
 * clicks next page, drills down/up, etc., take the token
 * that you saved from the previous search and:
 *
-* <pre>
+* <pre class="prettyprint">
 *   // If possible, obtain the same searcher as the last
 *   // search:
 *   IndexSearcher searcher = mgr.acquire(token);
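The acquire step above pairs with a release; a sketch of the full pattern the javadoc implies (handle a pruned token, then always release):
<pre class="prettyprint">
IndexSearcher searcher = mgr.acquire(token);
if (searcher == null) {
  // Searcher was pruned: tell the user the session timed out
  // and re-run the search against the current searcher.
} else {
  try {
    // Render the requested page with this searcher.
  } finally {
    mgr.release(searcher);
    searcher = null;  // do not use after release
  }
}
</pre>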
@@ -78,7 +78,7 @@ import org.apache.lucene.util.IOUtils;
 * that's periodically reopening your searchers, you should
 * periodically prune old searchers:
 *
-* <pre>
+* <pre class="prettyprint">
 *   mgr.prune(new PruneByAge(600.0));
 * </pre>
 *

@@ -86,7 +86,7 @@ public class TimeLimitingCollector extends Collector {
 * set the baseline through this method in your prelude.
 * <p>
 * Example usage:
-* <pre>
+* <pre class="prettyprint">
 *   Counter clock = ...;
 *   long baseline = clock.get();
 *   // ... prepare search
 
@@ -597,7 +597,7 @@ public abstract class TFIDFSimilarity extends Similarity {
 * <p>
 * The default implementation uses:
 *
-* <pre>
+* <pre class="prettyprint">
 * idf(docFreq, searcher.maxDoc());
 * </pre>
 *

@@ -59,7 +59,7 @@ import org.apache.lucene.util.ToStringUtils;
 *
 * <p>a SpanNearQuery with a slop of 0 can be applied across two
 * {@link SpanTermQuery} objects as follows:
-* <pre>
+* <pre class="prettyprint">
 *   SpanQuery q1 = new SpanTermQuery(new Term("studentfirstname", "james"));
 *   SpanQuery q2 = new SpanTermQuery(new Term("studentsurname", "jones"));
 *   SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
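The FieldMaskingSpanQuery example stops before the masked query is used; its javadoc completes it with a SpanNearQuery across the two fields:
<pre class="prettyprint">
SpanQuery q1  = new SpanTermQuery(new Term("studentfirstname", "james"));
SpanQuery q2  = new SpanTermQuery(new Term("studentsurname",   "jones"));
SpanQuery q2m = new FieldMaskingSpanQuery(q2, "studentfirstname");
Query q = new SpanNearQuery(new SpanQuery[]{q1, q2m}, -1, false);
</pre>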
@@ -39,7 +39,7 @@ import org.apache.lucene.util.Bits;
 * the expanded terms, but this can be customized.
 * <p>
 * Example:
-* <blockquote><pre>
+* <blockquote><pre class="prettyprint">
 * {@code
 * WildcardQuery wildcard = new WildcardQuery(new Term("field", "bro?n"));
 * SpanQuery spanWildcard = new SpanMultiTermQueryWrapper<WildcardQuery>(wildcard);

@@ -30,7 +30,7 @@ public abstract class Spans {
 
 /** Skips to the first match beyond the current, whose document number is
  * greater than or equal to <i>target</i>. <p>Returns true iff there is such
- * a match. <p>Behaves as if written: <pre>
+ * a match. <p>Behaves as if written: <pre class="prettyprint">
  *   boolean skipTo(int target) {
  *     do {
  *       if (!next())
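The skipTo() equivalence is truncated; it completes as:
<pre class="prettyprint">
boolean skipTo(int target) {
  do {
    if (!next())
      return false;
  } while (target > doc());
  return true;
}
</pre>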
@@ -180,7 +180,7 @@ public abstract class Directory implements Closeable {
 * If you want to copy the entire source directory to the destination one, you
 * can do so like this:
 *
-* <pre>
+* <pre class="prettyprint">
 * Directory to; // the directory to copy to
 * for (String file : dir.listAll()) {
 *   dir.copy(to, file, newFile); // newFile can be either file, or a new name
 
@@ -21,7 +21,7 @@ import org.apache.lucene.util.ThreadInterruptedException;
 import java.io.IOException;
 
 /** An interprocess mutex lock.
-* <p>Typical use might look like:<pre>
+* <p>Typical use might look like:<pre class="prettyprint">
 * new Lock.With(directory.makeLock("my.lock")) {
 *     public Object doBody() {
 *       <i>... code to execute while locked ...</i>

@@ -47,7 +47,7 @@ import org.apache.lucene.util.IOUtils;
 *
 * <p>Here's a simple example usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *   Directory fsDir = FSDirectory.open(new File("/path/to/index"));
 *   NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
 *   IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_32, analyzer);
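The NRTCachingDirectory snippet ends before the writer is opened against the caching wrapper; a sketch of the final step the javadoc implies (version and analyzer are the application's own):
<pre class="prettyprint">
Directory fsDir = FSDirectory.open(new File("/path/to/index"));
NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
IndexWriterConfig conf = new IndexWriterConfig(version, analyzer);
IndexWriter writer = new IndexWriter(cachedFSDir, conf);  // writer sees the caching dir
</pre>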
@@ -75,7 +75,7 @@ public abstract class AttributeImpl implements Cloneable, Attribute {
 * implementations.
 *
 * <p>Custom implementations look like this (e.g. for a combined attribute implementation):
-* <pre>
+* <pre class="prettyprint">
 *   public void reflectWith(AttributeReflector reflector) {
 *     reflector.reflect(CharTermAttribute.class, "term", term());
 *     reflector.reflect(PositionIncrementAttribute.class, "positionIncrement", getPositionIncrement());

@@ -130,7 +130,7 @@ public final class BytesRef implements Comparable<BytesRef>,Cloneable {
 
 /** Calculates the hash code as required by TermsHash during indexing.
  * <p>It is defined as:
- * <pre>
+ * <pre class="prettyprint">
  *  int hash = 0;
  *  for (int i = offset; i < offset + length; i++) {
  *    hash = 31*hash + bytes[i];

@@ -281,7 +281,7 @@ public final class BytesRefHash {
  * <p>
  * Hashcode is defined as:
  *
- * <pre>
+ * <pre class="prettyprint">
  * int hash = 0;
  * for (int i = offset; i < offset + length; i++) {
  *   hash = 31 * hash + bytes[i];

@@ -56,7 +56,7 @@ public final class IOUtils {
  * may be null, they are ignored. After everything is closed, method either throws <tt>priorException</tt>,
  * if one is supplied, or the first of suppressed exceptions, or completes normally.</p>
  * <p>Sample usage:<br/>
- * <pre>
+ * <pre class="prettyprint">
  * Closeable resource1 = null, resource2 = null, resource3 = null;
  * ExpectedException priorE = null;
  * try {
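The IOUtils sample is cut off inside the try block; a concrete sketch of the documented pattern (FileInputStream is illustrative, any Closeable works):
<pre class="prettyprint">
Closeable resource1 = null, resource2 = null;
IOException priorE = null;
try {
  resource1 = new FileInputStream("a");  // acquisition may throw
  resource2 = new FileInputStream("b");
  // ... use the resources; this may throw too ...
} catch (IOException e) {
  priorE = e;
} finally {
  // closes both, rethrows priorE if set, else the first close failure
  IOUtils.closeWhileHandlingException(priorE, resource1, resource2);
}
</pre>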
@@ -101,7 +101,7 @@ public abstract class PriorityQueue<T> {
  * If this method is extended to return a non-null value, then the following
  * usage pattern is recommended:
  *
- * <pre>
+ * <pre class="prettyprint">
  * // extends getSentinelObject() to return a non-null value.
  * PriorityQueue<MyObject> pq = new MyQueue<MyObject>(numHits);
  * // save the 'top' element, which is guaranteed to not be null.

@@ -191,14 +191,14 @@ public abstract class PriorityQueue<T> {
  * Should be called when the Object at top changes values. Still log(n) worst
  * case, but it's at least twice as fast to
  *
- * <pre>
+ * <pre class="prettyprint">
  * pq.top().change();
  * pq.updateTop();
  * </pre>
  *
  * instead of
  *
- * <pre>
+ * <pre class="prettyprint">
  * o = pq.pop();
  * o.change();
  * pq.push(o);
@@ -31,7 +31,7 @@ import java.util.Set;
 * The cost of reflection is minimized by the following usage of this class:</p>
 * <p>Define <strong>static final</strong> fields in the base class ({@code BaseClass}),
 * where the old and new method are declared:</p>
-* <pre>
+* <pre class="prettyprint">
 *  static final VirtualMethod<BaseClass> newMethod =
 *   new VirtualMethod<BaseClass>(BaseClass.class, "newName", parameters...);
 *  static final VirtualMethod<BaseClass> oldMethod =

@@ -41,7 +41,7 @@ import java.util.Set;
 * If you try to create a second instance of for the same method/{@code baseClass} combination, an exception is thrown.
 * <p>To detect if e.g. the old method was overridden by a more far subclass on the inheritance path to the current
 * instance's class, use a <strong>non-static</strong> field:</p>
-* <pre>
+* <pre class="prettyprint">
 *  final boolean isDeprecatedMethodOverridden =
 *   oldMethod.getImplementationDistance(this.getClass()) > newMethod.getImplementationDistance(this.getClass());
 *
@@ -31,7 +31,7 @@ import org.apache.lucene.util.encoding.IntDecoder;
 * The iterator then consumes the payload information of each document and
 * decodes it into categories. A typical use case of this class is:
 *
-* <pre>
+* <pre class="prettyprint">
 * IndexReader reader = [open your reader];
 * Term t = new Term("field", "where-payload-exists");
 * CategoryListIterator cli = new PayloadIntDecodingIterator(reader, t);

@@ -44,7 +44,7 @@ import java.io.OutputStream;
 * <p>
 * For the reasons mentioned above, these encoders are usually chained with
 * {@link UniqueValuesIntEncoder} and {@link DGapIntEncoder} in the following
-* manner: <code><pre>
+* manner: <code><pre class="prettyprint">
 * IntEncoder fourFlags =
 *   new SortingEncoderFilter(new UniqueValuesIntEncoder(new DGapIntEncoder(new FlagsIntEncoderImpl())));
 * </code></pre>

@@ -24,7 +24,7 @@ import java.io.OutputStream;
 * An abstract implementation of {@link IntEncoder} which is served as a filter
 * on the values to encode. An encoder filter wraps another {@link IntEncoder}
 * which does the actual encoding. This allows for chaining filters and
-* encoders, such as: <code><pre>
+* encoders, such as: <code><pre class="prettyprint">
 * new UniqueValuesIntEncoder(new DGapIntEncoder(new VInt8IntEnoder()));
 * {@link UniqueValuesIntEncoder} followed by {@link DGapIntEncoder}
 </pre></code>

@@ -36,7 +36,7 @@ encoders buffer values in-memory and encode in batches in order to
 optimize the encoding, and not closing them may result in loss of
 information or corrupt stream.
 <p>A proper and typical usage of an encoder looks like this:
-<blockquote><pre><code>
+<blockquote><pre class="prettyprint"><code>
 int[] data = <the values to encode>
 IntEncoder encoder = new VInt8IntEncoder();
 OutputStream out = new ByteArrayOutputStream();

@@ -59,7 +59,7 @@ As mentioned above, not all encoders have a matching decoder (like some
 encoder filters which are explained next), however every encoder should
 return a decoder following a call to that method. To complete the
 example above, one can easily iterate over the decoded values like this:
-<blockquote><pre><code>
+<blockquote><pre class="prettyprint"><code>
 IntDecoder d = e.createMatchingDecoder();
 d.reInit(new ByteArrayInputStream(bytes));
 long val;

@@ -94,7 +94,7 @@ in scenarios where an application uses different encoders for different
 streams, and wants to manage some sort of mapping between an encoder ID
 to an IntEncoder/Decoder implementation, so a proper decoder will be
 initialized on the fly:
-<blockquote><pre><code>
+<blockquote><pre class="prettyprint"><code>
 public class TaggingIntEncoder extends IntEncoderFilter {
 
   public TaggingIntEncoder(IntEncoder encoder) {

@@ -128,7 +128,7 @@ public class TaggingIntEncoder extends IntEncoderFilter {
 }
 </code></pre></blockquote>
 And the matching decoder:
-<blockquote><pre><code>
+<blockquote><pre class="prettyprint"><code>
 public class TaggingIntDecoder extends IntDecoder {
 
   // Will be initialized upon calling reInit.
@@ -28,7 +28,7 @@ import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseIn
 * An implementation class of {@link FragListBuilder} that generates one {@link WeightedFragInfo} object.
 * Typical use case of this class is that you can get an entire field contents
 * by using both of this class and {@link SimpleFragmentsBuilder}.<br/>
-* <pre>
+* <pre class="prettyprint">
 * FastVectorHighlighter h = new FastVectorHighlighter( true, true,
 *   new SingleFragListBuilder(), new SimpleFragmentsBuilder() );
 * </pre>

@@ -78,7 +78,7 @@ import org.apache.lucene.util.RamUsageEstimator;
 * numbers of queries over comparatively small transient realtime data (prospective
 * search).
 * For example as in
-* <pre>
+* <pre class="prettyprint">
 * float score = search(String text, Query query)
 * </pre>
 * <p>

@@ -114,13 +114,12 @@ import org.apache.lucene.util.RamUsageEstimator;
 *
 * <h4>Example Usage</h4>
 *
-* <pre>
-* Analyzer analyzer = PatternAnalyzer.DEFAULT_ANALYZER;
-* //Analyzer analyzer = new SimpleAnalyzer();
+* <pre class="prettyprint">
+* Analyzer analyzer = new SimpleAnalyzer(version);
 * MemoryIndex index = new MemoryIndex();
 * index.addField("content", "Readings about Salmons and other select Alaska fishing Manuals", analyzer);
 * index.addField("author", "Tales of James", analyzer);
-* QueryParser parser = new QueryParser("content", analyzer);
+* QueryParser parser = new QueryParser(version, "content", analyzer);
 * float score = index.search(parser.parse("+author:james +salmon~ +fish* manual~"));
 * if (score > 0.0f) {
 *     System.out.println("it's a match");

@@ -133,7 +132,7 @@ import org.apache.lucene.util.RamUsageEstimator;
 *
 * <h4>Example XQuery Usage</h4>
 *
-* <pre>
+* <pre class="prettyprint">
 * (: An XQuery that finds all books authored by James that have something to do with "salmon fishing manuals", sorted by relevance :)
 * declare namespace lucene = "java:nux.xom.pool.FullTextUtil";
 * declare variable $query := "+salmon~ +fish* manual~"; (: any arbitrary Lucene query can go here :)

@@ -149,7 +148,7 @@ import org.apache.lucene.util.RamUsageEstimator;
 *
 * An instance can be queried multiple times with the same or different queries,
 * but an instance is not thread-safe. If desired use idioms such as:
-* <pre>
+* <pre class="prettyprint">
 * MemoryIndex index = ...
 * synchronized (index) {
 *   // read and/or write index (i.e. add fields and/or query)
|
@ -129,7 +129,7 @@ public class BooleanFilter extends Filter implements Iterable<FilterClause> {
|
|||
|
||||
/** Returns an iterator on the clauses in this query. It implements the {@link Iterable} interface to
|
||||
* make it possible to do:
|
||||
* <pre>for (FilterClause clause : booleanFilter) {}</pre>
|
||||
* <pre class="prettyprint">for (FilterClause clause : booleanFilter) {}</pre>
|
||||
*/
|
||||
public final Iterator<FilterClause> iterator() {
|
||||
return clauses().iterator();
|
||||
|
|
|
@ -253,8 +253,9 @@ public class MultiFieldQueryParser extends QueryParser
|
|||
* Parses a query, searching on the fields specified.
|
||||
* Use this if you need to specify certain fields as required,
|
||||
* and others as prohibited.
|
||||
* <p><pre>
|
||||
* <p>
|
||||
* Usage:
|
||||
* <pre class="prettyprint">
|
||||
* <code>
|
||||
* String[] fields = {"filename", "contents", "description"};
|
||||
* BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
|
||||
|
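The MultiFieldQueryParser usage above is truncated before the parse call; a sketch of the complete invocation (version and analyzer are the application's own):
<pre class="prettyprint">
String[] fields = {"filename", "contents", "description"};
BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
    BooleanClause.Occur.MUST,
    BooleanClause.Occur.MUST_NOT};
Query q = MultiFieldQueryParser.parse(version, "query", fields, flags, analyzer);
// yields: (filename:query) +(contents:query) -(description:query)
</pre>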
@@ -300,8 +301,9 @@ public class MultiFieldQueryParser extends QueryParser
 * Parses a query, searching on the fields specified.
 * Use this if you need to specify certain fields as required,
 * and others as prohibited.
-* <p><pre>
+* <p>
+* Usage:
+* <pre class="prettyprint">
 * <code>
 * String[] query = {"query1", "query2", "query3"};
 * String[] fields = {"filename", "contents", "description"};

@@ -31,7 +31,7 @@ import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax.Type
 * <p>
 * Example how the text parser creates these objects:
 * </p>
-* <pre>
+* <pre class="prettyprint">
 * List values = ArrayList();
 * values.add(new PathQueryNode.QueryText("company", 1, 7));
 * values.add(new PathQueryNode.QueryText("USA", 9, 12));

@@ -35,9 +35,9 @@ final public class QueryParserUtil {
 * If x fields are specified, this effectively constructs:
 *
 * <pre>
-* <code>
+* <code>
 * (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
-* </code>
+* </code>
 * </pre>
 *
 * @param queries

@@ -75,23 +75,23 @@ final public class QueryParserUtil {
 * specify certain fields as required, and others as prohibited.
 * <p>
 *
-* <pre>
-* <code>
+* Usage:
+* <pre class="prettyprint">
+* <code>
 * String[] fields = {"filename", "contents", "description"};
 * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
 *   BooleanClause.Occur.MUST,
 *   BooleanClause.Occur.MUST_NOT};
 * MultiFieldQueryParser.parse("query", fields, flags, analyzer);
-* </code>
+* </code>
 * </pre>
 *<p>
 * The code above would construct a query:
 *
 * <pre>
-* <code>
+* <code>
 * (filename:query) +(contents:query) -(description:query)
-* </code>
+* </code>
 * </pre>
 *
 * @param query

@@ -131,24 +131,24 @@ final public class QueryParserUtil {
 * specify certain fields as required, and others as prohibited.
 * <p>
 *
-* <pre>
-* <code>
+* Usage:
+* <pre class="prettyprint">
+* <code>
 * String[] query = {"query1", "query2", "query3"};
 * String[] fields = {"filename", "contents", "description"};
 * BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
 *   BooleanClause.Occur.MUST,
 *   BooleanClause.Occur.MUST_NOT};
 * MultiFieldQueryParser.parse(query, fields, flags, analyzer);
-* </code>
+* </code>
 * </pre>
 *<p>
 * The code above would construct a query:
 *
 * <pre>
-* <code>
+* <code>
 * (filename:query1) +(contents:query2) -(description:query3)
-* </code>
+* </code>
 * </pre>
 *
 * @param queries
@@ -58,7 +58,7 @@ import org.apache.lucene.util.Version;
 *
 * <p>Example Usage:
 *
-* <pre>
+* <pre class="prettyprint">
 *  SpellChecker spellchecker = new SpellChecker(spellIndexDirectory);
 *  // To index a field of a user index:
 *  spellchecker.indexDictionary(new LuceneDictionary(my_lucene_reader, a_field));
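The SpellChecker example is cut off after the first indexDictionary call; a sketch of how it continues. Note: in 4.0 indexDictionary also takes an IndexWriterConfig and a fullMerge flag, assumed here as config and fullMerge.
<pre class="prettyprint">
SpellChecker spellchecker = new SpellChecker(spellIndexDirectory);
// To index a field of a user index:
spellchecker.indexDictionary(new LuceneDictionary(my_lucene_reader, a_field), config, fullMerge);
// To index a file containing words:
spellchecker.indexDictionary(new PlainTextDictionary(new File("myfile.txt")), config, fullMerge);
String[] suggestions = spellchecker.suggestSimilar("misspelt", 5);
</pre>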