mirror of https://github.com/apache/lucene.git
LUCENE-2781: drop deprecations from trunk
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1040463 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent 14305d9e3c
commit a58c26978f
@@ -1,38 +0,0 @@
package org.apache.lucene.benchmark.byTask.feeds;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.benchmark.byTask.utils.Config;

/**
 * A {@link DocMaker} which reads the English Wikipedia dump. Uses
 * {@link EnwikiContentSource} as its content source, regardless if a different
 * content source was defined in the configuration.
 * @deprecated Please use {@link DocMaker} instead, with content.source=EnwikiContentSource
 */
@Deprecated
public class EnwikiDocMaker extends DocMaker {
  @Override
  public void setConfig(Config config) {
    super.setConfig(config);
    // Override whatever content source was set in the config
    source = new EnwikiContentSource();
    source.setConfig(config);
    System.out.println("NOTE: EnwikiDocMaker is deprecated; please use DocMaker instead (which is the default if you don't specify doc.maker) with content.source=EnwikiContentSource");
  }
}

@@ -1,50 +0,0 @@
package org.apache.lucene.benchmark.byTask.feeds;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.benchmark.byTask.utils.Config;

/**
 * A DocMaker reading one line at a time as a Document from a single file. This
 * saves IO cost (over DirContentSource) of recursing through a directory and
 * opening a new file for every document. It also re-uses its Document and Field
 * instance to improve indexing speed.<br>
 * The expected format of each line is (arguments are separated by <TAB>):
 * <i>title, date, body</i>. If a line is read in a different format, a
 * {@link RuntimeException} will be thrown. In general, you should use this doc
 * maker with files that were created with
 * {@link org.apache.lucene.benchmark.byTask.tasks.WriteLineDocTask}.<br>
 * <br>
 * Config properties:
 * <ul>
 * <li>doc.random.id.limit=N (default -1) -- create random docid in the range
 * 0..N; this is useful with UpdateDoc to test updating random documents; if
 * this is unspecified or -1, then docid is sequentially assigned
 * </ul>
 * @deprecated Please use {@link DocMaker} instead, with content.source=LineDocSource
 */
@Deprecated
public class LineDocMaker extends DocMaker {
  @Override
  public void setConfig(Config config) {
    super.setConfig(config);
    source = new LineDocSource();
    source.setConfig(config);
    System.out.println("NOTE: LineDocMaker is deprecated; please use DocMaker instead (which is the default if you don't specify doc.maker) with content.source=LineDocSource");
  }
}

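Both deleted classes only forced a particular content source onto DocMaker, so, as their deprecation notes say, the same effect is now reached through configuration: in a benchmark .alg file, leave doc.maker at its DocMaker default and set content.source=EnwikiContentSource (or LineDocSource). A rough programmatic sketch of the same migration; the Properties-based Config constructor and the exact property handling are assumptions based on the snippets above:

    // Hedged sketch: replace `new EnwikiDocMaker()` / `new LineDocMaker()` with a
    // plain DocMaker whose content source comes from the config.
    Properties props = new Properties();
    props.setProperty("content.source",
        "org.apache.lucene.benchmark.byTask.feeds.EnwikiContentSource"); // or ...LineDocSource
    Config config = new Config(props);   // assumes the Properties-accepting Config constructor
    DocMaker docMaker = new DocMaker();
    docMaker.setConfig(config);          // DocMaker instantiates the configured content source
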
@@ -21,12 +21,7 @@ import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.*;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexInput;

@@ -195,6 +190,10 @@ public class JEDirectory extends Directory {
    return new JEIndexInput(this, name);
  }

  @Override
  public void sync(Collection<String> names) throws IOException {
  }

  @Override
  public Lock makeLock(String name) {
    return new JELock();

@@ -21,12 +21,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.util.Set;
import java.util.HashSet;
import java.util.List;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Collections;
import java.util.*;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;

@@ -211,6 +206,10 @@ public class DbDirectory extends Directory {
  }

  @Override
  public void sync(Collection<String> names) throws IOException {
  }

  @Override
  public IndexInput openInput(String name)
    throws IOException
  {

@@ -204,7 +204,7 @@ public class SimpleCharStream
  }

  /**
   * @deprecated
   * @deprecated (gen)
   * @see #getEndColumn
   */

@@ -213,7 +213,7 @@ public class SimpleCharStream
  }

  /**
   * @deprecated
   * @deprecated (gen)
   * @see #getEndLine
   */

@@ -37,17 +37,7 @@ public class TextFragment
    this.textStartPos = textStartPos;
    this.fragNum = fragNum;
  }
  /**
   * @deprecated Use {@link #TextFragment(CharSequence, int, int)} instead.
   * This constructor will be removed in Lucene 4.0
   */
  @Deprecated
  public TextFragment(StringBuffer markedUpText,int textStartPos, int fragNum)
  {
    this.markedUpText=markedUpText;
    this.textStartPos = textStartPos;
    this.fragNum = fragNum;
  }

  void setScore(float score)
  {
    this.score=score;

@@ -107,25 +107,12 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
    return fragments.toArray( new String[fragments.size()] );
  }

  @Deprecated
  protected String[] getFieldValues( IndexReader reader, int docId, String fieldName) throws IOException {
    Document doc = reader.document( docId, new MapFieldSelector( new String[]{ fieldName } ) );
    return doc.getValues( fieldName ); // according to Document class javadoc, this never returns null
  }

  protected Field[] getFields( IndexReader reader, int docId, String fieldName) throws IOException {
    // according to javadoc, doc.getFields(fieldName) cannot be used with lazy loaded field???
    Document doc = reader.document( docId, new MapFieldSelector( new String[]{ fieldName } ) );
    Document doc = reader.document( docId, new MapFieldSelector(fieldName) );
    return doc.getFields( fieldName ); // according to Document class javadoc, this never returns null
  }

  @Deprecated
  protected String makeFragment( StringBuilder buffer, int[] index, String[] values, WeightedFragInfo fragInfo ){
    final int s = fragInfo.startOffset;
    return makeFragment( fragInfo, getFragmentSource( buffer, index, values, s, fragInfo.endOffset ), s,
        preTags, postTags, NULL_ENCODER );
  }

  protected String makeFragment( StringBuilder buffer, int[] index, Field[] values, WeightedFragInfo fragInfo,
      String[] preTags, String[] postTags, Encoder encoder ){
    final int s = fragInfo.startOffset;

@@ -151,18 +138,6 @@ public abstract class BaseFragmentsBuilder implements FragmentsBuilder {
    return fragment.toString();
  }

  @Deprecated
  protected String getFragmentSource( StringBuilder buffer, int[] index, String[] values,
      int startOffset, int endOffset ){
    while( buffer.length() < endOffset && index[0] < values.length ){
      buffer.append( values[index[0]] );
      buffer.append( multiValuedSeparator );
      index[0]++;
    }
    int eo = buffer.length() < endOffset ? buffer.length() : endOffset;
    return buffer.substring( startOffset, eo );
  }

  protected String getFragmentSource( StringBuilder buffer, int[] index, Field[] values,
      int startOffset, int endOffset ){
    while( buffer.length() < endOffset && index[0] < values.length ){

@@ -70,12 +70,7 @@ import org.apache.lucene.search.WildcardQuery;
|
|||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.highlight.SynonymTokenizer.TestHighlightRunner;
|
||||
import org.apache.lucene.search.regex.RegexQuery;
|
||||
import org.apache.lucene.search.regex.SpanRegexQuery;
|
||||
import org.apache.lucene.search.spans.SpanNearQuery;
|
||||
import org.apache.lucene.search.spans.SpanNotQuery;
|
||||
import org.apache.lucene.search.spans.SpanOrQuery;
|
||||
import org.apache.lucene.search.spans.SpanQuery;
|
||||
import org.apache.lucene.search.spans.SpanTermQuery;
|
||||
import org.apache.lucene.search.spans.*;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.automaton.BasicAutomata;
|
||||
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
|
||||
|
@@ -300,8 +295,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
}
|
||||
|
||||
public void testSpanRegexQuery() throws Exception {
|
||||
query = new SpanOrQuery(new SpanQuery [] {
|
||||
new SpanRegexQuery(new Term(FIELD_NAME, "ken.*")) });
|
||||
query = new SpanOrQuery(new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(new Term(FIELD_NAME, "ken.*"))));
|
||||
searcher = new IndexSearcher(ramDir, true);
|
||||
hits = searcher.search(query, 100);
|
||||
int maxNumFragmentsRequired = 2;
|
||||
|
@@ -698,8 +692,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
|
||||
int maxNumFragmentsRequired = 2;
|
||||
String fragmentSeparator = "...";
|
||||
QueryScorer scorer = null;
|
||||
TokenStream tokenStream = null;
|
||||
QueryScorer scorer;
|
||||
TokenStream tokenStream;
|
||||
|
||||
tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
|
||||
|
||||
|
@@ -726,8 +720,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
|
||||
int maxNumFragmentsRequired = 2;
|
||||
String fragmentSeparator = "...";
|
||||
QueryScorer scorer = null;
|
||||
TokenStream tokenStream = null;
|
||||
QueryScorer scorer;
|
||||
TokenStream tokenStream;
|
||||
|
||||
tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
|
||||
|
||||
|
@@ -754,8 +748,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
String text = searcher.doc(hits.scoreDocs[i].doc).get(HighlighterTest.FIELD_NAME);
|
||||
int maxNumFragmentsRequired = 2;
|
||||
String fragmentSeparator = "...";
|
||||
QueryScorer scorer = null;
|
||||
TokenStream tokenStream = null;
|
||||
QueryScorer scorer;
|
||||
TokenStream tokenStream;
|
||||
|
||||
tokenStream = analyzer.tokenStream(HighlighterTest.FIELD_NAME, new StringReader(text));
|
||||
|
||||
|
@@ -820,8 +814,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
Highlighter hg = new Highlighter(new SimpleHTMLFormatter(), new QueryTermScorer(query));
|
||||
hg.setTextFragmenter(new NullFragmenter());
|
||||
|
||||
String match = null;
|
||||
match = hg.getBestFragment(analyzer, "data", "help me [54-65]");
|
||||
String match = hg.getBestFragment(analyzer, "data", "help me [54-65]");
|
||||
assertEquals("<B>help</B> me [54-65]", match);
|
||||
|
||||
}
|
||||
|
@@ -1133,7 +1126,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
|
||||
TermQuery query = new TermQuery(new Term("data", goodWord));
|
||||
|
||||
String match = null;
|
||||
String match;
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(goodWord);
|
||||
for (int i = 0; i < 10000; i++) {
|
||||
|
@@ -1246,8 +1239,7 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
public void run() throws Exception {
|
||||
doSearching("AnInvalidQueryWhichShouldYieldNoResults");
|
||||
|
||||
for (int i = 0; i < texts.length; i++) {
|
||||
String text = texts[i];
|
||||
for (String text : texts) {
|
||||
TokenStream tokenStream = analyzer.tokenStream(FIELD_NAME, new StringReader(text));
|
||||
Highlighter highlighter = getHighlighter(query, FIELD_NAME, tokenStream,
|
||||
HighlighterTest.this);
|
||||
|
@@ -1716,8 +1708,8 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
|
|||
ramDir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET, true)));
|
||||
for (int i = 0; i < texts.length; i++) {
|
||||
addDoc(writer, texts[i]);
|
||||
for (String text : texts) {
|
||||
addDoc(writer, text);
|
||||
}
|
||||
Document doc = new Document();
|
||||
NumericField nfield = new NumericField(NUMERIC_FIELD_NAME, Store.YES, true);
|
||||
|
@@ -1881,7 +1873,7 @@ final class SynonymTokenizer extends TokenStream {
|
|||
}
|
||||
|
||||
public Highlighter getHighlighter(Query query, String fieldName, TokenStream stream, Formatter formatter, boolean expanMultiTerm) {
|
||||
Scorer scorer = null;
|
||||
Scorer scorer;
|
||||
if (mode == QUERY) {
|
||||
scorer = new QueryScorer(query, fieldName);
|
||||
if(!expanMultiTerm) {
|
||||
|
|
|
@@ -64,8 +64,6 @@ public class InstantiatedIndexWriter implements Closeable {
|
|||
|
||||
private PrintStream infoStream = null;
|
||||
|
||||
private int maxFieldLength = IndexWriter.DEFAULT_MAX_FIELD_LENGTH;
|
||||
|
||||
private final InstantiatedIndex index;
|
||||
private final Analyzer analyzer;
|
||||
|
||||
|
@@ -431,9 +429,7 @@ public class InstantiatedIndexWriter implements Closeable {
|
|||
};
|
||||
|
||||
/**
|
||||
* Adds a document to this index. If the document contains more than
|
||||
* {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
|
||||
* discarded.
|
||||
* Adds a document to this index.
|
||||
*/
|
||||
public void addDocument(Document doc) throws IOException {
|
||||
addDocument(doc, getAnalyzer());
|
||||
|
@@ -441,9 +437,7 @@ public class InstantiatedIndexWriter implements Closeable {
|
|||
|
||||
/**
|
||||
* Adds a document to this index, using the provided analyzer instead of the
|
||||
* value of {@link #getAnalyzer()}. If the document contains more than
|
||||
* {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
|
||||
* discarded.
|
||||
* value of {@link #getAnalyzer()}.
|
||||
*
|
||||
* @param doc
|
||||
* @param analyzer
|
||||
|
@@ -555,9 +549,6 @@ public class InstantiatedIndexWriter implements Closeable {
|
|||
}
|
||||
tokens.add(token); // the vector will be built on commit.
|
||||
fieldSetting.fieldLength++;
|
||||
if (fieldSetting.fieldLength > maxFieldLength) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
tokenStream.end();
|
||||
tokenStream.close();
|
||||
|
@@ -666,14 +657,6 @@ public class InstantiatedIndexWriter implements Closeable {
|
|||
addDocument(doc, analyzer);
|
||||
}
|
||||
|
||||
public int getMaxFieldLength() {
|
||||
return maxFieldLength;
|
||||
}
|
||||
|
||||
public void setMaxFieldLength(int maxFieldLength) {
|
||||
this.maxFieldLength = maxFieldLength;
|
||||
}
|
||||
|
||||
public Similarity getSimilarity() {
|
||||
return similarity;
|
||||
}
|
||||
|
|
|
@@ -22,6 +22,7 @@ import java.util.Date;
import java.util.List;
import java.util.ArrayList;

import org.apache.lucene.search.DefaultSimilarity;
import org.apache.lucene.search.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

@@ -52,19 +53,21 @@ public class FieldNormModifier {
   */
  public static void main(String[] args) throws IOException {
    if (args.length < 3) {
      System.err.println("Usage: FieldNormModifier <index> <package.SimilarityClassName | -n> <field1> [field2] ...");
      System.err.println("Usage: FieldNormModifier <index> <package.SimilarityClassName | -d> <field1> [field2] ...");
      System.exit(1);
    }

    Similarity s = null;
    if (!args[1].equals("-n")) {
      try {
        s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
      } catch (Exception e) {
        System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
        e.printStackTrace(System.err);
        System.exit(1);
      }

    if (args[1].equals("-d"))
      args[1] = DefaultSimilarity.class.getName();

    try {
      s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
    } catch (Exception e) {
      System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
      e.printStackTrace(System.err);
      System.exit(1);
    }

    Directory d = FSDirectory.open(new File(args[0]));

@@ -142,11 +145,7 @@ public class FieldNormModifier {

    for (int d = 0; d < termCounts.length; d++) {
      if (delDocs == null || !delDocs.get(d)) {
        if (sim == null) {
          subReader.setNorm(d, fieldName, Similarity.encodeNorm(1.0f));
        } else {
          subReader.setNorm(d, fieldName, sim.encodeNormValue(sim.lengthNorm(fieldName, termCounts[d])));
        }
        subReader.setNorm(d, fieldName, sim.encodeNormValue(sim.lengthNorm(fieldName, termCounts[d])));
      }
    }
  }

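The FieldNormModifier hunks above swap the old -n switch for -d, which is simply expanded to DefaultSimilarity before the reflective instantiation, so norms are now always re-encoded through a concrete Similarity. A small illustrative invocation; the index path and field names are placeholders:

    // Placeholder arguments; "-d" now stands in for DefaultSimilarity.class.getName().
    FieldNormModifier.main(new String[] { "/path/to/index", "-d", "body", "title" });
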
@@ -96,7 +96,7 @@ public class MultiPassIndexSplitter {
          new WhitespaceAnalyzer(Version.LUCENE_CURRENT))
          .setOpenMode(OpenMode.CREATE));
      System.err.println("Writing part " + (i + 1) + " ...");
      w.addIndexes(new IndexReader[]{input});
      w.addIndexes(input);
      w.close();
    }
    System.err.println("Done.");

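The MultiPassIndexSplitter change above only drops the explicit array, since IndexWriter.addIndexes on trunk accepts readers directly (varargs); a caller-side sketch:

    // w.addIndexes(new IndexReader[] { input });  // old array form
    w.addIndexes(input);                           // form used by the new code
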
@@ -1,144 +0,0 @@
|
|||
package org.apache.lucene.misc;
|
||||
|
||||
/**
|
||||
* Copyright 2006 The Apache Software Foundation
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.index.DocsEnum;
|
||||
import org.apache.lucene.search.Similarity;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
import org.apache.lucene.util.Bits;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Date;
|
||||
|
||||
/**
|
||||
* Given a directory, a Similarity, and a list of fields, updates the
|
||||
* fieldNorms in place for every document using the Similarity.lengthNorm.
|
||||
*
|
||||
* <p>
|
||||
* NOTE: This only works if you do <b>not</b> use field/document boosts in your
|
||||
* index.
|
||||
* </p>
|
||||
*
|
||||
* @version $Id$
|
||||
* @deprecated Use {@link org.apache.lucene.index.FieldNormModifier}
|
||||
*/
|
||||
@Deprecated
|
||||
public class LengthNormModifier {
|
||||
|
||||
/**
|
||||
* Command Line Execution method.
|
||||
*
|
||||
* <pre>
|
||||
* Usage: LengthNormModifier /path/index package.SimilarityClassName field1 field2 ...
|
||||
* </pre>
|
||||
*/
|
||||
public static void main(String[] args) throws IOException {
|
||||
if (args.length < 3) {
|
||||
System.err.println("Usage: LengthNormModifier <index> <package.SimilarityClassName> <field1> [field2] ...");
|
||||
System.exit(1);
|
||||
}
|
||||
|
||||
Similarity s = null;
|
||||
try {
|
||||
s = Class.forName(args[1]).asSubclass(Similarity.class).newInstance();
|
||||
} catch (Exception e) {
|
||||
System.err.println("Couldn't instantiate similarity with empty constructor: " + args[1]);
|
||||
e.printStackTrace(System.err);
|
||||
}
|
||||
|
||||
File index = new File(args[0]);
|
||||
Directory d = FSDirectory.open(index);
|
||||
|
||||
LengthNormModifier lnm = new LengthNormModifier(d, s);
|
||||
|
||||
for (int i = 2; i < args.length; i++) {
|
||||
System.out.print("Updating field: " + args[i] + " " + (new Date()).toString() + " ... ");
|
||||
lnm.reSetNorms(args[i]);
|
||||
System.out.println(new Date().toString());
|
||||
}
|
||||
|
||||
d.close();
|
||||
}
|
||||
|
||||
|
||||
private Directory dir;
|
||||
private Similarity sim;
|
||||
|
||||
/**
|
||||
* Constructor for code that wishes to use this class programmatically.
|
||||
*
|
||||
* @param d The Directory to modify
|
||||
* @param s The Similarity to use in <code>reSetNorms</code>
|
||||
*/
|
||||
public LengthNormModifier(Directory d, Similarity s) {
|
||||
dir = d;
|
||||
sim = s;
|
||||
}
|
||||
|
||||
/**
|
||||
* Resets the norms for the specified field.
|
||||
*
|
||||
* <p>
|
||||
* Opens a new IndexReader on the Directory given to this instance,
|
||||
* modifies the norms using the Similarity given to this instance,
|
||||
* and closes the IndexReader.
|
||||
* </p>
|
||||
*
|
||||
* @param field the field whose norms should be reset
|
||||
*/
|
||||
public void reSetNorms(String field) throws IOException {
|
||||
String fieldName = StringHelper.intern(field);
|
||||
int[] termCounts = new int[0];
|
||||
|
||||
IndexReader reader = IndexReader.open(dir, false);
|
||||
try {
|
||||
|
||||
termCounts = new int[reader.maxDoc()];
|
||||
Bits delDocs = MultiFields.getDeletedDocs(reader);
|
||||
DocsEnum docs = null;
|
||||
|
||||
Terms terms = MultiFields.getTerms(reader, field);
|
||||
if (terms != null) {
|
||||
TermsEnum termsEnum = terms.iterator();
|
||||
while(termsEnum.next() != null) {
|
||||
docs = termsEnum.docs(delDocs, docs);
|
||||
int doc;
|
||||
while ((doc = docs.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
|
||||
termCounts[doc] += docs.freq();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (int d = 0; d < termCounts.length; d++) {
|
||||
if (!delDocs.get(d)) {
|
||||
byte norm = Similarity.encodeNorm(sim.lengthNorm(fieldName, termCounts[d]));
|
||||
reader.setNorm(d, fieldName, norm);
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
reader.close();
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -106,27 +106,15 @@ public class BooleanFilter extends Filter
  }

    if (res !=null)
      return finalResult(res, reader.maxDoc());
      return res;

    return DocIdSet.EMPTY_DOCIDSET;
  }

  /** Provide a SortedVIntList when it is definitely smaller
   * than an OpenBitSet.
   * @deprecated Either use CachingWrapperFilter, or
   * switch to a different DocIdSet implementation yourself.
   * This method will be removed in Lucene 4.0
   */
  @Deprecated
  protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
    return result;
  }

  /**
   * Adds a new FilterClause to the Boolean Filter container
   * @param filterClause A FilterClause object containing a Filter and an Occur parameter
   */

  public void add(FilterClause filterClause)
  {
    if (filterClause.getOccur().equals(Occur.MUST)) {

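The removed finalResult hook used to shrink the OpenBitSetDISI result into a SortedVIntList; per its deprecation note, callers who want that saving should now cache or convert the DocIdSet themselves. A minimal, assumed caller-side sketch:

    // Wrap the filter so its DocIdSet is cached per reader, as the removed note suggests.
    BooleanFilter booleanFilter = new BooleanFilter();      // clauses added elsewhere
    Filter cached = new CachingWrapperFilter(booleanFilter);
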
@@ -149,18 +149,6 @@ public class ChainedFilter extends Filter
    return result;
  }

  /** Provide a SortedVIntList when it is definitely
   * smaller than an OpenBitSet
   * @deprecated Either use CachingWrapperFilter, or
   * switch to a different DocIdSet implementation yourself.
   * This method will be removed in Lucene 4.0
   **/
  @Deprecated
  protected final DocIdSet finalResult(OpenBitSetDISI result, int maxDocs) {
    return result;
  }

  /**
   * Delegates to each filter in the chain.
   * @param reader IndexReader

@@ -175,7 +163,7 @@ public class ChainedFilter extends Filter
    {
      doChain(result, logic, chain[index[0]].getDocIdSet(reader));
    }
    return finalResult(result, reader.maxDoc());
    return result;
  }

  /**

@@ -195,7 +183,7 @@ public class ChainedFilter extends Filter
    {
      doChain(result, logic[index[0]], chain[index[0]].getDocIdSet(reader));
    }
    return finalResult(result, reader.maxDoc());
    return result;
  }

  @Override

@@ -1,46 +0,0 @@
package org.apache.lucene.search.regex;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.Term;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;

/**
 * A SpanQuery version of {@link RegexQuery} allowing regular expression
 * queries to be nested within other SpanQuery subclasses.
 * @deprecated Use <code>new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery())</code> instead.
 * This query will be removed in Lucene 4.0
 */
@Deprecated
public class SpanRegexQuery extends SpanMultiTermQueryWrapper<RegexQuery> implements RegexQueryCapable {
  private final RegexCapabilities regexImpl = new JavaUtilRegexCapabilities();

  public SpanRegexQuery(Term term) {
    super(new RegexQuery(term));
  }

  public Term getTerm() { return query.getTerm(); }

  public void setRegexImplementation(RegexCapabilities impl) {
    query.setRegexImplementation(impl);
  }

  public RegexCapabilities getRegexImplementation() {
    return query.getRegexImplementation();
  }
}

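The replacement named in the deprecation note is exactly what the test changes below switch to:

    // Old: SpanQuery q = new SpanRegexQuery(new Term("field", "ken.*"));
    SpanQuery q = new SpanMultiTermQueryWrapper<RegexQuery>(
        new RegexQuery(new Term("field", "ken.*")));
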
@@ -17,6 +17,7 @@ package org.apache.lucene.search.regex;
 * limitations under the License.
 */

import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;

@@ -73,10 +74,10 @@ public class TestRegexQuery extends LuceneTestCase {
  }

  private int spanRegexQueryNrHits(String regex1, String regex2, int slop, boolean ordered) throws Exception {
    SpanRegexQuery srq1 = new SpanRegexQuery( newTerm(regex1));
    SpanRegexQuery srq2 = new SpanRegexQuery( newTerm(regex2));
    SpanQuery srq1 = new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(newTerm(regex1)));
    SpanQuery srq2 = new SpanMultiTermQueryWrapper<RegexQuery>(new RegexQuery(newTerm(regex2)));
    SpanNearQuery query = new SpanNearQuery( new SpanQuery[]{srq1, srq2}, slop, ordered);

    return searcher.search(query, null, 1000).totalHits;
  }

@@ -112,67 +112,6 @@ public class TestSpanRegexQuery extends LuceneTestCase {
|
|||
indexStoreB.close();
|
||||
}
|
||||
|
||||
/** remove in lucene 4.0 */
|
||||
@Deprecated
|
||||
public void testSpanRegexOld() throws Exception {
|
||||
Directory directory = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
Document doc = new Document();
|
||||
// doc.add(newField("field", "the quick brown fox jumps over the lazy dog",
|
||||
// Field.Store.NO, Field.Index.ANALYZED));
|
||||
// writer.addDocument(doc);
|
||||
// doc = new Document();
|
||||
doc.add(newField("field", "auto update", Field.Store.NO,
|
||||
Field.Index.ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(newField("field", "first auto update", Field.Store.NO,
|
||||
Field.Index.ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
writer.optimize();
|
||||
writer.close();
|
||||
|
||||
IndexSearcher searcher = new IndexSearcher(directory, true);
|
||||
SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "aut.*"));
|
||||
SpanFirstQuery sfq = new SpanFirstQuery(srq, 1);
|
||||
// SpanNearQuery query = new SpanNearQuery(new SpanQuery[] {srq, stq}, 6,
|
||||
// true);
|
||||
int numHits = searcher.search(sfq, null, 1000).totalHits;
|
||||
assertEquals(1, numHits);
|
||||
searcher.close();
|
||||
directory.close();
|
||||
}
|
||||
|
||||
/** remove in lucene 4.0 */
|
||||
@Deprecated
|
||||
public void testSpanRegexBugOld() throws CorruptIndexException, IOException {
|
||||
createRAMDirectories();
|
||||
|
||||
SpanRegexQuery srq = new SpanRegexQuery(new Term("field", "a.*"));
|
||||
SpanRegexQuery stq = new SpanRegexQuery(new Term("field", "b.*"));
|
||||
SpanNearQuery query = new SpanNearQuery(new SpanQuery[] { srq, stq }, 6,
|
||||
true);
|
||||
|
||||
// 1. Search the same store which works
|
||||
IndexSearcher[] arrSearcher = new IndexSearcher[2];
|
||||
arrSearcher[0] = new IndexSearcher(indexStoreA, true);
|
||||
arrSearcher[1] = new IndexSearcher(indexStoreB, true);
|
||||
MultiSearcher searcher = new MultiSearcher(arrSearcher);
|
||||
int numHits = searcher.search(query, null, 1000).totalHits;
|
||||
arrSearcher[0].close();
|
||||
arrSearcher[1].close();
|
||||
|
||||
// Will fail here
|
||||
// We expect 2 but only one matched
|
||||
// The rewriter function only write it once on the first IndexSearcher
|
||||
// So it's using term: a1 b1 to search on the second IndexSearcher
|
||||
// As a result, it won't match the document in the second IndexSearcher
|
||||
assertEquals(2, numHits);
|
||||
indexStoreA.close();
|
||||
indexStoreB.close();
|
||||
}
|
||||
|
||||
private void createRAMDirectories() throws CorruptIndexException,
|
||||
LockObtainFailedException, IOException {
|
||||
// creating a document to store
|
||||
|
|
|
@@ -17,11 +17,7 @@ package org.apache.lucene.queryParser.core.processors;
 * limitations under the License.
 */

import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.ListIterator;
import java.util.*;

import org.apache.lucene.queryParser.core.QueryNodeException;
import org.apache.lucene.queryParser.core.config.QueryConfigHandler;

@@ -96,23 +92,6 @@ public class QueryNodeProcessorPipeline implements QueryNodeProcessor,

  }

  /**
   * Adds a processor to the pipeline, it's always added to the end of the
   * pipeline.
   *
   * @deprecated this class now conforms to {@link List} interface, so use
   *             {@link #add(QueryNodeProcessor)} instead
   *
   * @param processor the processor to be added
   */
  @Deprecated
  public void addProcessor(QueryNodeProcessor processor) {
    this.processors.add(processor);

    processor.setQueryConfigHandler(this.queryConfig);

  }

  /**
   * For reference about this method check:
   * {@link QueryNodeProcessor#setQueryConfigHandler(QueryConfigHandler)}.

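Since QueryNodeProcessorPipeline now conforms to List, the removed addProcessor call becomes a plain List#add, as the deprecation note above states:

    // pipeline.addProcessor(processor);  // removed
    pipeline.add(processor);              // List#add, per the deprecation note
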
@@ -1,269 +0,0 @@
|
|||
package org.apache.lucene.queryParser.standard;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.queryParser.ParseException;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
||||
/**
|
||||
* This class behaves as the lucene 2.4 MultiFieldQueryParser class, but uses the new
|
||||
* query parser interface instead of the old one. <br/>
|
||||
* <br/>
|
||||
* This class should be used when the new query parser features are needed and
|
||||
* also keep at the same time the old query parser interface. <br/>
|
||||
*
|
||||
* @deprecated this class will be removed soon, it's a temporary class to be
|
||||
* used along the transition from the old query parser to the new
|
||||
* one
|
||||
*/
|
||||
@Deprecated
|
||||
public class MultiFieldQueryParserWrapper extends QueryParserWrapper {
|
||||
|
||||
/**
|
||||
* Creates a MultiFieldQueryParser. Allows passing of a map with term to
|
||||
* Boost, and the boost to apply to each term.
|
||||
*
|
||||
* <p>
|
||||
* It will, when parse(String query) is called, construct a query like this
|
||||
* (assuming the query consists of two terms and you specify the two fields
|
||||
* <code>title</code> and <code>body</code>):
|
||||
* </p>
|
||||
*
|
||||
* <code>
|
||||
* (title:term1 body:term1) (title:term2 body:term2)
|
||||
* </code>
|
||||
*
|
||||
* <p>
|
||||
* When setDefaultOperator(AND_OPERATOR) is set, the result will be:
|
||||
* </p>
|
||||
*
|
||||
* <code>
|
||||
* +(title:term1 body:term1) +(title:term2 body:term2)
|
||||
* </code>
|
||||
*
|
||||
* <p>
|
||||
* When you pass a boost (title=>5 body=>10) you can get
|
||||
* </p>
|
||||
*
|
||||
* <code>
|
||||
* +(title:term1^5.0 body:term1^10.0) +(title:term2^5.0 body:term2^10.0)
|
||||
* </code>
|
||||
*
|
||||
* <p>
|
||||
* In other words, all the query's terms must appear, but it doesn't matter in
|
||||
* what fields they appear.
|
||||
* </p>
|
||||
*/
|
||||
public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer, Map<String, Float> boosts) {
|
||||
this(fields, analyzer);
|
||||
StandardQueryParser qpHelper = getQueryParserHelper();
|
||||
|
||||
qpHelper.setMultiFields(fields);
|
||||
qpHelper.setFieldsBoost(boosts);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a MultiFieldQueryParser.
|
||||
*
|
||||
* <p>
|
||||
* It will, when parse(String query) is called, construct a query like this
|
||||
* (assuming the query consists of two terms and you specify the two fields
|
||||
* <code>title</code> and <code>body</code>):
|
||||
* </p>
|
||||
*
|
||||
* <code>
|
||||
* (title:term1 body:term1) (title:term2 body:term2)
|
||||
* </code>
|
||||
*
|
||||
* <p>
|
||||
* When setDefaultOperator(AND_OPERATOR) is set, the result will be:
|
||||
* </p>
|
||||
*
|
||||
* <code>
|
||||
* +(title:term1 body:term1) +(title:term2 body:term2)
|
||||
* </code>
|
||||
*
|
||||
* <p>
|
||||
* In other words, all the query's terms must appear, but it doesn't matter in
|
||||
* what fields they appear.
|
||||
* </p>
|
||||
*/
|
||||
public MultiFieldQueryParserWrapper(String[] fields, Analyzer analyzer) {
|
||||
super(null, analyzer);
|
||||
|
||||
StandardQueryParser qpHelper = getQueryParserHelper();
|
||||
qpHelper.setAnalyzer(analyzer);
|
||||
|
||||
qpHelper.setMultiFields(fields);
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a query which searches on the fields specified.
|
||||
* <p>
|
||||
* If x fields are specified, this effectively constructs:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* (field1:query1) (field2:query2) (field3:query3)...(fieldx:queryx)
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* @param queries
|
||||
* Queries strings to parse
|
||||
* @param fields
|
||||
* Fields to search on
|
||||
* @param analyzer
|
||||
* Analyzer to use
|
||||
* @throws ParseException
|
||||
* if query parsing fails
|
||||
* @throws IllegalArgumentException
|
||||
* if the length of the queries array differs from the length of the
|
||||
* fields array
|
||||
*/
|
||||
public static Query parse(String[] queries, String[] fields, Analyzer analyzer)
|
||||
throws ParseException {
|
||||
if (queries.length != fields.length)
|
||||
throw new IllegalArgumentException("queries.length != fields.length");
|
||||
BooleanQuery bQuery = new BooleanQuery();
|
||||
for (int i = 0; i < fields.length; i++) {
|
||||
QueryParserWrapper qp = new QueryParserWrapper(fields[i], analyzer);
|
||||
Query q = qp.parse(queries[i]);
|
||||
if (q != null && // q never null, just being defensive
|
||||
(!(q instanceof BooleanQuery) || ((BooleanQuery) q).getClauses().length > 0)) {
|
||||
bQuery.add(q, BooleanClause.Occur.SHOULD);
|
||||
}
|
||||
}
|
||||
return bQuery;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a query, searching on the fields specified. Use this if you need to
|
||||
* specify certain fields as required, and others as prohibited.
|
||||
* <p>
|
||||
*
|
||||
* <pre>
|
||||
* Usage:
|
||||
* <code>
|
||||
* String[] fields = {"filename", "contents", "description"};
|
||||
* BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
|
||||
* BooleanClause.Occur.MUST,
|
||||
* BooleanClause.Occur.MUST_NOT};
|
||||
* MultiFieldQueryParser.parse("query", fields, flags, analyzer);
|
||||
* </code>
|
||||
* </pre>
|
||||
*<p>
|
||||
* The code above would construct a query:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* (filename:query) +(contents:query) -(description:query)
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* @param query
|
||||
* Query string to parse
|
||||
* @param fields
|
||||
* Fields to search on
|
||||
* @param flags
|
||||
* Flags describing the fields
|
||||
* @param analyzer
|
||||
* Analyzer to use
|
||||
* @throws ParseException
|
||||
* if query parsing fails
|
||||
* @throws IllegalArgumentException
|
||||
* if the length of the fields array differs from the length of the
|
||||
* flags array
|
||||
*/
|
||||
public static Query parse(String query, String[] fields,
|
||||
BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
|
||||
if (fields.length != flags.length)
|
||||
throw new IllegalArgumentException("fields.length != flags.length");
|
||||
BooleanQuery bQuery = new BooleanQuery();
|
||||
for (int i = 0; i < fields.length; i++) {
|
||||
QueryParserWrapper qp = new QueryParserWrapper(fields[i], analyzer);
|
||||
Query q = qp.parse(query);
|
||||
if (q != null && // q never null, just being defensive
|
||||
(!(q instanceof BooleanQuery) || ((BooleanQuery) q).getClauses().length > 0)) {
|
||||
bQuery.add(q, flags[i]);
|
||||
}
|
||||
}
|
||||
return bQuery;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a query, searching on the fields specified. Use this if you need to
|
||||
* specify certain fields as required, and others as prohibited.
|
||||
* <p>
|
||||
*
|
||||
* <pre>
|
||||
* Usage:
|
||||
* <code>
|
||||
* String[] query = {"query1", "query2", "query3"};
|
||||
* String[] fields = {"filename", "contents", "description"};
|
||||
* BooleanClause.Occur[] flags = {BooleanClause.Occur.SHOULD,
|
||||
* BooleanClause.Occur.MUST,
|
||||
* BooleanClause.Occur.MUST_NOT};
|
||||
* MultiFieldQueryParser.parse(query, fields, flags, analyzer);
|
||||
* </code>
|
||||
* </pre>
|
||||
*<p>
|
||||
* The code above would construct a query:
|
||||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* (filename:query1) +(contents:query2) -(description:query3)
|
||||
* </code>
|
||||
* </pre>
|
||||
*
|
||||
* @param queries
|
||||
* Queries string to parse
|
||||
* @param fields
|
||||
* Fields to search on
|
||||
* @param flags
|
||||
* Flags describing the fields
|
||||
* @param analyzer
|
||||
* Analyzer to use
|
||||
* @throws ParseException
|
||||
* if query parsing fails
|
||||
* @throws IllegalArgumentException
|
||||
* if the length of the queries, fields, and flags array differ
|
||||
*/
|
||||
public static Query parse(String[] queries, String[] fields,
|
||||
BooleanClause.Occur[] flags, Analyzer analyzer) throws ParseException {
|
||||
if (!(queries.length == fields.length && queries.length == flags.length))
|
||||
throw new IllegalArgumentException(
|
||||
"queries, fields, and flags array have have different length");
|
||||
BooleanQuery bQuery = new BooleanQuery();
|
||||
for (int i = 0; i < fields.length; i++) {
|
||||
QueryParserWrapper qp = new QueryParserWrapper(fields[i], analyzer);
|
||||
Query q = qp.parse(queries[i]);
|
||||
if (q != null && // q never null, just being defensive
|
||||
(!(q instanceof BooleanQuery) || ((BooleanQuery) q).getClauses().length > 0)) {
|
||||
bQuery.add(q, flags[i]);
|
||||
}
|
||||
}
|
||||
return bQuery;
|
||||
}
|
||||
|
||||
}
|
|
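The deleted wrapper's constructors above only delegated to StandardQueryParser.setMultiFields and setFieldsBoost, so multi-field parsing goes straight through StandardQueryParser. A minimal sketch; the analyzer, field names, and query text are placeholders:

    StandardQueryParser qp = new StandardQueryParser(analyzer);
    qp.setMultiFields(new CharSequence[] { "title", "body" });
    Query q = qp.parse("term1 term2", null);   // throws QueryNodeException; null = no single default field
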
@@ -1,491 +0,0 @@
|
|||
package org.apache.lucene.queryParser.standard;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.text.Collator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.document.DateTools.Resolution;
|
||||
import org.apache.lucene.queryParser.ParseException;
|
||||
import org.apache.lucene.queryParser.QueryParser;
|
||||
import org.apache.lucene.queryParser.core.QueryNodeException;
|
||||
import org.apache.lucene.queryParser.core.config.FieldConfig;
|
||||
import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
|
||||
import org.apache.lucene.queryParser.core.nodes.QueryNode;
|
||||
import org.apache.lucene.queryParser.core.parser.SyntaxParser;
|
||||
import org.apache.lucene.queryParser.core.processors.QueryNodeProcessor;
|
||||
import org.apache.lucene.queryParser.standard.builders.StandardQueryBuilder;
|
||||
import org.apache.lucene.queryParser.standard.builders.StandardQueryTreeBuilder;
|
||||
import org.apache.lucene.queryParser.standard.config.AllowLeadingWildcardAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.AnalyzerAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.DateResolutionAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.DefaultPhraseSlopAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.LocaleAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.LowercaseExpandedTermsAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.MultiTermRewriteMethodAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.PositionIncrementsAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.RangeCollatorAttribute;
|
||||
import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
|
||||
import org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser;
|
||||
import org.apache.lucene.queryParser.standard.processors.StandardQueryNodeProcessorPipeline;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.FuzzyQuery;
|
||||
import org.apache.lucene.search.MultiTermQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
||||
/**
|
||||
* This class performs the query parsing using the new query parser
|
||||
* implementation, but keeps the old {@link QueryParser} API. <br/>
|
||||
* <br/>
|
||||
* This class should be used when the new query parser features are and the old
|
||||
* {@link QueryParser} API are needed at the same time. <br/>
|
||||
*
|
||||
* @deprecated this class will be removed soon, it's a temporary class to be
|
||||
* used along the transition from the old query parser to the new
|
||||
* one
|
||||
*/
|
||||
@Deprecated
|
||||
public class QueryParserWrapper {
|
||||
|
||||
/**
|
||||
* The default operator for parsing queries. Use
|
||||
* {@link QueryParserWrapper#setDefaultOperator} to change it.
|
||||
*/
|
||||
static public enum Operator { OR, AND }
|
||||
|
||||
// the nested class:
|
||||
/** Alternative form of QueryParser.Operator.AND */
|
||||
public static final Operator AND_OPERATOR = Operator.AND;
|
||||
|
||||
/** Alternative form of QueryParser.Operator.OR */
|
||||
public static final Operator OR_OPERATOR = Operator.OR;
|
||||
|
||||
/**
|
||||
* Returns a String where those characters that QueryParser expects to be
|
||||
* escaped are escaped by a preceding <code>\</code>.
|
||||
*/
|
||||
public static String escape(String s) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (int i = 0; i < s.length(); i++) {
|
||||
char c = s.charAt(i);
|
||||
// These characters are part of the query syntax and must be escaped
|
||||
if (c == '\\' || c == '+' || c == '-' || c == '!' || c == '(' || c == ')'
|
||||
|| c == ':' || c == '^' || c == '[' || c == ']' || c == '\"'
|
||||
|| c == '{' || c == '}' || c == '~' || c == '*' || c == '?'
|
||||
|| c == '|' || c == '&') {
|
||||
sb.append('\\');
|
||||
}
|
||||
sb.append(c);
|
||||
}
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
private SyntaxParser syntaxParser = new StandardSyntaxParser();
|
||||
|
||||
private StandardQueryConfigHandler config;
|
||||
|
||||
private StandardQueryParser qpHelper;
|
||||
|
||||
private QueryNodeProcessor processorPipeline;
|
||||
|
||||
private StandardQueryBuilder builder = new StandardQueryTreeBuilder();
|
||||
|
||||
private String defaultField;
|
||||
|
||||
public QueryParserWrapper(String defaultField, Analyzer analyzer) {
|
||||
this.defaultField = defaultField;
|
||||
|
||||
this.qpHelper = new StandardQueryParser();
|
||||
|
||||
this.config = (StandardQueryConfigHandler) qpHelper.getQueryConfigHandler();
|
||||
|
||||
this.qpHelper.setAnalyzer(analyzer);
|
||||
|
||||
this.processorPipeline = new StandardQueryNodeProcessorPipeline(this.config);
|
||||
|
||||
}
|
||||
|
||||
StandardQueryParser getQueryParserHelper() {
|
||||
return qpHelper;
|
||||
}
|
||||
|
||||
public String getField() {
|
||||
return this.defaultField;
|
||||
}
|
||||
|
||||
public Analyzer getAnalyzer() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(AnalyzerAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(AnalyzerAttribute.class).getAnalyzer();
|
||||
|
||||
}
|
||||
|
||||
return null;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the {@link StandardQueryBuilder} used to generate a {@link Query}
|
||||
* object from the parsed and processed query node tree.
|
||||
*
|
||||
* @param builder the builder
|
||||
*/
|
||||
public void setQueryBuilder(StandardQueryBuilder builder) {
|
||||
this.builder = builder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the {@link QueryNodeProcessor} used to process the query node tree
|
||||
* generated by the
|
||||
* {@link org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser}.
|
||||
*
|
||||
* @param processor the processor
|
||||
*/
|
||||
public void setQueryProcessor(QueryNodeProcessor processor) {
|
||||
this.processorPipeline = processor;
|
||||
this.processorPipeline.setQueryConfigHandler(this.config);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the {@link QueryConfigHandler} used by the {@link QueryNodeProcessor}
|
||||
* set to this object.
|
||||
*
|
||||
* @param queryConfig the query config handler
|
||||
*/
|
||||
public void setQueryConfig(StandardQueryConfigHandler queryConfig) {
|
||||
this.config = queryConfig;
|
||||
|
||||
if (this.processorPipeline != null) {
|
||||
this.processorPipeline.setQueryConfigHandler(this.config);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the query config handler used by this query parser
|
||||
*
|
||||
* @return the query config handler
|
||||
*/
|
||||
public QueryConfigHandler getQueryConfigHandler() {
|
||||
return this.config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns {@link QueryNodeProcessor} used to process the query node tree
|
||||
* generated by the
|
||||
* {@link org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser}.
|
||||
*
|
||||
* @return the query processor
|
||||
*/
|
||||
public QueryNodeProcessor getQueryProcessor() {
|
||||
return this.processorPipeline;
|
||||
}
|
||||
|
||||
public ParseException generateParseException() {
|
||||
return null;
|
||||
}
|
||||
|
||||
public boolean getAllowLeadingWildcard() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(AllowLeadingWildcardAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(AllowLeadingWildcardAttribute.class)
|
||||
.isAllowLeadingWildcard();
|
||||
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
public MultiTermQuery.RewriteMethod getMultiTermRewriteMethod() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(MultiTermRewriteMethodAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(MultiTermRewriteMethodAttribute.class)
|
||||
.getMultiTermRewriteMethod();
|
||||
|
||||
}
|
||||
|
||||
return MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT;
|
||||
|
||||
}
|
||||
|
||||
public Resolution getDateResolution(String fieldName) {
|
||||
|
||||
if (this.config != null) {
|
||||
FieldConfig fieldConfig = this.config.getFieldConfig(fieldName);
|
||||
|
||||
if (fieldConfig != null) {
|
||||
|
||||
if (this.config.hasAttribute(DateResolutionAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(DateResolutionAttribute.class)
|
||||
.getDateResolution();
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return null;
|
||||
|
||||
}
|
||||
|
||||
public boolean getEnablePositionIncrements() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(PositionIncrementsAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(PositionIncrementsAttribute.class)
|
||||
.isPositionIncrementsEnabled();
|
||||
|
||||
}
|
||||
|
||||
return false;
|
||||
|
||||
}
|
||||
|
||||
public float getFuzzyMinSim() {
|
||||
return FuzzyQuery.defaultMinSimilarity;
|
||||
}
|
||||
|
||||
public int getFuzzyPrefixLength() {
|
||||
return FuzzyQuery.defaultPrefixLength;
|
||||
}
|
||||
|
||||
public Locale getLocale() {
|
||||
|
||||
if (this.config != null && this.config.hasAttribute(LocaleAttribute.class)) {
|
||||
return this.config.getAttribute(LocaleAttribute.class).getLocale();
|
||||
}
|
||||
|
||||
return Locale.getDefault();
|
||||
|
||||
}
|
||||
|
||||
public boolean getLowercaseExpandedTerms() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(LowercaseExpandedTermsAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(LowercaseExpandedTermsAttribute.class)
|
||||
.isLowercaseExpandedTerms();
|
||||
|
||||
}
|
||||
|
||||
return true;
|
||||
|
||||
}
|
||||
|
||||
public int getPhraseSlop() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(AllowLeadingWildcardAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(DefaultPhraseSlopAttribute.class)
|
||||
.getDefaultPhraseSlop();
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
public Collator getRangeCollator() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(RangeCollatorAttribute.class)) {
|
||||
|
||||
return this.config.getAttribute(RangeCollatorAttribute.class)
|
||||
.getRangeCollator();
|
||||
|
||||
}
|
||||
|
||||
return null;
|
||||
|
||||
}
|
||||
|
||||
public boolean getUseOldRangeQuery() {
|
||||
if (getMultiTermRewriteMethod() == MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE) {
|
||||
return true;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
public Query parse(String query) throws ParseException {
|
||||
|
||||
try {
|
||||
QueryNode queryTree = this.syntaxParser.parse(query, getField());
|
||||
queryTree = this.processorPipeline.process(queryTree);
|
||||
return this.builder.build(queryTree);
|
||||
|
||||
} catch (QueryNodeException e) {
|
||||
throw new ParseException("parse exception");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public void setAllowLeadingWildcard(boolean allowLeadingWildcard) {
|
||||
this.qpHelper.setAllowLeadingWildcard(allowLeadingWildcard);
|
||||
}
|
||||
|
||||
public void setMultiTermRewriteMethod(MultiTermQuery.RewriteMethod method) {
|
||||
this.qpHelper.setMultiTermRewriteMethod(method);
|
||||
}
|
||||
|
||||
public void setDateResolution(Resolution dateResolution) {
|
||||
this.qpHelper.setDateResolution(dateResolution);
|
||||
}
|
||||
|
||||
private Map<CharSequence, DateTools.Resolution> dateRes = new HashMap<CharSequence, DateTools.Resolution>();
|
||||
|
||||
public void setDateResolution(String fieldName, Resolution dateResolution) {
|
||||
dateRes.put(fieldName, dateResolution);
|
||||
this.qpHelper.setDateResolution(dateRes);
|
||||
}
|
||||
|
||||
public void setDefaultOperator(Operator op) {
|
||||
|
||||
this.qpHelper
|
||||
.setDefaultOperator(OR_OPERATOR.equals(op) ? org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator.OR
|
||||
: org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator.AND);
|
||||
|
||||
}
|
||||
|
||||
public Operator getDefaultOperator() {
|
||||
|
||||
if (this.config != null
|
||||
&& this.config.hasAttribute(DefaultOperatorAttribute.class)) {
|
||||
|
||||
return (this.config.getAttribute(DefaultOperatorAttribute.class)
|
||||
.getOperator() == org.apache.lucene.queryParser.standard.config.DefaultOperatorAttribute.Operator.AND) ? AND_OPERATOR
|
||||
: OR_OPERATOR;
|
||||
|
||||
}
|
||||
|
||||
return OR_OPERATOR;
|
||||
|
||||
}
|
||||
|
||||
public void setEnablePositionIncrements(boolean enable) {
|
||||
this.qpHelper.setEnablePositionIncrements(enable);
|
||||
}
|
||||
|
||||
public void setFuzzyMinSim(float fuzzyMinSim) {
|
||||
// TODO Auto-generated method stub
|
||||
|
||||
}
|
||||
|
||||
public void setFuzzyPrefixLength(int fuzzyPrefixLength) {
|
||||
// TODO Auto-generated method stub
|
||||
|
||||
}
|
||||
|
||||
public void setLocale(Locale locale) {
|
||||
this.qpHelper.setLocale(locale);
|
||||
}
|
||||
|
||||
public void setLowercaseExpandedTerms(boolean lowercaseExpandedTerms) {
|
||||
this.qpHelper.setLowercaseExpandedTerms(lowercaseExpandedTerms);
|
||||
}
|
||||
|
||||
public void setPhraseSlop(int phraseSlop) {
|
||||
this.qpHelper.setDefaultPhraseSlop(phraseSlop);
|
||||
}
|
||||
|
||||
public void setRangeCollator(Collator rc) {
|
||||
this.qpHelper.setRangeCollator(rc);
|
||||
}
|
||||
|
||||
public void setUseOldRangeQuery(boolean useOldRangeQuery) {
|
||||
if (useOldRangeQuery) {
|
||||
setMultiTermRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_QUERY_REWRITE);
|
||||
} else {
|
||||
setMultiTermRewriteMethod(MultiTermQuery.CONSTANT_SCORE_AUTO_REWRITE_DEFAULT);
|
||||
}
|
||||
}
|
||||
|
||||
protected Query getPrefixQuery(String field, String termStr)
|
||||
throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
protected Query getWildcardQuery(String field, String termStr)
|
||||
throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
protected Query getFuzzyQuery(String field, String termStr,
|
||||
float minSimilarity) throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/** @deprecated Use {@link #getFieldQuery(String, String, boolean)} instead */
|
||||
@Deprecated
|
||||
protected Query getFieldQuery(String field, String queryText) throws ParseException {
|
||||
return getFieldQuery(field, queryText, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* @exception ParseException thrown in overridden method to disallow
|
||||
*/
|
||||
protected Query getFieldQuery(String field, String queryText, boolean quoted)
|
||||
throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
protected Query getBooleanQuery(List<BooleanClause> clauses, boolean disableCoord)
|
||||
throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Base implementation delegates to {@link #getFieldQuery(String,String)}.
|
||||
* This method may be overridden, for example, to return a SpanNearQuery
|
||||
* instead of a PhraseQuery.
|
||||
*
|
||||
* @exception ParseException thrown in overridden method to disallow
|
||||
*/
|
||||
protected Query getFieldQuery(String field, String queryText, int slop)
|
||||
throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
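// Illustrative sketch (not part of the original source): as the javadoc above
// notes, a subclass may override this hook to build a SpanNearQuery instead of
// a PhraseQuery. The terms below are placeholders; a real override would
// analyze queryText to obtain them.
//
//   @Override
//   protected Query getFieldQuery(String field, String queryText, int slop)
//       throws ParseException {
//     SpanQuery[] clauses = new SpanQuery[] {
//         new SpanTermQuery(new Term(field, "some")),
//         new SpanTermQuery(new Term(field, "text")) };
//     return new SpanNearQuery(clauses, slop, true);   // in-order span match
//   }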
||||
|
||||
/**
|
||||
* @exception ParseException thrown in overridden method to disallow
|
||||
*/
|
||||
protected Query getRangeQuery(String field, String part1, String part2,
|
||||
boolean inclusive) throws ParseException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
}
|
|
@ -39,12 +39,5 @@ are used to reproduce the same behavior as the old query parser.
|
|||
Check <tt>org.apache.lucene.queryParser.standard.StandardQueryParser</tt> for a quick start with the Lucene query parser.
|
||||
</p>
|
||||
|
||||
<p>
|
||||
There are two wrapper classes that extend QueryParser and MultiFieldQueryParser.
|
||||
Internally, these classes delegate to the new query parser structure. These two
|
||||
classes are deprecated and should only be used when there is a need to use the
|
||||
old query parser interface.
|
||||
</p>
|
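<p>
A minimal quick-start sketch is shown below. It is illustrative only; the
analyzer, version constant and field name are assumptions rather than part of
this package documentation.
</p>
<pre>
StandardQueryParser qpHelper = new StandardQueryParser(new StandardAnalyzer(Version.LUCENE_40));
// parse(query, defaultField) may throw QueryNodeException
Query query = qpHelper.parse("apache AND lucene", "defaultField");
</pre>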
||||
|
||||
</body>
|
||||
</html>
|
||||
|
|
|
@ -347,7 +347,7 @@ public class JavaCharStream
|
|||
|
||||
@Deprecated
|
||||
/**
|
||||
* @deprecated
|
||||
* @deprecated (gen)
|
||||
* @see #getEndColumn
|
||||
*/
|
||||
public int getColumn() {
|
||||
|
@ -356,7 +356,7 @@ public class JavaCharStream
|
|||
|
||||
@Deprecated
|
||||
/**
|
||||
* @deprecated
|
||||
* @deprecated (gen)
|
||||
* @see #getEndLine
|
||||
*/
|
||||
public int getLine() {
|
||||
|
|
|
@ -24,7 +24,6 @@ import java.util.Date;
|
|||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import org.apache.lucene.document.DateField;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.document.DateTools.Resolution;
|
||||
import org.apache.lucene.queryParser.core.QueryNodeException;
|
||||
|
@ -140,17 +139,8 @@ public class ParametricRangeQueryNodeProcessor extends QueryNodeProcessorImpl {
|
|||
d2 = cal.getTime();
|
||||
}
|
||||
|
||||
if (dateRes == null) {
|
||||
// no default or field specific date resolution has been set,
|
||||
// use deprecated DateField to maintain compatibility with
|
||||
// pre-1.9 Lucene versions.
|
||||
part1 = DateField.dateToString(d1);
|
||||
part2 = DateField.dateToString(d2);
|
||||
|
||||
} else {
|
||||
part1 = DateTools.dateToString(d1, dateRes);
|
||||
part2 = DateTools.dateToString(d2, dateRes);
|
||||
}
|
||||
part1 = DateTools.dateToString(d1, dateRes);
|
||||
part2 = DateTools.dateToString(d2, dateRes);
|
||||
} catch (Exception e) {
|
||||
// do nothing
|
||||
}
|
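For reference, a hedged sketch of the DateTools path that the remaining code takes; the dates and resolution below are illustrative, not taken from this patch.

Date d1 = new GregorianCalendar(2002, Calendar.JANUARY, 1).getTime();
Date d2 = new GregorianCalendar(2002, Calendar.JANUARY, 4).getTime();
// encode both range endpoints at an explicit resolution instead of the legacy DateField format
String part1 = DateTools.dateToString(d1, DateTools.Resolution.DAY);
String part2 = DateTools.dateToString(d2, DateTools.Resolution.DAY);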
||||
|
|
|
@ -28,14 +28,14 @@ public interface CharStream {
|
|||
|
||||
/**
|
||||
* Returns the column position of the character last read.
|
||||
* @deprecated
|
||||
* @deprecated (gen)
|
||||
* @see #getEndColumn
|
||||
*/
|
||||
int getColumn();
|
||||
|
||||
/**
|
||||
* Returns the line number of the character last read.
|
||||
* @deprecated
|
||||
* @deprecated (gen)
|
||||
* @see #getEndLine
|
||||
*/
|
||||
int getLine();
|
||||
|
|
|
@ -25,11 +25,11 @@ public class TestQueryNode extends LuceneTestCase {
|
|||
|
||||
/* LUCENE-2227 bug in QueryNodeImpl.add() */
|
||||
public void testAddChildren() throws Exception {
|
||||
FieldQueryNode nodeA = new FieldQueryNode("foo", "A", 0, 1);
|
||||
FieldQueryNode nodeB = new FieldQueryNode("foo", "B", 1, 2);
|
||||
QueryNode nodeA = new FieldQueryNode("foo", "A", 0, 1);
|
||||
QueryNode nodeB = new FieldQueryNode("foo", "B", 1, 2);
|
||||
BooleanQueryNode bq = new BooleanQueryNode(
|
||||
Arrays.asList(new QueryNode[] { nodeA }));
|
||||
bq.add(Arrays.asList(new QueryNode[] { nodeB }));
|
||||
Arrays.asList(nodeA));
|
||||
bq.add(Arrays.asList(nodeB));
|
||||
assertEquals(2, bq.getChildren().size());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenFilter;
|
|||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.document.DateField;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.queryParser.TestQueryParser;
|
||||
import org.apache.lucene.queryParser.core.QueryNodeException;
|
||||
|
@ -405,19 +404,11 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
|
|||
final String hourField = "hour";
|
||||
PrecedenceQueryParser qp = new PrecedenceQueryParser(new MockAnalyzer());
|
||||
|
||||
// Don't set any date resolution and verify if DateField is used
|
||||
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
|
||||
endDateExpected.getTime(), null);
|
||||
|
||||
Map<CharSequence, DateTools.Resolution> fieldMap = new HashMap<CharSequence,DateTools.Resolution>();
|
||||
// set a field specific date resolution
|
||||
fieldMap.put(monthField, DateTools.Resolution.MONTH);
|
||||
qp.setDateResolution(fieldMap);
|
||||
|
||||
// DateField should still be used for defaultField
|
||||
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
|
||||
endDateExpected.getTime(), null);
|
||||
|
||||
// set default date resolution to MILLISECOND
|
||||
qp.setDateResolution(DateTools.Resolution.MILLISECOND);
|
||||
|
||||
|
@ -439,20 +430,14 @@ public class TestPrecedenceQueryParser extends LuceneTestCase {
|
|||
}
|
||||
|
||||
/** for testing DateTools support */
|
||||
private String getDate(String s, DateTools.Resolution resolution)
|
||||
throws Exception {
|
||||
private String getDate(String s, DateTools.Resolution resolution) throws Exception {
|
||||
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
|
||||
return getDate(df.parse(s), resolution);
|
||||
}
|
||||
|
||||
/** for testing DateTools support */
|
||||
private String getDate(Date d, DateTools.Resolution resolution)
|
||||
throws Exception {
|
||||
if (resolution == null) {
|
||||
return DateField.dateToString(d);
|
||||
} else {
|
||||
return DateTools.dateToString(d, resolution);
|
||||
}
|
||||
private String getDate(Date d, DateTools.Resolution resolution) throws Exception {
|
||||
return DateTools.dateToString(d, resolution);
|
||||
}
|
||||
|
||||
public void assertQueryEquals(PrecedenceQueryParser qp, String field, String query,
|
||||
|
|
|
@ -1,245 +0,0 @@
|
|||
package org.apache.lucene.queryParser.standard;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.MockTokenizer;
|
||||
import org.apache.lucene.analysis.TokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
|
||||
import org.apache.lucene.queryParser.ParseException;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* This test case is a copy of the core Lucene query parser test; it was adapted
|
||||
* to use the new QueryParserWrapper instead of the old query parser.
|
||||
*
|
||||
* Test QueryParser's ability to deal with Analyzers that return more than one
|
||||
* token per position or that return tokens with a position increment > 1.
|
||||
*
|
||||
*/
|
||||
public class TestMultiAnalyzerWrapper extends LuceneTestCase {
|
||||
|
||||
private static int multiToken = 0;
|
||||
|
||||
public void testMultiAnalyzer() throws ParseException {
|
||||
|
||||
QueryParserWrapper qp = new QueryParserWrapper("", new MultiAnalyzer());
|
||||
|
||||
// trivial, no multiple tokens:
|
||||
assertEquals("foo", qp.parse("foo").toString());
|
||||
assertEquals("foo", qp.parse("\"foo\"").toString());
|
||||
assertEquals("foo foobar", qp.parse("foo foobar").toString());
|
||||
assertEquals("\"foo foobar\"", qp.parse("\"foo foobar\"").toString());
|
||||
assertEquals("\"foo foobar blah\"", qp.parse("\"foo foobar blah\"")
|
||||
.toString());
|
||||
|
||||
// two tokens at the same position:
|
||||
assertEquals("(multi multi2) foo", qp.parse("multi foo").toString());
|
||||
assertEquals("foo (multi multi2)", qp.parse("foo multi").toString());
|
||||
assertEquals("(multi multi2) (multi multi2)", qp.parse("multi multi")
|
||||
.toString());
|
||||
assertEquals("+(foo (multi multi2)) +(bar (multi multi2))", qp.parse(
|
||||
"+(foo multi) +(bar multi)").toString());
|
||||
assertEquals("+(foo (multi multi2)) field:\"bar (multi multi2)\"", qp
|
||||
.parse("+(foo multi) field:\"bar multi\"").toString());
|
||||
|
||||
// phrases:
|
||||
assertEquals("\"(multi multi2) foo\"", qp.parse("\"multi foo\"").toString());
|
||||
assertEquals("\"foo (multi multi2)\"", qp.parse("\"foo multi\"").toString());
|
||||
assertEquals("\"foo (multi multi2) foobar (multi multi2)\"", qp.parse(
|
||||
"\"foo multi foobar multi\"").toString());
|
||||
|
||||
// fields:
|
||||
assertEquals("(field:multi field:multi2) field:foo", qp.parse(
|
||||
"field:multi field:foo").toString());
|
||||
assertEquals("field:\"(multi multi2) foo\"", qp
|
||||
.parse("field:\"multi foo\"").toString());
|
||||
|
||||
// three tokens at one position:
|
||||
assertEquals("triplemulti multi3 multi2", qp.parse("triplemulti")
|
||||
.toString());
|
||||
assertEquals("foo (triplemulti multi3 multi2) foobar", qp.parse(
|
||||
"foo triplemulti foobar").toString());
|
||||
|
||||
// phrase with non-default slop:
|
||||
assertEquals("\"(multi multi2) foo\"~10", qp.parse("\"multi foo\"~10")
|
||||
.toString());
|
||||
|
||||
// phrase with non-default boost:
|
||||
assertEquals("\"(multi multi2) foo\"^2.0", qp.parse("\"multi foo\"^2")
|
||||
.toString());
|
||||
|
||||
// phrase after changing default slop
|
||||
qp.setPhraseSlop(99);
|
||||
assertEquals("\"(multi multi2) foo\"~99 bar", qp.parse("\"multi foo\" bar")
|
||||
.toString());
|
||||
assertEquals("\"(multi multi2) foo\"~99 \"foo bar\"~2", qp.parse(
|
||||
"\"multi foo\" \"foo bar\"~2").toString());
|
||||
qp.setPhraseSlop(0);
|
||||
|
||||
// non-default operator:
|
||||
qp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
|
||||
assertEquals("+(multi multi2) +foo", qp.parse("multi foo").toString());
|
||||
|
||||
}
|
||||
|
||||
// public void testMultiAnalyzerWithSubclassOfQueryParser() throws
|
||||
// ParseException {
|
||||
// this test doesn't make sense when using the new QueryParser API
|
||||
// DumbQueryParser qp = new DumbQueryParser("", new MultiAnalyzer());
|
||||
// qp.setPhraseSlop(99); // modified default slop
|
||||
//
|
||||
// // direct call to (super's) getFieldQuery to demonstrate difference
|
||||
// // between phrase and multiphrase with modified default slop
|
||||
// assertEquals("\"foo bar\"~99",
|
||||
// qp.getSuperFieldQuery("","foo bar").toString());
|
||||
// assertEquals("\"(multi multi2) bar\"~99",
|
||||
// qp.getSuperFieldQuery("","multi bar").toString());
|
||||
//
|
||||
//
|
||||
// // ask subclass to parse phrase with modified default slop
|
||||
// assertEquals("\"(multi multi2) foo\"~99 bar",
|
||||
// qp.parse("\"multi foo\" bar").toString());
|
||||
//
|
||||
// }
|
||||
|
||||
public void testPosIncrementAnalyzer() throws ParseException {
|
||||
QueryParserWrapper qp = new QueryParserWrapper("",
|
||||
new PosIncrementAnalyzer());
|
||||
assertEquals("quick brown", qp.parse("the quick brown").toString());
|
||||
assertEquals("\"quick brown\"", qp.parse("\"the quick brown\"").toString());
|
||||
assertEquals("quick brown fox", qp.parse("the quick brown fox").toString());
|
||||
assertEquals("\"quick brown fox\"", qp.parse("\"the quick brown fox\"")
|
||||
.toString());
|
||||
}
|
||||
|
||||
/**
|
||||
* Expands "multi" to "multi" and "multi2", both at the same position, and
|
||||
* expands "triplemulti" to "triplemulti", "multi3", and "multi2".
|
||||
*/
|
||||
private class MultiAnalyzer extends Analyzer {
|
||||
|
||||
public MultiAnalyzer() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream tokenStream(String fieldName, Reader reader) {
|
||||
TokenStream result = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
|
||||
result = new TestFilter(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private final class TestFilter extends TokenFilter {
|
||||
|
||||
private String prevType;
|
||||
private int prevStartOffset;
|
||||
private int prevEndOffset;
|
||||
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||
private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
|
||||
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
|
||||
private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
|
||||
|
||||
public TestFilter(TokenStream in) {
|
||||
super(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean incrementToken() throws java.io.IOException {
|
||||
if (multiToken > 0) {
|
||||
termAtt.setEmpty().append("multi" + (multiToken + 1));
|
||||
offsetAtt.setOffset(prevStartOffset, prevEndOffset);
|
||||
typeAtt.setType(prevType);
|
||||
posIncrAtt.setPositionIncrement(0);
|
||||
multiToken--;
|
||||
return true;
|
||||
} else {
|
||||
boolean next = input.incrementToken();
|
||||
if (next == false) {
|
||||
return false;
|
||||
}
|
||||
prevType = typeAtt.type();
|
||||
prevStartOffset = offsetAtt.startOffset();
|
||||
prevEndOffset = offsetAtt.endOffset();
|
||||
String text = termAtt.toString();
|
||||
if (text.equals("triplemulti")) {
|
||||
multiToken = 2;
|
||||
return true;
|
||||
} else if (text.equals("multi")) {
|
||||
multiToken = 1;
|
||||
return true;
|
||||
} else {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Analyzes "the quick brown" as: quick(incr=2) brown(incr=1). Does not work
|
||||
* correctly for input other than "the quick brown ...".
|
||||
*/
|
||||
private class PosIncrementAnalyzer extends Analyzer {
|
||||
|
||||
public PosIncrementAnalyzer() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream tokenStream(String fieldName, Reader reader) {
|
||||
TokenStream result = new MockTokenizer(reader, MockTokenizer.WHITESPACE, true);
|
||||
result = new TestPosIncrementFilter(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private class TestPosIncrementFilter extends TokenFilter {
|
||||
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||
private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);
|
||||
|
||||
public TestPosIncrementFilter(TokenStream in) {
|
||||
super(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean incrementToken() throws java.io.IOException {
|
||||
while (input.incrementToken()) {
|
||||
if (termAtt.toString().equals("the")) {
|
||||
// stopword, do nothing
|
||||
} else if (termAtt.toString().equals("quick")) {
|
||||
posIncrAtt.setPositionIncrement(2);
|
||||
return true;
|
||||
} else {
|
||||
posIncrAtt.setPositionIncrement(1);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
|
@ -1,370 +0,0 @@
|
|||
package org.apache.lucene.queryParser.standard;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.Reader;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.queryParser.ParseException;
|
||||
import org.apache.lucene.queryParser.standard.MultiFieldQueryParserWrapper;
|
||||
import org.apache.lucene.queryParser.standard.QueryParserWrapper;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Tests multi field query parsing using the
|
||||
* {@link MultiFieldQueryParserWrapper}.
|
||||
*
|
||||
* @deprecated this test exercises the deprecated MultiFieldQueryParserWrapper, so
|
||||
* when the latter is gone, so should this test.
|
||||
*/
|
||||
@Deprecated
|
||||
public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
|
||||
|
||||
/**
|
||||
* test stop word parsing for both the non-static form and for the
|
||||
* corresponding static form (qtxt, fields[]).
|
||||
*/
|
||||
public void testStopwordsParsing() throws Exception {
|
||||
assertStopQueryEquals("one", "b:one t:one");
|
||||
assertStopQueryEquals("one stop", "b:one t:one");
|
||||
assertStopQueryEquals("one (stop)", "b:one t:one");
|
||||
assertStopQueryEquals("one ((stop))", "b:one t:one");
|
||||
assertStopQueryEquals("stop", "");
|
||||
assertStopQueryEquals("(stop)", "");
|
||||
assertStopQueryEquals("((stop))", "");
|
||||
}
|
||||
|
||||
// verify parsing of query using a stopping analyzer
|
||||
private void assertStopQueryEquals(String qtxt, String expectedRes)
|
||||
throws Exception {
|
||||
String[] fields = { "b", "t" };
|
||||
Occur occur[] = { Occur.SHOULD, Occur.SHOULD };
|
||||
TestQueryParserWrapper.QPTestAnalyzer a = new TestQueryParserWrapper.QPTestAnalyzer();
|
||||
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
|
||||
fields, a);
|
||||
|
||||
Query q = mfqp.parse(qtxt);
|
||||
assertEquals(expectedRes, q.toString());
|
||||
|
||||
q = MultiFieldQueryParserWrapper.parse(qtxt, fields, occur, a);
|
||||
assertEquals(expectedRes, q.toString());
|
||||
}
|
||||
|
||||
public void testSimple() throws Exception {
|
||||
String[] fields = { "b", "t" };
|
||||
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
|
||||
fields, new MockAnalyzer());
|
||||
|
||||
Query q = mfqp.parse("one");
|
||||
assertEquals("b:one t:one", q.toString());
|
||||
|
||||
q = mfqp.parse("one two");
|
||||
assertEquals("(b:one t:one) (b:two t:two)", q.toString());
|
||||
|
||||
q = mfqp.parse("+one +two");
|
||||
assertEquals("+(b:one t:one) +(b:two t:two)", q.toString());
|
||||
|
||||
q = mfqp.parse("+one -two -three");
|
||||
assertEquals("+(b:one t:one) -(b:two t:two) -(b:three t:three)", q
|
||||
.toString());
|
||||
|
||||
q = mfqp.parse("one^2 two");
|
||||
assertEquals("((b:one t:one)^2.0) (b:two t:two)", q.toString());
|
||||
|
||||
q = mfqp.parse("one~ two");
|
||||
assertEquals("(b:one~2.0 t:one~2.0) (b:two t:two)", q.toString());
|
||||
|
||||
q = mfqp.parse("one~0.8 two^2");
|
||||
assertEquals("(b:one~0.8 t:one~0.8) ((b:two t:two)^2.0)", q.toString());
|
||||
|
||||
q = mfqp.parse("one* two*");
|
||||
assertEquals("(b:one* t:one*) (b:two* t:two*)", q.toString());
|
||||
|
||||
q = mfqp.parse("[a TO c] two");
|
||||
assertEquals("(b:[a TO c] t:[a TO c]) (b:two t:two)", q.toString());
|
||||
|
||||
q = mfqp.parse("w?ldcard");
|
||||
assertEquals("b:w?ldcard t:w?ldcard", q.toString());
|
||||
|
||||
q = mfqp.parse("\"foo bar\"");
|
||||
assertEquals("b:\"foo bar\" t:\"foo bar\"", q.toString());
|
||||
|
||||
q = mfqp.parse("\"aa bb cc\" \"dd ee\"");
|
||||
assertEquals("(b:\"aa bb cc\" t:\"aa bb cc\") (b:\"dd ee\" t:\"dd ee\")", q
|
||||
.toString());
|
||||
|
||||
q = mfqp.parse("\"foo bar\"~4");
|
||||
assertEquals("b:\"foo bar\"~4 t:\"foo bar\"~4", q.toString());
|
||||
|
||||
// LUCENE-1213: MultiFieldQueryParserWrapper was ignoring slop when phrase
|
||||
// had a field.
|
||||
q = mfqp.parse("b:\"foo bar\"~4");
|
||||
assertEquals("b:\"foo bar\"~4", q.toString());
|
||||
|
||||
// make sure that terms which have a field are not touched:
|
||||
q = mfqp.parse("one f:two");
|
||||
assertEquals("(b:one t:one) f:two", q.toString());
|
||||
|
||||
// AND mode:
|
||||
mfqp.setDefaultOperator(QueryParserWrapper.AND_OPERATOR);
|
||||
q = mfqp.parse("one two");
|
||||
assertEquals("+(b:one t:one) +(b:two t:two)", q.toString());
|
||||
q = mfqp.parse("\"aa bb cc\" \"dd ee\"");
|
||||
assertEquals("+(b:\"aa bb cc\" t:\"aa bb cc\") +(b:\"dd ee\" t:\"dd ee\")",
|
||||
q.toString());
|
||||
|
||||
}
|
||||
|
||||
public void testBoostsSimple() throws Exception {
|
||||
Map<String,Float> boosts = new HashMap<String,Float>();
|
||||
boosts.put("b", Float.valueOf(5));
|
||||
boosts.put("t", Float.valueOf(10));
|
||||
String[] fields = { "b", "t" };
|
||||
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
|
||||
fields, new MockAnalyzer(), boosts);
|
||||
|
||||
// Check for simple
|
||||
Query q = mfqp.parse("one");
|
||||
assertEquals("b:one^5.0 t:one^10.0", q.toString());
|
||||
|
||||
// Check for AND
|
||||
q = mfqp.parse("one AND two");
|
||||
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0)", q
|
||||
.toString());
|
||||
|
||||
// Check for OR
|
||||
q = mfqp.parse("one OR two");
|
||||
assertEquals("(b:one^5.0 t:one^10.0) (b:two^5.0 t:two^10.0)", q.toString());
|
||||
|
||||
// Check for AND and a field
|
||||
q = mfqp.parse("one AND two AND foo:test");
|
||||
assertEquals("+(b:one^5.0 t:one^10.0) +(b:two^5.0 t:two^10.0) +foo:test", q
|
||||
.toString());
|
||||
|
||||
q = mfqp.parse("one^3 AND two^4");
|
||||
assertEquals("+((b:one^5.0 t:one^10.0)^3.0) +((b:two^5.0 t:two^10.0)^4.0)",
|
||||
q.toString());
|
||||
}
|
||||
|
||||
public void testStaticMethod1() throws ParseException {
|
||||
String[] fields = { "b", "t" };
|
||||
String[] queries = { "one", "two" };
|
||||
Query q = MultiFieldQueryParserWrapper.parse(queries, fields,
|
||||
new MockAnalyzer());
|
||||
assertEquals("b:one t:two", q.toString());
|
||||
|
||||
String[] queries2 = { "+one", "+two" };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries2, fields,
|
||||
new MockAnalyzer());
|
||||
assertEquals("(+b:one) (+t:two)", q.toString());
|
||||
|
||||
String[] queries3 = { "one", "+two" };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries3, fields,
|
||||
new MockAnalyzer());
|
||||
assertEquals("b:one (+t:two)", q.toString());
|
||||
|
||||
String[] queries4 = { "one +more", "+two" };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries4, fields,
|
||||
new MockAnalyzer());
|
||||
assertEquals("(b:one +b:more) (+t:two)", q.toString());
|
||||
|
||||
String[] queries5 = { "blah" };
|
||||
try {
|
||||
q = MultiFieldQueryParserWrapper.parse(queries5, fields,
|
||||
new MockAnalyzer());
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected exception, array length differs
|
||||
}
|
||||
|
||||
// check also with stop words for this static form (qtxts[], fields[]).
|
||||
TestQueryParserWrapper.QPTestAnalyzer stopA = new TestQueryParserWrapper.QPTestAnalyzer();
|
||||
|
||||
String[] queries6 = { "((+stop))", "+((stop))" };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries6, fields, stopA);
|
||||
assertEquals("", q.toString());
|
||||
|
||||
String[] queries7 = { "one ((+stop)) +more", "+((stop)) +two" };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries7, fields, stopA);
|
||||
assertEquals("(b:one +b:more) (+t:two)", q.toString());
|
||||
|
||||
}
|
||||
|
||||
public void testStaticMethod2() throws ParseException {
|
||||
String[] fields = { "b", "t" };
|
||||
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
|
||||
BooleanClause.Occur.MUST_NOT };
|
||||
Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
|
||||
new MockAnalyzer());
|
||||
assertEquals("+b:one -t:one", q.toString());
|
||||
|
||||
q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
|
||||
new MockAnalyzer());
|
||||
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
|
||||
|
||||
try {
|
||||
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
|
||||
q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
|
||||
new MockAnalyzer());
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected exception, array length differs
|
||||
}
|
||||
}
|
||||
|
||||
public void testStaticMethod2Old() throws ParseException {
|
||||
String[] fields = { "b", "t" };
|
||||
// int[] flags = {MultiFieldQueryParserWrapper.REQUIRED_FIELD,
|
||||
// MultiFieldQueryParserWrapper.PROHIBITED_FIELD};
|
||||
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
|
||||
BooleanClause.Occur.MUST_NOT };
|
||||
|
||||
Query q = MultiFieldQueryParserWrapper.parse("one", fields, flags,
|
||||
new MockAnalyzer());// , fields, flags, new MockAnalyzer());
|
||||
assertEquals("+b:one -t:one", q.toString());
|
||||
|
||||
q = MultiFieldQueryParserWrapper.parse("one two", fields, flags,
|
||||
new MockAnalyzer());
|
||||
assertEquals("+(b:one b:two) -(t:one t:two)", q.toString());
|
||||
|
||||
try {
|
||||
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
|
||||
q = MultiFieldQueryParserWrapper.parse("blah", fields, flags2,
|
||||
new MockAnalyzer());
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected exception, array length differs
|
||||
}
|
||||
}
|
||||
|
||||
public void testStaticMethod3() throws ParseException {
|
||||
String[] queries = { "one", "two", "three" };
|
||||
String[] fields = { "f1", "f2", "f3" };
|
||||
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
|
||||
BooleanClause.Occur.MUST_NOT, BooleanClause.Occur.SHOULD };
|
||||
Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
|
||||
new MockAnalyzer());
|
||||
assertEquals("+f1:one -f2:two f3:three", q.toString());
|
||||
|
||||
try {
|
||||
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
|
||||
new MockAnalyzer());
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected exception, array length differs
|
||||
}
|
||||
}
|
||||
|
||||
public void testStaticMethod3Old() throws ParseException {
|
||||
String[] queries = { "one", "two" };
|
||||
String[] fields = { "b", "t" };
|
||||
BooleanClause.Occur[] flags = { BooleanClause.Occur.MUST,
|
||||
BooleanClause.Occur.MUST_NOT };
|
||||
Query q = MultiFieldQueryParserWrapper.parse(queries, fields, flags,
|
||||
new MockAnalyzer());
|
||||
assertEquals("+b:one -t:two", q.toString());
|
||||
|
||||
try {
|
||||
BooleanClause.Occur[] flags2 = { BooleanClause.Occur.MUST };
|
||||
q = MultiFieldQueryParserWrapper.parse(queries, fields, flags2,
|
||||
new MockAnalyzer());
|
||||
fail();
|
||||
} catch (IllegalArgumentException e) {
|
||||
// expected exception, array length differs
|
||||
}
|
||||
}
|
||||
|
||||
public void testAnalyzerReturningNull() throws ParseException {
|
||||
String[] fields = new String[] { "f1", "f2", "f3" };
|
||||
MultiFieldQueryParserWrapper parser = new MultiFieldQueryParserWrapper(
|
||||
fields, new AnalyzerReturningNull());
|
||||
Query q = parser.parse("bla AND blo");
|
||||
assertEquals("+(f2:bla f3:bla) +(f2:blo f3:blo)", q.toString());
|
||||
// the following queries are not affected as their terms are not analyzed
|
||||
// anyway:
|
||||
q = parser.parse("bla*");
|
||||
assertEquals("f1:bla* f2:bla* f3:bla*", q.toString());
|
||||
q = parser.parse("bla~");
|
||||
assertEquals("f1:bla~2.0 f2:bla~2.0 f3:bla~2.0", q.toString());
|
||||
q = parser.parse("[a TO c]");
|
||||
assertEquals("f1:[a TO c] f2:[a TO c] f3:[a TO c]", q.toString());
|
||||
}
|
||||
|
||||
public void testStopWordSearching() throws Exception {
|
||||
Analyzer analyzer = new MockAnalyzer();
|
||||
Directory ramDir = newDirectory();
|
||||
IndexWriter iw = new IndexWriter(ramDir, analyzer, true,
|
||||
IndexWriter.MaxFieldLength.LIMITED);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("body", "blah the footest blah", Field.Store.NO,
|
||||
Field.Index.ANALYZED));
|
||||
iw.addDocument(doc);
|
||||
iw.close();
|
||||
|
||||
MultiFieldQueryParserWrapper mfqp = new MultiFieldQueryParserWrapper(
|
||||
new String[] { "body" }, analyzer);
|
||||
mfqp.setDefaultOperator(QueryParserWrapper.Operator.AND);
|
||||
Query q = mfqp.parse("the footest");
|
||||
IndexSearcher is = new IndexSearcher(ramDir, true);
|
||||
ScoreDoc[] hits = is.search(q, null, 1000).scoreDocs;
|
||||
assertEquals(1, hits.length);
|
||||
is.close();
|
||||
ramDir.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return empty tokens for field "f1".
|
||||
*/
|
||||
private static class AnalyzerReturningNull extends Analyzer {
|
||||
MockAnalyzer stdAnalyzer = new MockAnalyzer();
|
||||
|
||||
public AnalyzerReturningNull() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public TokenStream tokenStream(String fieldName, Reader reader) {
|
||||
if ("f1".equals(fieldName)) {
|
||||
return new EmptyTokenStream();
|
||||
} else {
|
||||
return stdAnalyzer.tokenStream(fieldName, reader);
|
||||
}
|
||||
}
|
||||
|
||||
private static class EmptyTokenStream extends TokenStream {
|
||||
@Override
|
||||
public boolean incrementToken() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
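The wrapper removed above has a rough equivalent in the non-deprecated MultiFieldQueryParser; the sketch below is an assumption based on the 3.x API, not part of this commit.

String[] fields = { "b", "t" };
MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_40, fields, new MockAnalyzer());
Query q = parser.parse("one two");   // each clause is expanded across both fields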
|
@ -39,7 +39,6 @@ import org.apache.lucene.analysis.Tokenizer;
|
|||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.document.DateField;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
|
@ -692,12 +691,6 @@ public class TestQPHelper extends LuceneTestCase {
|
|||
ramDir.close();
|
||||
}
|
||||
|
||||
/** for testing legacy DateField support */
|
||||
private String getLegacyDate(String s) throws Exception {
|
||||
DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT);
|
||||
return DateField.dateToString(df.parse(s));
|
||||
}
|
||||
|
||||
/** for testing DateTools support */
|
||||
private String getDate(String s, DateTools.Resolution resolution)
|
||||
throws Exception {
|
||||
|
@ -708,11 +701,7 @@ public class TestQPHelper extends LuceneTestCase {
|
|||
/** for testing DateTools support */
|
||||
private String getDate(Date d, DateTools.Resolution resolution)
|
||||
throws Exception {
|
||||
if (resolution == null) {
|
||||
return DateField.dateToString(d);
|
||||
} else {
|
||||
return DateTools.dateToString(d, resolution);
|
||||
}
|
||||
return DateTools.dateToString(d, resolution);
|
||||
}
|
||||
|
||||
private String escapeDateString(String s) {
|
||||
|
@ -735,21 +724,6 @@ public class TestQPHelper extends LuceneTestCase {
|
|||
return df.format(calendar.getTime());
|
||||
}
|
||||
|
||||
/** for testing legacy DateField support */
|
||||
public void testLegacyDateRange() throws Exception {
|
||||
String startDate = getLocalizedDate(2002, 1, 1);
|
||||
String endDate = getLocalizedDate(2002, 1, 4);
|
||||
Calendar endDateExpected = new GregorianCalendar();
|
||||
endDateExpected.clear();
|
||||
endDateExpected.set(2002, 1, 4, 23, 59, 59);
|
||||
endDateExpected.set(Calendar.MILLISECOND, 999);
|
||||
assertQueryEquals("[ " + escapeDateString(startDate) + " TO " + escapeDateString(endDate) + "]", null, "["
|
||||
+ getLegacyDate(startDate) + " TO "
|
||||
+ DateField.dateToString(endDateExpected.getTime()) + "]");
|
||||
assertQueryEquals("{ " + escapeDateString(startDate) + " " + escapeDateString(endDate) + " }", null, "{"
|
||||
+ getLegacyDate(startDate) + " TO " + getLegacyDate(endDate) + "}");
|
||||
}
|
||||
|
||||
public void testDateRange() throws Exception {
|
||||
String startDate = getLocalizedDate(2002, 1, 1);
|
||||
String endDate = getLocalizedDate(2002, 1, 4);
|
||||
|
@ -762,20 +736,12 @@ public class TestQPHelper extends LuceneTestCase {
|
|||
final String hourField = "hour";
|
||||
StandardQueryParser qp = new StandardQueryParser();
|
||||
|
||||
// Don't set any date resolution and verify if DateField is used
|
||||
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
|
||||
endDateExpected.getTime(), null);
|
||||
|
||||
Map<CharSequence, DateTools.Resolution> dateRes = new HashMap<CharSequence, DateTools.Resolution>();
|
||||
|
||||
// set a field specific date resolution
|
||||
dateRes.put(monthField, DateTools.Resolution.MONTH);
|
||||
qp.setDateResolution(dateRes);
|
||||
|
||||
// DateField should still be used for defaultField
|
||||
assertDateRangeQueryEquals(qp, defaultField, startDate, endDate,
|
||||
endDateExpected.getTime(), null);
|
||||
|
||||
// set default date resolution to MILLISECOND
|
||||
qp.setDateResolution(DateTools.Resolution.MILLISECOND);
|
||||
|
||||
|
@ -1075,22 +1041,35 @@ public class TestQPHelper extends LuceneTestCase {
|
|||
assertEquals(query1, query2);
|
||||
}
|
||||
|
||||
public void testLocalDateFormat() throws IOException, QueryNodeException {
|
||||
Directory ramDir = newDirectory();
|
||||
IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
|
||||
addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
|
||||
addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
|
||||
iw.close();
|
||||
IndexSearcher is = new IndexSearcher(ramDir, true);
|
||||
assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
|
||||
assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
|
||||
assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
|
||||
assertHits(1, "{12/1/2005 TO 12/3/2005}", is);
|
||||
assertHits(1, "{12/1/2005 TO 12/4/2005}", is);
|
||||
assertHits(0, "{12/3/2005 TO 12/4/2005}", is);
|
||||
is.close();
|
||||
ramDir.close();
|
||||
}
|
||||
// Todo (nocommit): Convert from DateField to DateUtil
|
||||
// public void testLocalDateFormat() throws IOException, QueryNodeException {
|
||||
// Directory ramDir = newDirectory();
|
||||
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));
|
||||
// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
|
||||
// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
|
||||
// iw.close();
|
||||
// IndexSearcher is = new IndexSearcher(ramDir, true);
|
||||
// assertHits(1, "[12/1/2005 TO 12/3/2005]", is);
|
||||
// assertHits(2, "[12/1/2005 TO 12/4/2005]", is);
|
||||
// assertHits(1, "[12/3/2005 TO 12/4/2005]", is);
|
||||
// assertHits(1, "{12/1/2005 TO 12/3/2005}", is);
|
||||
// assertHits(1, "{12/1/2005 TO 12/4/2005}", is);
|
||||
// assertHits(0, "{12/3/2005 TO 12/4/2005}", is);
|
||||
// is.close();
|
||||
// ramDir.close();
|
||||
// }
|
||||
//
|
||||
// private void addDateDoc(String content, int year, int month, int day,
|
||||
// int hour, int minute, int second, IndexWriter iw) throws IOException {
|
||||
// Document d = new Document();
|
||||
// d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
|
||||
// Calendar cal = Calendar.getInstance(Locale.ENGLISH);
|
||||
// cal.set(year, month - 1, day, hour, minute, second);
|
||||
// d.add(newField("date", DateField.dateToString(cal.getTime()),
|
||||
// Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
// iw.addDocument(d);
|
||||
// }
|
||||
|
||||
|
||||
public void testStarParsing() throws Exception {
|
||||
// final int[] type = new int[1];
|
||||
|
@ -1251,17 +1230,6 @@ public class TestQPHelper extends LuceneTestCase {
|
|||
assertEquals(expected, hits.length);
|
||||
}
|
||||
|
||||
private void addDateDoc(String content, int year, int month, int day,
|
||||
int hour, int minute, int second, IndexWriter iw) throws IOException {
|
||||
Document d = new Document();
|
||||
d.add(newField("f", content, Field.Store.YES, Field.Index.ANALYZED));
|
||||
Calendar cal = Calendar.getInstance(Locale.ENGLISH);
|
||||
cal.set(year, month - 1, day, hour, minute, second);
|
||||
d.add(newField("date", DateField.dateToString(cal.getTime()),
|
||||
Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
iw.addDocument(d);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
BooleanQuery.setMaxClauseCount(originalMaxClauses);
|
||||
|
|
File diff suppressed because it is too large
|
@ -76,7 +76,7 @@ public class TestRemoteSearchable extends RemoteTestCase {
|
|||
document = searcher.doc(0, fs);
|
||||
assertTrue("document is null and it shouldn't be", document != null);
|
||||
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
|
||||
fs = new MapFieldSelector(new String[]{"other"});
|
||||
fs = new MapFieldSelector("other");
|
||||
document = searcher.doc(0, fs);
|
||||
assertTrue("document is null and it shouldn't be", document != null);
|
||||
assertTrue("document.getFields() Size: " + document.getFields().size() + " is not: " + 1, document.getFields().size() == 1);
|
||||
|
|
|
@ -84,10 +84,12 @@ public class TestRemoteSort extends RemoteTestCase {
|
|||
@BeforeClass
|
||||
public static void beforeClass() throws Exception {
|
||||
indexStore = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(1000);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
indexStore,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(1000))
|
||||
);
|
||||
for (int i=0; i<data.length; ++i) {
|
||||
Document doc = new Document();
|
||||
doc.add (new Field ("tracer", data[i][0], Field.Store.YES, Field.Index.NO));
|
||||
|
@ -217,7 +219,7 @@ public class TestRemoteSort extends RemoteTestCase {
|
|||
@Test
|
||||
public void testRemoteSort() throws Exception {
|
||||
Searchable searcher = lookupRemote();
|
||||
MultiSearcher multi = new MultiSearcher (new Searchable[] { searcher });
|
||||
MultiSearcher multi = new MultiSearcher (searcher);
|
||||
runMultiSorts(multi, true); // this runs on the full index
|
||||
}
|
||||
|
||||
|
@ -255,7 +257,7 @@ public class TestRemoteSort extends RemoteTestCase {
|
|||
HashMap<String,Float> scoresA = getScores (full.search (queryA, null, 1000).scoreDocs, full);
|
||||
|
||||
// we'll test searching locally, remote and multi
|
||||
MultiSearcher remote = new MultiSearcher (new Searchable[] { lookupRemote() });
|
||||
MultiSearcher remote = new MultiSearcher (lookupRemote());
|
||||
|
||||
// change sorting and make sure relevancy stays the same
|
||||
|
||||
|
|
|
@ -1,126 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.lucene.spatial.geometry.shape;
|
||||
|
||||
/**
|
||||
* Imported from mq java client. No changes made.
|
||||
*
|
||||
* <p><font color="red"><b>NOTE:</b> This API is still in
|
||||
* flux and might change in incompatible ways in the next
|
||||
* release.</font>
|
||||
*
|
||||
* @deprecated This has been replaced with more accurate
|
||||
* math in {@link LLRect}. This class will be removed in a future release.
|
||||
*/
|
||||
@Deprecated
|
||||
public class DistanceApproximation
|
||||
{
|
||||
private double m_testLat;
|
||||
private double m_testLng;
|
||||
private double m_mpd;
|
||||
private static final double m_milesPerLngDeg[]={
|
||||
69.170976f, 69.160441f, 69.128838f, 69.076177f, 69.002475f,
|
||||
68.907753f, 68.792041f, 68.655373f, 68.497792f, 68.319345f,
|
||||
68.120088f, 67.900079f, 67.659387f, 67.398085f, 67.116253f,
|
||||
66.813976f, 66.491346f, 66.148462f, 65.785428f, 65.402355f,
|
||||
64.999359f, 64.576564f, 64.134098f, 63.672096f, 63.190698f,
|
||||
62.690052f, 62.170310f, 61.631630f, 61.074176f, 60.498118f,
|
||||
59.903632f, 59.290899f, 58.660106f, 58.011443f, 57.345111f,
|
||||
56.661310f, 55.960250f, 55.242144f, 54.507211f, 53.755675f,
|
||||
52.987764f, 52.203713f, 51.403761f, 50.588151f, 49.757131f,
|
||||
48.910956f, 48.049882f, 47.174172f, 46.284093f, 45.379915f,
|
||||
44.461915f, 43.530372f, 42.585570f, 41.627796f, 40.657342f,
|
||||
39.674504f, 38.679582f, 37.672877f, 36.654698f, 35.625354f,
|
||||
34.585159f, 33.534429f, 32.473485f, 31.402650f, 30.322249f,
|
||||
29.232613f, 28.134073f, 27.026963f, 25.911621f, 24.788387f,
|
||||
23.657602f, 22.519612f, 21.374762f, 20.223401f, 19.065881f,
|
||||
17.902554f, 16.733774f, 15.559897f, 14.381280f, 13.198283f,
|
||||
12.011266f, 10.820591f, 9.626619f, 8.429716f, 7.230245f,
|
||||
6.028572f, 4.825062f, 3.620083f, 2.414002f, 1.207185f,
|
||||
1.000000f};
|
||||
|
||||
public static final double MILES_PER_LATITUDE = 69.170976f;
|
||||
public static final double KILOMETERS_PER_MILE = 1.609347f;
|
||||
|
||||
|
||||
public DistanceApproximation()
|
||||
{
|
||||
}
|
||||
|
||||
public void setTestPoint(double lat, double lng)
|
||||
{
|
||||
m_testLat = lat;
|
||||
m_testLng = lng;
|
||||
m_mpd = m_milesPerLngDeg[(int)(Math.abs(lat) + 0.5f)];
|
||||
}
|
||||
|
||||
// Approximate arc distance between 2 lat,lng positions using miles per
|
||||
// latitude and longitude degree
|
||||
public double getDistanceSq(double lat, double lng)
|
||||
{
|
||||
double latMiles = (lat - m_testLat) * MILES_PER_LATITUDE;
|
||||
|
||||
// Approximate longitude miles using the miles per degree assuming the
|
||||
// middle latitude/longitude. This is less accurate at high (near
|
||||
// polar) latitudes but no road network is present at the poles!
|
||||
// If we ever have any roads crossing the international date line we will
|
||||
// have to worry about that case.
|
||||
double lngMiles = (lng - m_testLng) * m_mpd;
|
||||
|
||||
// Find the squared distance by the Pythagorean theorem (without sqrt)
|
||||
return (latMiles * latMiles + lngMiles * lngMiles);
|
||||
}
|
||||
|
||||
// Approximate arc distance between a segment (with lat,lng endpoints) and
|
||||
// the test position
|
||||
public double getDistanceSq(double lat1, double lng1, double lat2, double lng2)
|
||||
{
|
||||
// Check if lat1,lng1 is closest point. Construct a vector from point1
|
||||
// to point2 (v1) and another from point 1 to the test point (v2).
|
||||
// If dot product is negative then point 1 is the closest point
|
||||
double v1y = lat2 - lat1;
|
||||
double v1x = lng2 - lng1;
|
||||
double v2y = m_testLat - lat1;
|
||||
double v2x = m_testLng - lng1;
|
||||
double dot = v1x * v2x + v1y * v2y;
|
||||
if (dot <= 0.0f)
|
||||
return getDistanceSq(lat1, lng1);
|
||||
|
||||
// Get the component of vector v2 along v1. If component is greater
|
||||
// than 1 then the endpoint is the closest point.
|
||||
double c = dot / (v1x * v1x + v1y * v1y);
|
||||
if (c >= 1.0f)
|
||||
return getDistanceSq(lat2, lng2);
|
||||
|
||||
// Since we are working in lat,lng space we need to find the point
|
||||
// along p1->p2 such that q->pt is perpendicular to p1->p2. We
|
||||
// then find the distance squared between q and pt.
|
||||
return getDistanceSq((lat1 + v1y * c), (lng1 + v1x * c));
|
||||
}
|
||||
|
||||
// Return the number of miles per degree of longitude
|
||||
public static double getMilesPerLngDeg(double lat)
|
||||
{
|
||||
return (Math.abs(lat) <= 90.0) ? m_milesPerLngDeg[(int)(Math.abs(lat) + 0.5f)] : 69.170976f;
|
||||
}
|
||||
|
||||
public static double getMilesPerLatDeg() {
|
||||
return MILES_PER_LATITUDE;
|
||||
}
|
||||
}
|
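A short usage sketch of the removed class, for context; the coordinates below are illustrative only.

DistanceApproximation approx = new DistanceApproximation();
approx.setTestPoint(38.95, -77.40);                    // fix the reference point first
double milesSq = approx.getDistanceSq(39.00, -77.45);  // squared distance in miles
double miles = Math.sqrt(milesSq);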
||||
|
|
@ -28,7 +28,7 @@ package org.apache.lucene.spatial.tier.projections;
|
|||
* flux and might change in incompatible ways in the next
|
||||
* release.</font>
|
||||
*
|
||||
* @deprecated Until we can put in place proper tests and a proper fix.
|
||||
* @deprecated (3.1) Until we can put in place proper tests and a proper fix.
|
||||
*/
|
||||
@Deprecated
|
||||
public class SinusoidalProjector implements IProjector {
|
||||
|
|
|
@ -58,7 +58,7 @@ public class TestParser extends LuceneTestCase {
|
|||
|
||||
BufferedReader d = new BufferedReader(new InputStreamReader(TestParser.class.getResourceAsStream("reuters21578.txt")));
|
||||
dir=newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_24, analyzer));
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_40, analyzer));
|
||||
String line = d.readLine();
|
||||
while(line!=null)
|
||||
{
|
||||
|
|
|
@ -29,7 +29,6 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
|
||||
import org.apache.lucene.search.Filter;
|
||||
import org.apache.lucene.search.NumericRangeFilter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
|||
import org.apache.lucene.util.AttributeSource;
|
||||
import org.apache.lucene.util.CharacterUtils;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.apache.lucene.util.VirtualMethod;
|
||||
import org.apache.lucene.util.CharacterUtils.CharacterBuffer;
|
||||
|
||||
/**
|
||||
|
@ -78,8 +77,6 @@ public abstract class CharTokenizer extends Tokenizer {
|
|||
public CharTokenizer(Version matchVersion, Reader input) {
|
||||
super(input);
|
||||
charUtils = CharacterUtils.getInstance(matchVersion);
|
||||
useOldAPI = useOldAPI(matchVersion);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -96,7 +93,6 @@ public abstract class CharTokenizer extends Tokenizer {
|
|||
Reader input) {
|
||||
super(source, input);
|
||||
charUtils = CharacterUtils.getInstance(matchVersion);
|
||||
useOldAPI = useOldAPI(matchVersion);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -113,147 +109,30 @@ public abstract class CharTokenizer extends Tokenizer {
|
|||
Reader input) {
|
||||
super(factory, input);
|
||||
charUtils = CharacterUtils.getInstance(matchVersion);
|
||||
useOldAPI = useOldAPI(matchVersion);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link CharTokenizer} instance
|
||||
* @param input the input to split up into tokens
|
||||
* @deprecated use {@link #CharTokenizer(Version, Reader)} instead. This will be
|
||||
* removed in Lucene 4.0.
|
||||
*/
|
||||
@Deprecated
|
||||
public CharTokenizer(Reader input) {
|
||||
this(Version.LUCENE_30, input);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link CharTokenizer} instance
|
||||
* @param input the input to split up into tokens
|
||||
* @param source the attribute source to use for this {@link Tokenizer}
|
||||
* @deprecated use {@link #CharTokenizer(Version, AttributeSource, Reader)} instead. This will be
|
||||
* removed in Lucene 4.0.
|
||||
*/
|
||||
@Deprecated
|
||||
public CharTokenizer(AttributeSource source, Reader input) {
|
||||
this(Version.LUCENE_30, source, input);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new {@link CharTokenizer} instance
|
||||
* @param input the input to split up into tokens
|
||||
* @param factory the attribute factory to use for this {@link Tokenizer}
|
||||
* @deprecated use {@link #CharTokenizer(Version, AttributeSource.AttributeFactory, Reader)} instead. This will be
|
||||
* removed in Lucene 4.0.
|
||||
*/
|
||||
@Deprecated
|
||||
public CharTokenizer(AttributeFactory factory, Reader input) {
|
||||
this(Version.LUCENE_30, factory, input);
|
||||
}
|
||||
|
||||
private int offset = 0, bufferIndex = 0, dataLen = 0, finalOffset = 0;
|
||||
private static final int MAX_WORD_LEN = 255;
|
||||
private static final int IO_BUFFER_SIZE = 4096;
|
||||
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);;
|
||||
private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
|
||||
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
|
||||
|
||||
private final CharacterUtils charUtils;
|
||||
private final CharacterBuffer ioBuffer = CharacterUtils.newCharacterBuffer(IO_BUFFER_SIZE);
|
||||
|
||||
/**
|
||||
* @deprecated this will be removed in lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
private final boolean useOldAPI;
|
||||
|
||||
/**
|
||||
* @deprecated this will be removed in lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
private static final VirtualMethod<CharTokenizer> isTokenCharMethod =
|
||||
new VirtualMethod<CharTokenizer>(CharTokenizer.class, "isTokenChar", char.class);
|
||||
|
||||
/**
|
||||
* @deprecated this will be removed in lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
private static final VirtualMethod<CharTokenizer> normalizeMethod =
|
||||
new VirtualMethod<CharTokenizer>(CharTokenizer.class, "normalize", char.class);
|
||||
|
||||
/**
|
||||
* Returns true iff a UTF-16 code unit should be included in a token. This
|
||||
* tokenizer generates as tokens adjacent sequences of characters which
|
||||
* satisfy this predicate. Characters for which this is <code>false</code> are
|
||||
* used to define token boundaries and are not included in tokens.
|
||||
* <p>
|
||||
* Note: This method cannot handle <a href=
|
||||
* "http://java.sun.com/j2se/1.5.0/docs/api/java/lang/Character.html#supplementary"
|
||||
* >supplementary characters</a>. To support all Unicode characters, including
|
||||
* supplementary characters, use the {@link #isTokenChar(int)} method.
|
||||
* </p>
|
||||
*
|
||||
* @deprecated use {@link #isTokenChar(int)} instead. This method will be
|
||||
* removed in Lucene 4.0.
|
||||
*/
|
||||
@Deprecated
|
||||
protected boolean isTokenChar(char c) {
|
||||
return isTokenChar((int)c);
|
||||
}
|
||||
|
||||
/**
|
||||
* Called on each token UTF-16 code unit to normalize it before it is added to the
|
||||
* token. The default implementation does nothing. Subclasses may use this to,
|
||||
* e.g., lowercase tokens.
|
||||
* <p>
|
||||
* Note: This method cannot handle <a href=
|
||||
* "http://java.sun.com/j2se/1.5.0/docs/api/java/lang/Character.html#supplementary"
|
||||
* >supplementary characters</a>. To support all Unicode characters, including
|
||||
* supplementary characters, use the {@link #normalize(int)} method.
|
||||
* </p>
|
||||
*
|
||||
* @deprecated use {@link #normalize(int)} instead. This method will be
|
||||
* removed in Lucene 4.0.
|
||||
*/
|
||||
@Deprecated
|
||||
protected char normalize(char c) {
|
||||
return (char) normalize((int) c);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true iff a codepoint should be included in a token. This tokenizer
|
||||
* generates as tokens adjacent sequences of codepoints which satisfy this
|
||||
* predicate. Codepoints for which this is false are used to define token
|
||||
* boundaries and are not included in tokens.
|
||||
* <p>
|
||||
* As of Lucene 3.1 the char based API ({@link #isTokenChar(char)} and
|
||||
* {@link #normalize(char)}) has been deprecated in favor of a Unicode 4.0
|
||||
* compatible int based API to support codepoints instead of UTF-16 code
|
||||
* units. Subclasses of {@link CharTokenizer} must not override the char based
|
||||
* methods if a {@link Version} >= 3.1 is passed to the constructor.
|
||||
* <p>
|
||||
* <p>
|
||||
* NOTE: This method will be marked <i>abstract</i> in Lucene 4.0.
|
||||
* </p>
|
||||
*/
|
||||
protected boolean isTokenChar(int c) {
|
||||
throw new UnsupportedOperationException("since LUCENE_31 subclasses of CharTokenizer must implement isTokenChar(int)");
|
||||
}
|
||||
protected abstract boolean isTokenChar(int c);
|
||||
|
||||
/**
|
||||
* Called on each token character to normalize it before it is added to the
|
||||
* token. The default implementation does nothing. Subclasses may use this to,
|
||||
* e.g., lowercase tokens.
|
||||
* <p>
|
||||
* As of Lucene 3.1 the char based API ({@link #isTokenChar(char)} and
|
||||
* {@link #normalize(char)}) has been deprecated in favor of a Unicode 4.0
|
||||
* compatible int based API to support codepoints instead of UTF-16 code
|
||||
* units. Subclasses of {@link CharTokenizer} must not override the char based
|
||||
* methods if a {@link Version} >= 3.1 is passed to the constructor.
|
||||
* <p>
|
||||
* <p>
|
||||
* NOTE: This method will be marked <i>abstract</i> in Lucene 4.0.
|
||||
* </p>
|
||||
*/
|
||||
protected int normalize(int c) {
|
||||
return c;
|
||||
|
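// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): with the deprecated char-based
// isTokenChar(char)/normalize(char) removed, a CharTokenizer subclass supplies
// only the int-based, codepoint-aware methods described above. The class name
// is hypothetical and the constructor assumes the 3.x (Version, Reader) form.
import java.io.Reader;
import org.apache.lucene.analysis.CharTokenizer;
import org.apache.lucene.util.Version;

public final class LowerCaseLetterTokenizer extends CharTokenizer {
  public LowerCaseLetterTokenizer(Version matchVersion, Reader in) {
    super(matchVersion, in);
  }
  @Override
  protected boolean isTokenChar(int c) {
    return Character.isLetter(c);        // codepoint overload: handles supplementary characters
  }
  @Override
  protected int normalize(int c) {
    return Character.toLowerCase(c);     // lowercase each codepoint as it is buffered
  }
}
// ---------------------------------------------------------------------------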
@@ -262,8 +141,6 @@ public abstract class CharTokenizer extends Tokenizer {
|
|||
@Override
|
||||
public final boolean incrementToken() throws IOException {
|
||||
clearAttributes();
|
||||
if(useOldAPI) // TODO remove this in LUCENE 4.0
|
||||
return incrementTokenOld();
|
||||
int length = 0;
|
||||
int start = -1; // this variable is always initialized
|
||||
char[] buffer = termAtt.buffer();
|
||||
|
@@ -307,62 +184,6 @@ public abstract class CharTokenizer extends Tokenizer {
|
|||
|
||||
}
|
||||
|
||||
/**
|
||||
* The <= 3.0 version of incrementToken. This is a backwards compat implementation used
|
||||
* if a version <= 3.0 is provided to the ctor.
|
||||
* @deprecated remove in 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
private boolean incrementTokenOld() throws IOException {
|
||||
int length = 0;
|
||||
int start = -1; // this variable is always initialized
|
||||
char[] buffer = termAtt.buffer();
|
||||
final char[] oldIoBuffer = ioBuffer.getBuffer();
|
||||
while (true) {
|
||||
|
||||
if (bufferIndex >= dataLen) {
|
||||
offset += dataLen;
|
||||
dataLen = input.read(oldIoBuffer);
|
||||
if (dataLen == -1) {
|
||||
dataLen = 0; // so next offset += dataLen won't decrement offset
|
||||
if (length > 0) {
|
||||
break;
|
||||
} else {
|
||||
finalOffset = correctOffset(offset);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
bufferIndex = 0;
|
||||
}
|
||||
|
||||
final char c = oldIoBuffer[bufferIndex++];
|
||||
|
||||
if (isTokenChar(c)) { // if it's a token char
|
||||
|
||||
if (length == 0) { // start of token
|
||||
assert start == -1;
|
||||
start = offset + bufferIndex - 1;
|
||||
} else if (length == buffer.length) {
|
||||
buffer = termAtt.resizeBuffer(1+length);
|
||||
}
|
||||
|
||||
buffer[length++] = normalize(c); // buffer it, normalized
|
||||
|
||||
if (length == MAX_WORD_LEN) // buffer overflow!
|
||||
break;
|
||||
|
||||
} else if (length > 0) // at non-Letter w/ chars
|
||||
break; // return 'em
|
||||
}
|
||||
|
||||
termAtt.setLength(length);
|
||||
assert start != -1;
|
||||
offsetAtt.setOffset(correctOffset(start), correctOffset(start+length));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@Override
|
||||
public final void end() {
|
||||
// set final offset
|
||||
|
@@ -378,17 +199,4 @@ public abstract class CharTokenizer extends Tokenizer {
|
|||
finalOffset = 0;
|
||||
ioBuffer.reset(); // make sure to reset the IO buffer!!
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated this will be removed in lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
private boolean useOldAPI(Version matchVersion) {
|
||||
final Class<? extends CharTokenizer> clazz = this.getClass();
|
||||
if (matchVersion.onOrAfter(Version.LUCENE_31)
|
||||
&& (isTokenCharMethod.isOverriddenAsOf(clazz) || normalizeMethod
|
||||
.isOverriddenAsOf(clazz))) throw new IllegalArgumentException(
|
||||
"For matchVersion >= LUCENE_31, CharTokenizer subclasses must not override isTokenChar(char) or normalize(char).");
|
||||
return !matchVersion.onOrAfter(Version.LUCENE_31);
|
||||
}
|
||||
}
|
|
@@ -1,123 +0,0 @@
|
|||
package org.apache.lucene.document;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.search.PrefixQuery;
|
||||
import org.apache.lucene.search.TermRangeQuery;
|
||||
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
|
||||
import org.apache.lucene.util.NumericUtils; // for javadocs
|
||||
|
||||
import java.util.Date; // for javadoc
|
||||
import java.util.Calendar; // for javadoc
|
||||
|
||||
// do not remove in 3.0, needed for reading old indexes!
|
||||
|
||||
/**
|
||||
* Provides support for converting dates to strings and vice-versa.
|
||||
* The strings are structured so that lexicographic sorting orders by date,
|
||||
* which makes them suitable for use as field values and search terms.
|
||||
*
|
||||
* <P>Note that this class saves dates with millisecond granularity,
|
||||
* which is bad for {@link TermRangeQuery} and {@link PrefixQuery}, as those
|
||||
* queries are expanded to a BooleanQuery with a potentially large number
|
||||
* of terms when searching. Thus you might want to use
|
||||
* {@link DateTools} instead.
|
||||
*
|
||||
* <P>
|
||||
* Note: dates before 1970 cannot be used, and therefore cannot be
|
||||
* indexed when using this class. See {@link DateTools} for an
|
||||
* alternative without such a limitation.
|
||||
*
|
||||
* <P>
|
||||
* Another approach is {@link NumericUtils}, which provides
|
||||
* a sortable binary representation (prefix encoded) of numeric values, which
|
||||
* date/time are.
|
||||
* For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
|
||||
* <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
|
||||
* index this as a numeric value with {@link NumericField}
|
||||
* and use {@link NumericRangeQuery} to query it.
|
||||
*
|
||||
* @deprecated If you build a new index, use {@link DateTools} or
|
||||
* {@link NumericField} instead.
|
||||
* This class is included for use with existing
|
||||
* indices and will be removed in a future release (possibly Lucene 4.0).
|
||||
*/
|
||||
@Deprecated
|
||||
public class DateField {
|
||||
|
||||
private DateField() {}
|
||||
|
||||
// make date strings long enough to last a millennium
|
||||
private static int DATE_LEN = Long.toString(1000L*365*24*60*60*1000,
|
||||
Character.MAX_RADIX).length();
|
||||
|
||||
public static String MIN_DATE_STRING() {
|
||||
return timeToString(0);
|
||||
}
|
||||
|
||||
public static String MAX_DATE_STRING() {
|
||||
char[] buffer = new char[DATE_LEN];
|
||||
char c = Character.forDigit(Character.MAX_RADIX-1, Character.MAX_RADIX);
|
||||
for (int i = 0 ; i < DATE_LEN; i++)
|
||||
buffer[i] = c;
|
||||
return new String(buffer);
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a Date to a string suitable for indexing.
|
||||
* @throws RuntimeException if the date specified in the
|
||||
* method argument is before 1970
|
||||
*/
|
||||
public static String dateToString(Date date) {
|
||||
return timeToString(date.getTime());
|
||||
}
|
||||
/**
|
||||
* Converts a millisecond time to a string suitable for indexing.
|
||||
* @throws RuntimeException if the time specified in the
|
||||
* method argument is negative, that is, before 1970
|
||||
*/
|
||||
public static String timeToString(long time) {
|
||||
if (time < 0)
|
||||
throw new RuntimeException("time '" + time + "' is too early, must be >= 0");
|
||||
|
||||
String s = Long.toString(time, Character.MAX_RADIX);
|
||||
|
||||
if (s.length() > DATE_LEN)
|
||||
throw new RuntimeException("time '" + time + "' is too late, length of string " +
|
||||
"representation must be <= " + DATE_LEN);
|
||||
|
||||
// Pad with leading zeros
|
||||
if (s.length() < DATE_LEN) {
|
||||
StringBuilder sb = new StringBuilder(s);
|
||||
while (sb.length() < DATE_LEN)
|
||||
sb.insert(0, 0);
|
||||
s = sb.toString();
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
/** Converts a string-encoded date into a millisecond time. */
|
||||
public static long stringToTime(String s) {
|
||||
return Long.parseLong(s, Character.MAX_RADIX);
|
||||
}
|
||||
/** Converts a string-encoded date into a Date object. */
|
||||
public static Date stringToDate(String s) {
|
||||
return new Date(stringToTime(s));
|
||||
}
|
||||
}
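// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the replacement recommended in
// the DateField javadoc above -- store the date as a numeric long and query it
// with NumericRangeQuery. The field name "created" and the helper class are
// hypothetical.
import java.util.Date;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.search.NumericRangeQuery;

class DateAsNumericSketch {
  static Document docFor(Date date) {
    Document doc = new Document();
    // index the millisecond timestamp; trie-encoded, so range queries stay cheap
    doc.add(new NumericField("created").setLongValue(date.getTime()));
    return doc;
  }

  static NumericRangeQuery<Long> createdBetween(long minMillis, long maxMillis) {
    // both endpoints inclusive
    return NumericRangeQuery.newLongRange("created", minMillis, maxMillis, true, true);
  }
}
// ---------------------------------------------------------------------------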
|
|
@@ -36,10 +36,6 @@ import org.apache.lucene.util.NumericUtils; // for javadocs
|
|||
* save dates with a finer resolution than you really need, as then
|
||||
* RangeQuery and PrefixQuery will require more memory and become slower.
|
||||
*
|
||||
* <P>Compared to {@link DateField} the strings generated by the methods
|
||||
* in this class take slightly more space, unless your selected resolution
|
||||
* is set to <code>Resolution.DAY</code> or lower.
|
||||
*
|
||||
* <P>
|
||||
* Another approach is {@link NumericUtils}, which provides
|
||||
* a sortable binary representation (prefix encoded) of numeric values, which
|
||||
|
|
|
@@ -17,13 +17,13 @@ package org.apache.lucene.document;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.index.IndexWriter; // for javadoc
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
import java.io.Reader;
|
||||
import java.io.Serializable;
|
||||
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
/**
|
||||
A field is a section of a Document. Each field has two parts, a name and a
|
||||
value. Values may be free text, provided as a String or as a Reader, or they
|
||||
|
@@ -516,24 +516,6 @@ public final class Field extends AbstractField implements Fieldable, Serializabl
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
* Create a stored field with binary value. Optionally the value may be compressed.
|
||||
*
|
||||
* @param name The name of the field
|
||||
* @param value The binary value
|
||||
* @param store Must be Store.YES
|
||||
* @throws IllegalArgumentException if store is <code>Store.NO</code>
|
||||
* @deprecated Use {@link #Field(String, byte[])} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public Field(String name, byte[] value, Store store) {
|
||||
this(name, value, 0, value.length);
|
||||
|
||||
if (store == Store.NO) {
|
||||
throw new IllegalArgumentException("binary values can't be unstored");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a stored field with binary value. Optionally the value may be compressed.
|
||||
*
|
||||
|
@@ -544,26 +526,6 @@ public final class Field extends AbstractField implements Fieldable, Serializabl
|
|||
this(name, value, 0, value.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a stored field with binary value. Optionally the value may be compressed.
|
||||
*
|
||||
* @param name The name of the field
|
||||
* @param value The binary value
|
||||
* @param offset Starting offset in value where this Field's bytes are
|
||||
* @param length Number of bytes to use for this Field, starting at offset
|
||||
* @param store How <code>value</code> should be stored (compressed or not)
|
||||
* @throws IllegalArgumentException if store is <code>Store.NO</code>
|
||||
* @deprecated Use {@link #Field(String, byte[], int, int)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public Field(String name, byte[] value, int offset, int length, Store store) {
|
||||
this(name, value, offset, length);
|
||||
|
||||
if (store == Store.NO) {
|
||||
throw new IllegalArgumentException("binary values can't be unstored");
|
||||
}
|
||||
}
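// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the replacements named in the
// @deprecated tags above. Binary field values are always stored, so the Store
// argument disappears. Field names, the byte array, and the helper class are
// hypothetical.
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;

class BinaryFieldSketch {
  static void addThumbnail(Document doc, byte[] thumbnail) {
    doc.add(new Field("thumb", thumbnail));               // whole array, stored
    doc.add(new Field("thumbSlice", thumbnail, 1, 2));    // offset/length variant, stored
  }
}
// ---------------------------------------------------------------------------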
|
||||
|
||||
/**
|
||||
* Create a stored field with binary value. Optionally the value may be compressed.
|
||||
*
|
||||
|
|
|
@@ -1,140 +0,0 @@
|
|||
package org.apache.lucene.document;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.document.NumericField; // for javadocs
|
||||
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
|
||||
import org.apache.lucene.util.NumericUtils; // for javadocs
|
||||
|
||||
// do not remove this class in 3.0, it may be needed to decode old indexes!
|
||||
|
||||
/**
|
||||
* Provides support for converting longs to Strings, and back again. The strings
|
||||
* are structured so that lexicographic sorting order is preserved.
|
||||
*
|
||||
* <p>
|
||||
* That is, if l1 is less than l2 for any two longs l1 and l2, then
|
||||
* NumberTools.longToString(l1) is lexicographically less than
|
||||
* NumberTools.longToString(l2). (Similarly for "greater than" and "equals".)
|
||||
*
|
||||
* <p>
|
||||
* This class handles <b>all</b> long values (unlike
|
||||
* {@link org.apache.lucene.document.DateField}).
|
||||
*
|
||||
* @deprecated For new indexes use {@link NumericUtils} instead, which
|
||||
* provides a sortable binary representation (prefix encoded) of numeric
|
||||
* values.
|
||||
* To index and efficiently query numeric values use {@link NumericField}
|
||||
* and {@link NumericRangeQuery}.
|
||||
* This class is included for use with existing
|
||||
* indices and will be removed in a future release (possibly Lucene 4.0).
|
||||
*/
|
||||
@Deprecated
|
||||
public class NumberTools {
|
||||
|
||||
private static final int RADIX = 36;
|
||||
|
||||
private static final char NEGATIVE_PREFIX = '-';
|
||||
|
||||
// NB: NEGATIVE_PREFIX must be < POSITIVE_PREFIX
|
||||
private static final char POSITIVE_PREFIX = '0';
|
||||
|
||||
//NB: this must be less than
|
||||
/**
|
||||
* Equivalent to longToString(Long.MIN_VALUE)
|
||||
*/
|
||||
public static final String MIN_STRING_VALUE = NEGATIVE_PREFIX
|
||||
+ "0000000000000";
|
||||
|
||||
/**
|
||||
* Equivalent to longToString(Long.MAX_VALUE)
|
||||
*/
|
||||
public static final String MAX_STRING_VALUE = POSITIVE_PREFIX
|
||||
+ "1y2p0ij32e8e7";
|
||||
|
||||
/**
|
||||
* The length of (all) strings returned by {@link #longToString}
|
||||
*/
|
||||
public static final int STR_SIZE = MIN_STRING_VALUE.length();
|
||||
|
||||
/**
|
||||
* Converts a long to a String suitable for indexing.
|
||||
*/
|
||||
public static String longToString(long l) {
|
||||
|
||||
if (l == Long.MIN_VALUE) {
|
||||
// special case, because long is not symmetric around zero
|
||||
return MIN_STRING_VALUE;
|
||||
}
|
||||
|
||||
StringBuilder buf = new StringBuilder(STR_SIZE);
|
||||
|
||||
if (l < 0) {
|
||||
buf.append(NEGATIVE_PREFIX);
|
||||
l = Long.MAX_VALUE + l + 1;
|
||||
} else {
|
||||
buf.append(POSITIVE_PREFIX);
|
||||
}
|
||||
String num = Long.toString(l, RADIX);
|
||||
|
||||
int padLen = STR_SIZE - num.length() - buf.length();
|
||||
while (padLen-- > 0) {
|
||||
buf.append('0');
|
||||
}
|
||||
buf.append(num);
|
||||
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a String that was returned by {@link #longToString} back to a
|
||||
* long.
|
||||
*
|
||||
* @throws IllegalArgumentException
|
||||
* if the input is null
|
||||
* @throws NumberFormatException
|
||||
* if the input does not parse (it was not a String returned by
|
||||
* longToString()).
|
||||
*/
|
||||
public static long stringToLong(String str) {
|
||||
if (str == null) {
|
||||
throw new NullPointerException("string cannot be null");
|
||||
}
|
||||
if (str.length() != STR_SIZE) {
|
||||
throw new NumberFormatException("string is the wrong size");
|
||||
}
|
||||
|
||||
if (str.equals(MIN_STRING_VALUE)) {
|
||||
return Long.MIN_VALUE;
|
||||
}
|
||||
|
||||
char prefix = str.charAt(0);
|
||||
long l = Long.parseLong(str.substring(1), RADIX);
|
||||
|
||||
if (prefix == POSITIVE_PREFIX) {
|
||||
// nop
|
||||
} else if (prefix == NEGATIVE_PREFIX) {
|
||||
l = l - Long.MAX_VALUE - 1;
|
||||
} else {
|
||||
throw new NumberFormatException(
|
||||
"string does not begin with the correct prefix");
|
||||
}
|
||||
|
||||
return l;
|
||||
}
|
||||
}
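// ---------------------------------------------------------------------------
// Illustrative check (not part of this patch) of the ordering property stated
// in the javadoc above: string order matches numeric order, and the encoding
// round-trips. The values and the class name are arbitrary.
import org.apache.lucene.document.NumberTools;

class NumberToolsOrderSketch {
  public static void main(String[] args) {
    long l1 = -42L, l2 = 7L;
    String s1 = NumberTools.longToString(l1);
    String s2 = NumberTools.longToString(l2);
    // l1 < l2, so its encoded form sorts first as well
    System.out.println(s1.compareTo(s2) < 0);                 // true
    System.out.println(NumberTools.stringToLong(s1) == l1);   // true, lossless round-trip
  }
}
// ---------------------------------------------------------------------------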
|
|
@@ -23,6 +23,7 @@ import org.apache.lucene.store.BufferedIndexInput;
|
|||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.store.Lock;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
|
@@ -227,6 +228,10 @@ public class CompoundFileReader extends Directory {
|
|||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sync(Collection<String> names) throws IOException {
|
||||
}
|
||||
|
||||
/** Not implemented
|
||||
* @throws UnsupportedOperationException */
|
||||
@Override
|
||||
|
|
|
@@ -180,68 +180,11 @@ import java.util.Date;
|
|||
* keeps track of the last non commit checkpoint.
|
||||
*/
|
||||
public class IndexWriter implements Closeable {
|
||||
|
||||
/**
|
||||
* Default value for the write lock timeout (1,000).
|
||||
* @see #setDefaultWriteLockTimeout
|
||||
* @deprecated use {@link IndexWriterConfig#WRITE_LOCK_TIMEOUT} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static long WRITE_LOCK_TIMEOUT = IndexWriterConfig.WRITE_LOCK_TIMEOUT;
|
||||
|
||||
private long writeLockTimeout;
|
||||
|
||||
/**
|
||||
* Name of the write lock in the index.
|
||||
*/
|
||||
public static final String WRITE_LOCK_NAME = "write.lock";
|
||||
|
||||
/**
|
||||
* Value to denote a flush trigger is disabled
|
||||
* @deprecated use {@link IndexWriterConfig#DISABLE_AUTO_FLUSH} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public final static int DISABLE_AUTO_FLUSH = IndexWriterConfig.DISABLE_AUTO_FLUSH;
|
||||
|
||||
/**
|
||||
* Disabled by default (because IndexWriter flushes by RAM usage
|
||||
* by default). Change using {@link #setMaxBufferedDocs(int)}.
|
||||
* @deprecated use {@link IndexWriterConfig#DEFAULT_MAX_BUFFERED_DOCS} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public final static int DEFAULT_MAX_BUFFERED_DOCS = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DOCS;
|
||||
|
||||
/**
|
||||
* Default value is 16 MB (which means flush when buffered
|
||||
* docs consume 16 MB RAM). Change using {@link #setRAMBufferSizeMB}.
|
||||
* @deprecated use {@link IndexWriterConfig#DEFAULT_RAM_BUFFER_SIZE_MB} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public final static double DEFAULT_RAM_BUFFER_SIZE_MB = IndexWriterConfig.DEFAULT_RAM_BUFFER_SIZE_MB;
|
||||
|
||||
/**
|
||||
* Disabled by default (because IndexWriter flushes by RAM usage
|
||||
* by default). Change using {@link #setMaxBufferedDeleteTerms(int)}.
|
||||
* @deprecated use {@link IndexWriterConfig#DEFAULT_MAX_BUFFERED_DELETE_TERMS} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public final static int DEFAULT_MAX_BUFFERED_DELETE_TERMS = IndexWriterConfig.DEFAULT_MAX_BUFFERED_DELETE_TERMS;
|
||||
|
||||
/**
|
||||
* Default value is 10,000. Change using {@link #setMaxFieldLength(int)}.
|
||||
*
|
||||
* @deprecated see {@link IndexWriterConfig}
|
||||
*/
|
||||
@Deprecated
|
||||
public final static int DEFAULT_MAX_FIELD_LENGTH = 10000;
|
||||
|
||||
/**
|
||||
* Default value is 128. Change using {@link #setTermIndexInterval(int)}.
|
||||
* @deprecated use {@link IndexWriterConfig#DEFAULT_TERM_INDEX_INTERVAL} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public final static int DEFAULT_TERM_INDEX_INTERVAL = IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL;
|
||||
|
||||
/**
|
||||
* Absolute hard maximum length for a term, in bytes once
|
||||
* encoded as UTF8. If a term arrives from the analyzer
|
||||
|
@@ -268,9 +211,6 @@ public class IndexWriter implements Closeable {
|
|||
private final Directory directory; // where this index resides
|
||||
private final Analyzer analyzer; // how to analyze text
|
||||
|
||||
// TODO 4.0: this should be made final once the setter is out
|
||||
private /*final*/Similarity similarity = Similarity.getDefault(); // how to normalize
|
||||
|
||||
private volatile long changeCount; // increments every time a change is completed
|
||||
private long lastCommitChangeCount; // last changeCount that was committed
|
||||
|
||||
|
@@ -290,8 +230,7 @@ public class IndexWriter implements Closeable {
|
|||
|
||||
private Lock writeLock;
|
||||
|
||||
// TODO 4.0: this should be made final once the setter is out
|
||||
private /*final*/int termIndexInterval;
|
||||
private final int termIndexInterval;
|
||||
|
||||
private boolean closed;
|
||||
private boolean closing;
|
||||
|
@@ -301,8 +240,7 @@ public class IndexWriter implements Closeable {
|
|||
private HashSet<SegmentInfo> mergingSegments = new HashSet<SegmentInfo>();
|
||||
|
||||
private MergePolicy mergePolicy;
|
||||
// TODO 4.0: this should be made final once the setter is removed
|
||||
private /*final*/MergeScheduler mergeScheduler;
|
||||
private final MergeScheduler mergeScheduler;
|
||||
private LinkedList<MergePolicy.OneMerge> pendingMerges = new LinkedList<MergePolicy.OneMerge>();
|
||||
private Set<MergePolicy.OneMerge> runningMerges = new HashSet<MergePolicy.OneMerge>();
|
||||
private List<MergePolicy.OneMerge> mergeExceptions = new ArrayList<MergePolicy.OneMerge>();
|
||||
|
@@ -730,276 +668,6 @@ public class IndexWriter implements Closeable {
|
|||
throw new IllegalArgumentException("this method can only be called when the merge policy is the default LogMergePolicy");
|
||||
}
|
||||
|
||||
/** <p>Get the current setting of whether newly flushed
|
||||
* segments will use the compound file format. Note that
|
||||
* this just returns the value previously set with
|
||||
* setUseCompoundFile(boolean), or the default value
|
||||
* (true). You cannot use this to query the status of
|
||||
* previously flushed segments.</p>
|
||||
*
|
||||
* <p>Note that this method is a convenience method: it
|
||||
* just calls mergePolicy.getUseCompoundFile as long as
|
||||
* mergePolicy is an instance of {@link LogMergePolicy}.
|
||||
* Otherwise an IllegalArgumentException is thrown.</p>
|
||||
*
|
||||
* @see #setUseCompoundFile(boolean)
|
||||
* @deprecated use {@link LogMergePolicy#getUseCompoundDocStore()} and
|
||||
* {@link LogMergePolicy#getUseCompoundFile()} directly.
|
||||
*/
|
||||
@Deprecated
|
||||
public boolean getUseCompoundFile() {
|
||||
return getLogMergePolicy().getUseCompoundFile();
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>
|
||||
* Setting to turn on usage of a compound file. When on, multiple files for
|
||||
* each segment are merged into a single file when a new segment is flushed.
|
||||
* </p>
|
||||
*
|
||||
* <p>
|
||||
* Note that this method is a convenience method: it just calls
|
||||
* mergePolicy.setUseCompoundFile as long as mergePolicy is an instance of
|
||||
* {@link LogMergePolicy}. Otherwise an IllegalArgumentException is thrown.
|
||||
* </p>
|
||||
*
|
||||
* @deprecated use {@link LogMergePolicy#setUseCompoundDocStore(boolean)} and
|
||||
* {@link LogMergePolicy#setUseCompoundFile(boolean)} directly.
|
||||
* Note that this method set the given value on both, therefore
|
||||
* you should consider doing the same.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setUseCompoundFile(boolean value) {
|
||||
getLogMergePolicy().setUseCompoundFile(value);
|
||||
getLogMergePolicy().setUseCompoundDocStore(value);
|
||||
}
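// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): setting compound-file behaviour
// directly on the merge policy, as the @deprecated notes above recommend. The
// deprecated IndexWriter setter changed both flags, so this sketch does the
// same. The helper method name is hypothetical.
static IndexWriterConfig compoundFileConfig(Analyzer analyzer) {
  LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
  mp.setUseCompoundFile(true);
  mp.setUseCompoundDocStore(true);
  return new IndexWriterConfig(Version.LUCENE_31, analyzer).setMergePolicy(mp);
}
// ---------------------------------------------------------------------------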
|
||||
|
||||
/** Expert: Set the Similarity implementation used by this IndexWriter.
|
||||
*
|
||||
* @see Similarity#setDefault(Similarity)
|
||||
* @deprecated use {@link IndexWriterConfig#setSimilarity(Similarity)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public void setSimilarity(Similarity similarity) {
|
||||
ensureOpen();
|
||||
this.similarity = similarity;
|
||||
docWriter.setSimilarity(similarity);
|
||||
// Required so config.getSimilarity returns the right value. But this will
|
||||
// go away together with the method in 4.0.
|
||||
config.setSimilarity(similarity);
|
||||
}
|
||||
|
||||
/** Expert: Return the Similarity implementation used by this IndexWriter.
|
||||
*
|
||||
* <p>This defaults to the current value of {@link Similarity#getDefault()}.
|
||||
* @deprecated use {@link IndexWriterConfig#getSimilarity()} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public Similarity getSimilarity() {
|
||||
ensureOpen();
|
||||
return similarity;
|
||||
}
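// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): supplying a Similarity through
// IndexWriterConfig instead of the deprecated setter/getter above. The helper
// method name is hypothetical; the Similarity is passed in by the caller.
static IndexWriterConfig withCustomSimilarity(Analyzer analyzer, Similarity similarity) {
  return new IndexWriterConfig(Version.LUCENE_31, analyzer).setSimilarity(similarity);
}
// ---------------------------------------------------------------------------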
|
||||
|
||||
/** Expert: Set the interval between indexed terms. Large values cause less
|
||||
* memory to be used by IndexReader, but slow random-access to terms. Small
|
||||
* values cause more memory to be used by an IndexReader, and speed
|
||||
* random-access to terms.
|
||||
*
|
||||
* This parameter determines the amount of computation required per query
|
||||
* term, regardless of the number of documents that contain that term. In
|
||||
* particular, it is the maximum number of other terms that must be
|
||||
* scanned before a term is located and its frequency and position information
|
||||
* may be processed. In a large index with user-entered query terms, query
|
||||
* processing time is likely to be dominated not by term lookup but rather
|
||||
* by the processing of frequency and positional data. In a small index
|
||||
* or when many uncommon query terms are generated (e.g., by wildcard
|
||||
* queries) term lookup may become a dominant cost.
|
||||
*
|
||||
* In particular, <code>numUniqueTerms/interval</code> terms are read into
|
||||
* memory by an IndexReader, and, on average, <code>interval/2</code> terms
|
||||
* must be scanned for each random term access.
|
||||
*
|
||||
* @see #DEFAULT_TERM_INDEX_INTERVAL
|
||||
* @deprecated use {@link IndexWriterConfig#setTermIndexInterval(int)}
|
||||
*/
|
||||
@Deprecated
|
||||
public void setTermIndexInterval(int interval) {
|
||||
ensureOpen();
|
||||
this.termIndexInterval = interval;
|
||||
// Required so config.getTermIndexInterval returns the right value. But this
|
||||
// will go away together with the method in 4.0.
|
||||
config.setTermIndexInterval(interval);
|
||||
}
|
||||
|
||||
/** Expert: Return the interval between indexed terms.
|
||||
*
|
||||
* @see #setTermIndexInterval(int)
|
||||
* @deprecated use {@link IndexWriterConfig#getTermIndexInterval()}
|
||||
*/
|
||||
@Deprecated
|
||||
public int getTermIndexInterval() {
|
||||
// We pass false because this method is called by SegmentMerger while we are in the process of closing
|
||||
ensureOpen(false);
|
||||
return termIndexInterval;
|
||||
}
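// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the non-deprecated way to set
// the interval, per the @deprecated notes above. Following the javadoc's
// arithmetic, an index with about 10,000,000 unique terms and an interval of
// 128 keeps roughly 10,000,000 / 128 (about 78,000) index terms in memory, and
// a random lookup scans about 128 / 2 = 64 terms on average. The helper method
// name is hypothetical.
static IndexWriterConfig withTermIndexInterval(Analyzer analyzer) {
  return new IndexWriterConfig(Version.LUCENE_31, analyzer).setTermIndexInterval(128);
}
// ---------------------------------------------------------------------------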
|
||||
|
||||
/**
|
||||
* Constructs an IndexWriter for the index in <code>d</code>.
|
||||
* Text will be analyzed with <code>a</code>. If <code>create</code>
|
||||
* is true, then a new, empty index will be created in
|
||||
* <code>d</code>, replacing the index already there, if any.
|
||||
*
|
||||
* @param d the index directory
|
||||
* @param a the analyzer to use
|
||||
* @param create <code>true</code> to create the index or overwrite
|
||||
* the existing one; <code>false</code> to append to the existing
|
||||
* index
|
||||
* @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
|
||||
* via the MaxFieldLength constructor.
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if the directory cannot be read/written to, or
|
||||
* if it does not exist and <code>create</code> is
|
||||
* <code>false</code> or if there is any other low-level
|
||||
* IO error
|
||||
* @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public IndexWriter(Directory d, Analyzer a, boolean create, MaxFieldLength mfl)
|
||||
throws CorruptIndexException, LockObtainFailedException, IOException {
|
||||
this(d, new IndexWriterConfig(Version.LUCENE_31, a).setOpenMode(
|
||||
create ? OpenMode.CREATE : OpenMode.APPEND).setMaxFieldLength(
|
||||
mfl.getLimit()));
|
||||
}
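// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the replacement for the
// deprecated constructor above -- all options go through IndexWriterConfig and
// the single IndexWriter(Directory, IndexWriterConfig) constructor. The helper
// method name is hypothetical.
static IndexWriter openWriter(Directory dir, Analyzer analyzer, boolean create)
    throws CorruptIndexException, LockObtainFailedException, IOException {
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_31, analyzer)
      .setOpenMode(create ? OpenMode.CREATE : OpenMode.APPEND);
  return new IndexWriter(dir, conf);
}
// ---------------------------------------------------------------------------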
|
||||
|
||||
/**
|
||||
* Constructs an IndexWriter for the index in
|
||||
* <code>d</code>, first creating it if it does not
|
||||
* already exist. Text will be analyzed with
|
||||
* <code>a</code>.
|
||||
*
|
||||
* @param d the index directory
|
||||
* @param a the analyzer to use
|
||||
* @param mfl Maximum field length in number of terms/tokens: LIMITED, UNLIMITED, or user-specified
|
||||
* via the MaxFieldLength constructor.
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if the directory cannot be
|
||||
* read/written to or if there is any other low-level
|
||||
* IO error
|
||||
* @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public IndexWriter(Directory d, Analyzer a, MaxFieldLength mfl)
|
||||
throws CorruptIndexException, LockObtainFailedException, IOException {
|
||||
this(d, new IndexWriterConfig(Version.LUCENE_31, a)
|
||||
.setMaxFieldLength(mfl.getLimit()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: constructs an IndexWriter with a custom {@link
|
||||
* IndexDeletionPolicy}, for the index in <code>d</code>,
|
||||
* first creating it if it does not already exist. Text
|
||||
* will be analyzed with <code>a</code>.
|
||||
*
|
||||
* @param d the index directory
|
||||
* @param a the analyzer to use
|
||||
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
|
||||
* @param mfl whether or not to limit field lengths
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if the directory cannot be
|
||||
* read/written to or if there is any other low-level
|
||||
* IO error
|
||||
* @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
|
||||
throws CorruptIndexException, LockObtainFailedException, IOException {
|
||||
this(d, new IndexWriterConfig(Version.LUCENE_31, a).setMaxFieldLength(
|
||||
mfl.getLimit()).setIndexDeletionPolicy(deletionPolicy));
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: constructs an IndexWriter with a custom {@link
|
||||
* IndexDeletionPolicy}, for the index in <code>d</code>.
|
||||
* Text will be analyzed with <code>a</code>. If
|
||||
* <code>create</code> is true, then a new, empty index
|
||||
* will be created in <code>d</code>, replacing the index
|
||||
* already there, if any.
|
||||
*
|
||||
* @param d the index directory
|
||||
* @param a the analyzer to use
|
||||
* @param create <code>true</code> to create the index or overwrite
|
||||
* the existing one; <code>false</code> to append to the existing
|
||||
* index
|
||||
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
|
||||
* @param mfl {@link org.apache.lucene.index.IndexWriter.MaxFieldLength}, whether or not to limit field lengths. Value is in number of terms/tokens
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if the directory cannot be read/written to, or
|
||||
* if it does not exist and <code>create</code> is
|
||||
* <code>false</code> or if there is any other low-level
|
||||
* IO error
|
||||
* @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public IndexWriter(Directory d, Analyzer a, boolean create, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl)
|
||||
throws CorruptIndexException, LockObtainFailedException, IOException {
|
||||
this(d, new IndexWriterConfig(Version.LUCENE_31, a).setOpenMode(
|
||||
create ? OpenMode.CREATE : OpenMode.APPEND).setMaxFieldLength(
|
||||
mfl.getLimit()).setIndexDeletionPolicy(deletionPolicy));
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: constructs an IndexWriter on specific commit
|
||||
* point, with a custom {@link IndexDeletionPolicy}, for
|
||||
* the index in <code>d</code>. Text will be analyzed
|
||||
* with <code>a</code>.
|
||||
*
|
||||
* <p> This is only meaningful if you've used a {@link
|
||||
* IndexDeletionPolicy} in the past that keeps more than
|
||||
* just the last commit.
|
||||
*
|
||||
* <p>This operation is similar to {@link #rollback()},
|
||||
* except that method can only rollback what's been done
|
||||
* with the current instance of IndexWriter since its last
|
||||
* commit, whereas this method can rollback to an
|
||||
* arbitrary commit point from the past, assuming the
|
||||
* {@link IndexDeletionPolicy} has preserved past
|
||||
* commits.
|
||||
*
|
||||
* @param d the index directory
|
||||
* @param a the analyzer to use
|
||||
* @param deletionPolicy see <a href="#deletionPolicy">above</a>
|
||||
* @param mfl whether or not to limit field lengths, value is in number of terms/tokens. See {@link org.apache.lucene.index.IndexWriter.MaxFieldLength}.
|
||||
* @param commit which commit to open
|
||||
* @throws CorruptIndexException if the index is corrupt
|
||||
* @throws LockObtainFailedException if another writer
|
||||
* has this index open (<code>write.lock</code> could not
|
||||
* be obtained)
|
||||
* @throws IOException if the directory cannot be read/written to, or
|
||||
* if it does not exist and <code>create</code> is
|
||||
* <code>false</code> or if there is any other low-level
|
||||
* IO error
|
||||
* @deprecated use {@link #IndexWriter(Directory, IndexWriterConfig)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public IndexWriter(Directory d, Analyzer a, IndexDeletionPolicy deletionPolicy, MaxFieldLength mfl, IndexCommit commit)
|
||||
throws CorruptIndexException, LockObtainFailedException, IOException {
|
||||
this(d, new IndexWriterConfig(Version.LUCENE_31, a)
|
||||
.setOpenMode(OpenMode.APPEND).setMaxFieldLength(mfl.getLimit())
|
||||
.setIndexDeletionPolicy(deletionPolicy).setIndexCommit(commit));
|
||||
}
|
||||
|
||||
CodecProvider codecs;
|
||||
|
||||
/**
|
||||
|
@@ -1038,8 +706,6 @@ public class IndexWriter implements Closeable {
|
|||
setMessageID(defaultInfoStream);
|
||||
maxFieldLength = conf.getMaxFieldLength();
|
||||
termIndexInterval = conf.getTermIndexInterval();
|
||||
writeLockTimeout = conf.getWriteLockTimeout();
|
||||
similarity = conf.getSimilarity();
|
||||
mergePolicy = conf.getMergePolicy();
|
||||
mergePolicy.setIndexWriter(this);
|
||||
mergeScheduler = conf.getMergeScheduler();
|
||||
|
@@ -1061,7 +727,7 @@ public class IndexWriter implements Closeable {
|
|||
|
||||
writeLock = directory.makeLock(WRITE_LOCK_NAME);
|
||||
|
||||
if (!writeLock.obtain(writeLockTimeout)) // obtain write lock
|
||||
if (!writeLock.obtain(conf.getWriteLockTimeout())) // obtain write lock
|
||||
throw new LockObtainFailedException("Index locked for write: " + writeLock);
|
||||
|
||||
boolean success = false;
|
||||
|
@@ -1176,203 +842,13 @@ public class IndexWriter implements Closeable {
|
|||
return config;
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: set the merge policy used by this writer.
|
||||
*
|
||||
* @deprecated use {@link IndexWriterConfig#setMergePolicy(MergePolicy)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setMergePolicy(MergePolicy mp) {
|
||||
ensureOpen();
|
||||
if (mp == null)
|
||||
throw new NullPointerException("MergePolicy must be non-null");
|
||||
|
||||
if (mergePolicy != mp)
|
||||
mergePolicy.close();
|
||||
mergePolicy = mp;
|
||||
mergePolicy.setIndexWriter(this);
|
||||
pushMaxBufferedDocs();
|
||||
if (infoStream != null)
|
||||
message("setMergePolicy " + mp);
|
||||
// Required so config.getMergePolicy returns the right value. But this will
|
||||
// go away together with the method in 4.0.
|
||||
config.setMergePolicy(mp);
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: returns the current MergePolicy in use by this writer.
|
||||
* @see #setMergePolicy
|
||||
*
|
||||
* @deprecated use {@link IndexWriterConfig#getMergePolicy()} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public MergePolicy getMergePolicy() {
|
||||
ensureOpen();
|
||||
return mergePolicy;
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: set the merge scheduler used by this writer.
|
||||
* @deprecated use {@link IndexWriterConfig#setMergeScheduler(MergeScheduler)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
synchronized public void setMergeScheduler(MergeScheduler mergeScheduler) throws CorruptIndexException, IOException {
|
||||
ensureOpen();
|
||||
if (mergeScheduler == null)
|
||||
throw new NullPointerException("MergeScheduler must be non-null");
|
||||
|
||||
if (this.mergeScheduler != mergeScheduler) {
|
||||
finishMerges(true);
|
||||
this.mergeScheduler.close();
|
||||
}
|
||||
this.mergeScheduler = mergeScheduler;
|
||||
if (infoStream != null)
|
||||
message("setMergeScheduler " + mergeScheduler);
|
||||
// Required so config.getMergeScheduler returns the right value. But this will
|
||||
// go away together with the method in 4.0.
|
||||
config.setMergeScheduler(mergeScheduler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Expert: returns the current MergeScheduler in use by this
|
||||
* writer.
|
||||
* @see #setMergeScheduler(MergeScheduler)
|
||||
* @deprecated use {@link IndexWriterConfig#getMergeScheduler()} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public MergeScheduler getMergeScheduler() {
|
||||
ensureOpen();
|
||||
return mergeScheduler;
|
||||
}
|
||||
|
||||
/** <p>Determines the largest segment (measured by
|
||||
* document count) that may be merged with other segments.
|
||||
* Small values (e.g., less than 10,000) are best for
|
||||
* interactive indexing, as this limits the length of
|
||||
* pauses while indexing to a few seconds. Larger values
|
||||
* are best for batched indexing and speedier
|
||||
* searches.</p>
|
||||
*
|
||||
* <p>The default value is {@link Integer#MAX_VALUE}.</p>
|
||||
*
|
||||
* <p>Note that this method is a convenience method: it
|
||||
* just calls mergePolicy.setMaxMergeDocs as long as
|
||||
* mergePolicy is an instance of {@link LogMergePolicy}.
|
||||
* Otherwise an IllegalArgumentException is thrown.</p>
|
||||
*
|
||||
* <p>The default merge policy ({@link
|
||||
* LogByteSizeMergePolicy}) also allows you to set this
|
||||
* limit by net size (in MB) of the segment, using {@link
|
||||
* LogByteSizeMergePolicy#setMaxMergeMB}.</p>
|
||||
* @deprecated use {@link LogMergePolicy#setMaxMergeDocs(int)} directly.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setMaxMergeDocs(int maxMergeDocs) {
|
||||
getLogMergePolicy().setMaxMergeDocs(maxMergeDocs);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Returns the largest segment (measured by document
|
||||
* count) that may be merged with other segments.</p>
|
||||
*
|
||||
* <p>Note that this method is a convenience method: it
|
||||
* just calls mergePolicy.getMaxMergeDocs as long as
|
||||
* mergePolicy is an instance of {@link LogMergePolicy}.
|
||||
* Otherwise an IllegalArgumentException is thrown.</p>
|
||||
*
|
||||
* @see #setMaxMergeDocs
|
||||
* @deprecated use {@link LogMergePolicy#getMaxMergeDocs()} directly.
|
||||
*/
|
||||
@Deprecated
|
||||
public int getMaxMergeDocs() {
|
||||
return getLogMergePolicy().getMaxMergeDocs();
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum number of terms that will be indexed for a single field in a
|
||||
* document. This limits the amount of memory required for indexing, so that
|
||||
* collections with very large files will not crash the indexing process by
|
||||
* running out of memory. This setting refers to the number of running terms,
|
||||
* not to the number of different terms.<p/>
|
||||
* <strong>Note:</strong> this silently truncates large documents, excluding from the
|
||||
* index all terms that occur further in the document. If you know your source
|
||||
* documents are large, be sure to set this value high enough to accommodate
|
||||
* the expected size. If you set it to Integer.MAX_VALUE, then the only limit
|
||||
* is your memory, but you should anticipate an OutOfMemoryError.<p/>
|
||||
* By default, no more than {@link #DEFAULT_MAX_FIELD_LENGTH} terms
|
||||
* will be indexed for a field.
|
||||
* @deprecated use {@link IndexWriterConfig#setMaxFieldLength(int)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public void setMaxFieldLength(int maxFieldLength) {
|
||||
ensureOpen();
|
||||
this.maxFieldLength = maxFieldLength;
|
||||
docWriter.setMaxFieldLength(maxFieldLength);
|
||||
if (infoStream != null)
|
||||
message("setMaxFieldLength " + maxFieldLength);
|
||||
// Required so config.getMaxFieldLength returns the right value. But this
|
||||
// will go away together with the method in 4.0.
|
||||
config.setMaxFieldLength(maxFieldLength);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the maximum number of terms that will be
|
||||
* indexed for a single field in a document.
|
||||
* @see #setMaxFieldLength
|
||||
* @deprecated use {@link IndexWriterConfig#getMaxFieldLength()} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public int getMaxFieldLength() {
|
||||
ensureOpen();
|
||||
return maxFieldLength;
|
||||
}
|
||||
|
||||
/** Determines the minimal number of documents required
|
||||
* before the buffered in-memory documents are flushed as
|
||||
* a new Segment. Large values generally give faster
|
||||
* indexing.
|
||||
*
|
||||
* <p>When this is set, the writer will flush every
|
||||
* maxBufferedDocs added documents. Pass in {@link
|
||||
* #DISABLE_AUTO_FLUSH} to prevent triggering a flush due
|
||||
* to number of buffered documents. Note that if flushing
|
||||
* by RAM usage is also enabled, then the flush will be
|
||||
* triggered by whichever comes first.</p>
|
||||
*
|
||||
* <p>Disabled by default (writer flushes by RAM usage).</p>
|
||||
*
|
||||
* @throws IllegalArgumentException if maxBufferedDocs is
|
||||
* enabled but smaller than 2, or it disables maxBufferedDocs
|
||||
* when ramBufferSize is already disabled
|
||||
* @see #setRAMBufferSizeMB
|
||||
* @deprecated use {@link IndexWriterConfig#setMaxBufferedDocs(int)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setMaxBufferedDocs(int maxBufferedDocs) {
|
||||
ensureOpen();
|
||||
if (maxBufferedDocs != DISABLE_AUTO_FLUSH && maxBufferedDocs < 2)
|
||||
throw new IllegalArgumentException(
|
||||
"maxBufferedDocs must at least be 2 when enabled");
|
||||
if (maxBufferedDocs == DISABLE_AUTO_FLUSH
|
||||
&& getRAMBufferSizeMB() == DISABLE_AUTO_FLUSH)
|
||||
throw new IllegalArgumentException(
|
||||
"at least one of ramBufferSize and maxBufferedDocs must be enabled");
|
||||
docWriter.setMaxBufferedDocs(maxBufferedDocs);
|
||||
pushMaxBufferedDocs();
|
||||
if (infoStream != null)
|
||||
message("setMaxBufferedDocs " + maxBufferedDocs);
|
||||
// Required so config.getMaxBufferedDocs returns the right value. But this
|
||||
// will go away together with the method in 4.0.
|
||||
config.setMaxBufferedDocs(maxBufferedDocs);
|
||||
}
|
||||
|
||||
/**
|
||||
* If we are flushing by doc count (not by RAM usage), and
|
||||
* using LogDocMergePolicy then push maxBufferedDocs down
|
||||
* as its minMergeDocs, to keep backwards compatibility.
|
||||
*/
|
||||
private void pushMaxBufferedDocs() {
|
||||
if (docWriter.getMaxBufferedDocs() != DISABLE_AUTO_FLUSH) {
|
||||
if (docWriter.getMaxBufferedDocs() != IndexWriterConfig.DISABLE_AUTO_FLUSH) {
|
||||
final MergePolicy mp = mergePolicy;
|
||||
if (mp instanceof LogDocMergePolicy) {
|
||||
LogDocMergePolicy lmp = (LogDocMergePolicy) mp;
|
||||
|
@@ -1386,164 +862,6 @@ public class IndexWriter implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of buffered added documents that will
|
||||
* trigger a flush if enabled.
|
||||
* @see #setMaxBufferedDocs
|
||||
* @deprecated use {@link IndexWriterConfig#getMaxBufferedDocs()} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public int getMaxBufferedDocs() {
|
||||
ensureOpen();
|
||||
return docWriter.getMaxBufferedDocs();
|
||||
}
|
||||
|
||||
/** Determines the amount of RAM that may be used for
|
||||
* buffering added documents and deletions before they are
|
||||
* flushed to the Directory. Generally for faster
|
||||
* indexing performance it's best to flush by RAM usage
|
||||
* instead of document count and use as large a RAM buffer
|
||||
* as you can.
|
||||
*
|
||||
* <p>When this is set, the writer will flush whenever
|
||||
* buffered documents and deletions use this much RAM.
|
||||
* Pass in {@link #DISABLE_AUTO_FLUSH} to prevent
|
||||
* triggering a flush due to RAM usage. Note that if
|
||||
* flushing by document count is also enabled, then the
|
||||
* flush will be triggered by whichever comes first.</p>
|
||||
*
|
||||
* <p> <b>NOTE</b>: the account of RAM usage for pending
|
||||
* deletions is only approximate. Specifically, if you
|
||||
* delete by Query, Lucene currently has no way to measure
|
||||
* the RAM usage of individual Queries, so the accounting
|
||||
* will under-estimate and you should compensate by either
|
||||
* calling commit() periodically yourself, or by using
|
||||
* {@link #setMaxBufferedDeleteTerms} to flush by count
|
||||
* instead of RAM usage (each buffered delete Query counts
|
||||
* as one).
|
||||
*
|
||||
* <p> <b>NOTE</b>: because IndexWriter uses
|
||||
* <code>int</code>s when managing its internal storage,
|
||||
* the absolute maximum value for this setting is somewhat
|
||||
* less than 2048 MB. The precise limit depends on
|
||||
* various factors, such as how large your documents are,
|
||||
* how many fields have norms, etc., so it's best to set
|
||||
* this value comfortably under 2048.</p>
|
||||
*
|
||||
* <p> The default value is {@link #DEFAULT_RAM_BUFFER_SIZE_MB}.</p>
|
||||
*
|
||||
* @throws IllegalArgumentException if ramBufferSize is
|
||||
* enabled but non-positive, or it disables ramBufferSize
|
||||
* when maxBufferedDocs is already disabled
|
||||
* @deprecated use {@link IndexWriterConfig#setRAMBufferSizeMB(double)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setRAMBufferSizeMB(double mb) {
|
||||
if (mb > 2048.0) {
|
||||
throw new IllegalArgumentException("ramBufferSize " + mb + " is too large; should be comfortably less than 2048");
|
||||
}
|
||||
if (mb != DISABLE_AUTO_FLUSH && mb <= 0.0)
|
||||
throw new IllegalArgumentException(
|
||||
"ramBufferSize should be > 0.0 MB when enabled");
|
||||
if (mb == DISABLE_AUTO_FLUSH && getMaxBufferedDocs() == DISABLE_AUTO_FLUSH)
|
||||
throw new IllegalArgumentException(
|
||||
"at least one of ramBufferSize and maxBufferedDocs must be enabled");
|
||||
docWriter.setRAMBufferSizeMB(mb);
|
||||
if (infoStream != null)
|
||||
message("setRAMBufferSizeMB " + mb);
|
||||
// Required so config.getRAMBufferSizeMB returns the right value. But this
|
||||
// will go away together with the method in 4.0.
|
||||
config.setRAMBufferSizeMB(mb);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value set by {@link #setRAMBufferSizeMB} if enabled.
|
||||
* @deprecated use {@link IndexWriterConfig#getRAMBufferSizeMB()} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public double getRAMBufferSizeMB() {
|
||||
return docWriter.getRAMBufferSizeMB();
|
||||
}
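// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the IndexWriterConfig
// equivalent of the deprecated flush setters above -- flush by RAM usage only,
// keeping the buffer well under the roughly 2048 MB ceiling the javadoc
// mentions. The helper method name and 256 MB figure are hypothetical.
static IndexWriterConfig flushByRam(Analyzer analyzer) {
  return new IndexWriterConfig(Version.LUCENE_31, analyzer)
      .setRAMBufferSizeMB(256.0)
      .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
}
// ---------------------------------------------------------------------------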
|
||||
|
||||
/**
|
||||
* <p>Determines the minimal number of delete terms required before the buffered
|
||||
* in-memory delete terms are applied and flushed. If there are documents
|
||||
* buffered in memory at the time, they are merged and a new segment is
|
||||
* created.</p>
|
||||
|
||||
* <p>Disabled by default (writer flushes by RAM usage).</p>
|
||||
*
|
||||
* @throws IllegalArgumentException if maxBufferedDeleteTerms
|
||||
* is enabled but smaller than 1
|
||||
* @see #setRAMBufferSizeMB
|
||||
* @deprecated use {@link IndexWriterConfig#setMaxBufferedDeleteTerms(int)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setMaxBufferedDeleteTerms(int maxBufferedDeleteTerms) {
|
||||
ensureOpen();
|
||||
if (maxBufferedDeleteTerms != DISABLE_AUTO_FLUSH
|
||||
&& maxBufferedDeleteTerms < 1)
|
||||
throw new IllegalArgumentException(
|
||||
"maxBufferedDeleteTerms must at least be 1 when enabled");
|
||||
docWriter.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
|
||||
if (infoStream != null)
|
||||
message("setMaxBufferedDeleteTerms " + maxBufferedDeleteTerms);
|
||||
// Required so config.getMaxBufferedDeleteTerms returns the right value. But
|
||||
// this will go away together with the method in 4.0.
|
||||
config.setMaxBufferedDeleteTerms(maxBufferedDeleteTerms);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of buffered deleted terms that will
|
||||
* trigger a flush if enabled.
|
||||
* @see #setMaxBufferedDeleteTerms
|
||||
* @deprecated use {@link IndexWriterConfig#getMaxBufferedDeleteTerms()} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public int getMaxBufferedDeleteTerms() {
|
||||
ensureOpen();
|
||||
return docWriter.getMaxBufferedDeleteTerms();
|
||||
}
|
||||
|
||||
/** Determines how often segment indices are merged by addDocument(). With
|
||||
* smaller values, less RAM is used while indexing, and searches on
|
||||
* unoptimized indices are faster, but indexing speed is slower. With larger
|
||||
* values, more RAM is used during indexing, and while searches on unoptimized
|
||||
* indices are slower, indexing is faster. Thus larger values (> 10) are best
|
||||
* for batch index creation, and smaller values (< 10) for indices that are
|
||||
* interactively maintained.
|
||||
*
|
||||
* <p>Note that this method is a convenience method: it
|
||||
* just calls mergePolicy.setMergeFactor as long as
|
||||
* mergePolicy is an instance of {@link LogMergePolicy}.
|
||||
* Otherwise an IllegalArgumentException is thrown.</p>
|
||||
*
|
||||
* <p>This must never be less than 2. The default value is 10.
|
||||
* @deprecated use {@link LogMergePolicy#setMergeFactor(int)} directly.
|
||||
*/
|
||||
@Deprecated
|
||||
public void setMergeFactor(int mergeFactor) {
|
||||
getLogMergePolicy().setMergeFactor(mergeFactor);
|
||||
}
|
||||
|
||||
/**
|
||||
* <p>Returns the number of segments that are merged at
|
||||
* once and also controls the total number of segments
|
||||
* allowed to accumulate in the index.</p>
|
||||
*
|
||||
* <p>Note that this method is a convenience method: it
|
||||
* just calls mergePolicy.getMergeFactor as long as
|
||||
* mergePolicy is an instance of {@link LogMergePolicy}.
|
||||
* Otherwise an IllegalArgumentException is thrown.</p>
|
||||
*
|
||||
* @see #setMergeFactor
|
||||
* @deprecated use {@link LogMergePolicy#getMergeFactor()} directly.
|
||||
*/
|
||||
@Deprecated
|
||||
public int getMergeFactor() {
|
||||
return getLogMergePolicy().getMergeFactor();
|
||||
}
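// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): tuning the merge factor
// directly on a LogMergePolicy, per the @deprecated notes above -- larger
// values suit batch indexing, smaller values suit interactively maintained
// indices. The helper method name and the value 30 are hypothetical.
static IndexWriterConfig batchIndexingConfig(Analyzer analyzer) {
  LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
  mp.setMergeFactor(30);   // default is 10; must never be less than 2
  return new IndexWriterConfig(Version.LUCENE_31, analyzer).setMergePolicy(mp);
}
// ---------------------------------------------------------------------------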
|
||||
|
||||
/** If non-null, this will be the default infoStream used
|
||||
* by a newly instantiated IndexWriter.
|
||||
* @see #setInfoStream
|
||||
|
@@ -1595,52 +913,6 @@ public class IndexWriter implements Closeable {
|
|||
return infoStream != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the maximum time to wait for a write lock (in milliseconds) for this instance of IndexWriter.
|
||||
* @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter.
|
||||
* @deprecated use {@link IndexWriterConfig#setWriteLockTimeout(long)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public void setWriteLockTimeout(long writeLockTimeout) {
|
||||
ensureOpen();
|
||||
this.writeLockTimeout = writeLockTimeout;
|
||||
// Required so config.getWriteLockTimeout returns the right value. But this
|
||||
// will go away together with the method in 4.0.
|
||||
config.setWriteLockTimeout(writeLockTimeout);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns allowed timeout when acquiring the write lock.
|
||||
* @see #setWriteLockTimeout
|
||||
* @deprecated use {@link IndexWriterConfig#getWriteLockTimeout()}
|
||||
*/
|
||||
@Deprecated
|
||||
public long getWriteLockTimeout() {
|
||||
ensureOpen();
|
||||
return writeLockTimeout;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in
|
||||
* milliseconds).
|
||||
* @deprecated use {@link IndexWriterConfig#setDefaultWriteLockTimeout(long)} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static void setDefaultWriteLockTimeout(long writeLockTimeout) {
|
||||
IndexWriterConfig.setDefaultWriteLockTimeout(writeLockTimeout);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns default write lock timeout for newly
|
||||
* instantiated IndexWriters.
|
||||
* @see #setDefaultWriteLockTimeout
|
||||
* @deprecated use {@link IndexWriterConfig#getDefaultWriteLockTimeout()} instead
|
||||
*/
|
||||
@Deprecated
|
||||
public static long getDefaultWriteLockTimeout() {
|
||||
return IndexWriterConfig.getDefaultWriteLockTimeout();
|
||||
}
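// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): write-lock timeouts through
// IndexWriterConfig, as the @deprecated notes above suggest -- one global
// default for newly created writers plus a per-writer override. The helper
// method name and the millisecond values are hypothetical.
static IndexWriterConfig withLockTimeouts(Analyzer analyzer) {
  IndexWriterConfig.setDefaultWriteLockTimeout(2000L);  // default for writers created later
  return new IndexWriterConfig(Version.LUCENE_31, analyzer)
      .setWriteLockTimeout(5000L);                      // this writer waits up to 5 seconds
}
// ---------------------------------------------------------------------------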
|
||||
|
||||
/**
|
||||
* Commits all changes to an index and closes all
|
||||
* associated files. Note that this may be a costly
|
||||
|
@@ -3030,7 +2302,7 @@ public class IndexWriter implements Closeable {
|
|||
}
|
||||
|
||||
// Now create the compound file if needed
|
||||
if (mergePolicy instanceof LogMergePolicy && getUseCompoundFile()) {
|
||||
if (mergePolicy instanceof LogMergePolicy && ((LogMergePolicy) mergePolicy).getUseCompoundFile()) {
|
||||
|
||||
List<String> files = null;
|
||||
|
||||
|
@@ -3998,7 +3270,7 @@ public class IndexWriter implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
private final synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
|
||||
private synchronized void closeMergeReaders(MergePolicy.OneMerge merge, boolean suppressExceptions) throws IOException {
|
||||
final int numSegments = merge.segments.size();
|
||||
if (suppressExceptions) {
|
||||
// Suppress any new exceptions so we throw the
|
||||
|
@@ -4043,7 +3315,7 @@ public class IndexWriter implements Closeable {
|
|||
/** Does the actual (time-consuming) work of the merge,
|
||||
* but without holding synchronized lock on IndexWriter
|
||||
* instance */
|
||||
final private int mergeMiddle(MergePolicy.OneMerge merge)
|
||||
private int mergeMiddle(MergePolicy.OneMerge merge)
|
||||
throws CorruptIndexException, IOException {
|
||||
|
||||
merge.checkAborted(directory);
|
||||
|
@@ -4507,63 +3779,6 @@ public class IndexWriter implements Closeable {
|
|||
directory.makeLock(IndexWriter.WRITE_LOCK_NAME).release();
|
||||
}
|
||||
|
||||

  /**
   * Specifies maximum field length (in number of tokens/terms) in
   * {@link IndexWriter} constructors. {@link #setMaxFieldLength(int)} overrides
   * the value set by the constructor.
   *
   * @deprecated use {@link IndexWriterConfig} and pass
   * {@link IndexWriterConfig#UNLIMITED_FIELD_LENGTH} or your own
   * value.
   */
  @Deprecated
  public static final class MaxFieldLength {

    private int limit;
    private String name;

    /**
     * Private type-safe-enum-pattern constructor.
     *
     * @param name instance name
     * @param limit maximum field length
     */
    private MaxFieldLength(String name, int limit) {
      this.name = name;
      this.limit = limit;
    }

    /**
     * Public constructor to allow users to specify the maximum field size limit.
     *
     * @param limit The maximum field length
     */
    public MaxFieldLength(int limit) {
      this("User-specified", limit);
    }

    public int getLimit() {
      return limit;
    }

    @Override
    public String toString()
    {
      return name + ":" + limit;
    }

    /** Sets the maximum field length to {@link Integer#MAX_VALUE}. */
    public static final MaxFieldLength UNLIMITED
        = new MaxFieldLength("UNLIMITED", Integer.MAX_VALUE);

    /**
     * Sets the maximum field length to
     * {@link #DEFAULT_MAX_FIELD_LENGTH}
     * */
    public static final MaxFieldLength LIMITED
        = new MaxFieldLength("LIMITED", DEFAULT_MAX_FIELD_LENGTH);
  }

  /** If {@link #getReader} has been called (ie, this writer
   * is in near real-time mode), then after a merge
   * completes, this class can be invoked to warm the

@@ -4582,31 +3797,6 @@ public class IndexWriter implements Closeable {

  private IndexReaderWarmer mergedSegmentWarmer;

  /**
   * Set the merged segment warmer. See {@link IndexReaderWarmer}.
   *
   * @deprecated use
   * {@link IndexWriterConfig#setMergedSegmentWarmer}
   * instead.
   */
  @Deprecated
  public void setMergedSegmentWarmer(IndexReaderWarmer warmer) {
    mergedSegmentWarmer = warmer;
    // Required so config.getMergedSegmentWarmer returns the right value. But
    // this will go away together with the method in 4.0.
    config.setMergedSegmentWarmer(mergedSegmentWarmer);
  }

  /**
   * Returns the current merged segment warmer. See {@link IndexReaderWarmer}.
   *
   * @deprecated use {@link IndexWriterConfig#getMergedSegmentWarmer()} instead.
   */
  @Deprecated
  public IndexReaderWarmer getMergedSegmentWarmer() {
    return mergedSegmentWarmer;
  }

  private void handleOOM(OutOfMemoryError oom, String location) {
    if (infoStream != null) {
      message("hit OutOfMemoryError inside " + location);
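The removed setter above is superseded by IndexWriterConfig#setMergedSegmentWarmer. A minimal, hypothetical sketch (the "conf" IndexWriterConfig is assumed to exist; the warming query is illustrative):

  // Migration sketch only; warms each newly merged segment before NRT search uses it.
  conf.setMergedSegmentWarmer(new IndexWriter.IndexReaderWarmer() {
    @Override
    public void warm(IndexReader reader) throws IOException {
      // run one cheap query so the first near-real-time search after the merge is not cold
      new IndexSearcher(reader).search(new MatchAllDocsQuery(), 1);
    }
  });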

@@ -1240,33 +1240,6 @@ public class SegmentReader extends IndexReader implements Cloneable {
  public final Object getCoreCacheKey() {
    return core;
  }

  /**
   * Lotsa tests did hacks like:<br/>
   * SegmentReader reader = (SegmentReader) IndexReader.open(dir);<br/>
   * They broke. This method serves as a hack to keep hacks working
   * We do it with R/W access for the tests (BW compatibility)
   * @deprecated Remove this when tests are fixed!
   */
  @Deprecated
  static SegmentReader getOnlySegmentReader(Directory dir) throws IOException {
    return getOnlySegmentReader(IndexReader.open(dir, false));
  }

  static SegmentReader getOnlySegmentReader(IndexReader reader) {
    if (reader instanceof SegmentReader)
      return (SegmentReader) reader;

    if (reader instanceof DirectoryReader) {
      IndexReader[] subReaders = reader.getSequentialSubReaders();
      if (subReaders.length != 1)
        throw new IllegalArgumentException(reader + " has " + subReaders.length + " segments instead of exactly one");

      return (SegmentReader) subReaders[0];
    }

    throw new IllegalArgumentException(reader + " is not a SegmentReader or a single-segment DirectoryReader");
  }

  @Override
  public int getTermInfosIndexDivisor() {

@@ -168,7 +168,7 @@ public final class Term implements Comparable<Term>, java.io.Serializable {
    BytesRef.getUTF8SortedAsUTF16Comparator();

  /**
   * @deprecated For internal backwards compatibility use only
   * @deprecated (4.0) For internal backwards compatibility use only
   * @lucene.internal
   */
  @Deprecated

@@ -32,7 +32,7 @@ import org.apache.lucene.index.codecs.FieldsProducer;
 * format. It does not provide a writer because newly
 * written segments should use StandardCodec.
 *
 * @deprecated This is only used to read indexes created
 * @deprecated (4.0) This is only used to read indexes created
 * before 4.0.
 * @lucene.experimental
 */

@@ -44,7 +44,10 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;

/** Exposes flex API on a pre-flex index, as a codec.
 * @lucene.experimental */
 * @lucene.experimental
 * @deprecated (4.0)
 */
@Deprecated
public class PreFlexFields extends FieldsProducer {

  private static final boolean DEBUG_SURROGATES = false;

@@ -26,7 +26,7 @@ import org.apache.lucene.index.codecs.standard.DefaultSkipListReader;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Bits;

/** @deprecated
/** @deprecated (4.0)
 * @lucene.experimental */
@Deprecated
public class SegmentTermDocs {

@@ -26,7 +26,7 @@ import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.index.IndexFormatTooNewException;

/**
 * @deprecated No longer used with flex indexing, except for
 * @deprecated (4.0) No longer used with flex indexing, except for
 * reading old segments
 * @lucene.experimental */

@@ -23,7 +23,11 @@ import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.IndexInput;

/** @lucene.experimental */
/**
 * @lucene.experimental
 * @deprecated (4.0)
 */
@Deprecated
public final class SegmentTermPositions
extends SegmentTermDocs {
  private IndexInput proxStream;

@@ -25,6 +25,11 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.FieldInfos;

/**
 * @lucene.experimental
 * @deprecated (4.0)
 */
@Deprecated
final class TermBuffer implements Cloneable {

  private String field;

@@ -19,7 +19,7 @@ package org.apache.lucene.index.codecs.preflex;

/** A TermInfo is the record of information stored for a
 * term
 * @deprecated This class is no longer used in flexible
 * @deprecated (4.0) This class is no longer used in flexible
 * indexing. */

@Deprecated

@@ -30,7 +30,7 @@ import org.apache.lucene.util.DoubleBarrelLRUCache;
/** This stores a monotonically increasing set of <Term, TermInfo> pairs in a
 * Directory. Pairs are accessed either by Term or by ordinal position the
 * set
 * @deprecated This class has been replaced by
 * @deprecated (4.0) This class has been replaced by
 * FormatPostingsTermsDictReader, except for reading old segments.
 * @lucene.experimental
 */

@@ -28,14 +28,14 @@ public interface CharStream {

  /**
   * Returns the column position of the character last read.
   * @deprecated
   * @deprecated (gen)
   * @see #getEndColumn
   */
  int getColumn();

  /**
   * Returns the line number of the character last read.
   * @deprecated
   * @deprecated (gen)
   * @see #getEndLine
   */
  int getLine();

@@ -18,7 +18,6 @@ import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;

@@ -77,10 +76,8 @@ import org.apache.lucene.util.VirtualMethod;
 * <tt>date:[6/1/2005 TO 6/4/2005]</tt> produces a range query that searches
 * for "date" fields between 2005-06-01 and 2005-06-04. Note that the format
 * of the accepted input depends on {@link #setLocale(Locale) the locale}.
 * By default a date is converted into a search term using the deprecated
 * {@link DateField} for compatibility reasons.
 * To use the new {@link DateTools} to convert dates, a
 * {@link org.apache.lucene.document.DateTools.Resolution} has to be set.
 * A {@link org.apache.lucene.document.DateTools.Resolution} has to be set,
 * if you want to use {@link DateTools} for date conversion.
 * </p>
 * <p>
 * The date resolution that shall be used for RangeQueries can be set

@@ -91,10 +88,9 @@ import org.apache.lucene.util.VirtualMethod;
 * resolutions take, if set, precedence over the default date resolution.
 * </p>
 * <p>
 * If you use neither {@link DateField} nor {@link DateTools} in your
 * index, you can create your own
 * If you don't use {@link DateTools} in your index, you can create your own
 * query parser that inherits QueryParser and overwrites
 * {@link #getRangeQuery(String, String, String, boolean)} to
 * {@link #getRangeQuery(String, String, String, boolean, boolean)} to
 * use a different method for date conversion.
 * </p>
 *

@@ -108,8 +104,6 @@ import org.apache.lucene.util.VirtualMethod;
 * <p><b>NOTE</b>: You must specify the required {@link Version}
 * compatibility when creating QueryParser:
 * <ul>
 * <li> As of 2.9, {@link #setEnablePositionIncrements} is true by
 * default.
 * <li> As of 3.1, {@link #setAutoGeneratePhraseQueries} is false by
 * default.
 * </ul>

@@ -42,7 +42,6 @@ import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.document.DateField;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;

@@ -101,10 +100,8 @@ import org.apache.lucene.util.VirtualMethod;
 * <tt>date:[6/1/2005 TO 6/4/2005]</tt> produces a range query that searches
 * for "date" fields between 2005-06-01 and 2005-06-04. Note that the format
 * of the accepted input depends on {@link #setLocale(Locale) the locale}.
 * By default a date is converted into a search term using the deprecated
 * {@link DateField} for compatibility reasons.
 * To use the new {@link DateTools} to convert dates, a
 * {@link org.apache.lucene.document.DateTools.Resolution} has to be set.
 * A {@link org.apache.lucene.document.DateTools.Resolution} has to be set,
 * if you want to use {@link DateTools} for date conversion.
 * </p>
 * <p>
 * The date resolution that shall be used for RangeQueries can be set

@@ -115,10 +112,9 @@ import org.apache.lucene.util.VirtualMethod;
 * resolutions take, if set, precedence over the default date resolution.
 * </p>
 * <p>
 * If you use neither {@link DateField} nor {@link DateTools} in your
 * index, you can create your own
 * If you don't use {@link DateTools} in your index, you can create your own
 * query parser that inherits QueryParser and overwrites
 * {@link #getRangeQuery(String, String, String, boolean)} to
 * {@link #getRangeQuery(String, String, String, boolean, boolean)} to
 * use a different method for date conversion.
 * </p>
 *

@@ -132,8 +128,6 @@ import org.apache.lucene.util.VirtualMethod;
 * <p><b>NOTE</b>: You must specify the required {@link Version}
 * compatibility when creating QueryParser:
 * <ul>
 * <li> As of 2.9, {@link #setEnablePositionIncrements} is true by
 * default.
 * <li> As of 3.1, {@link #setAutoGeneratePhraseQueries} is false by
 * default.
 * </ul>
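A small, hypothetical usage sketch of the DateTools-based behavior the rewritten javadoc above describes (field names, Version constant, and date strings are illustrative; ParseException handling is omitted):

  // Sketch only: with DateField gone, a DateTools.Resolution controls how dates in range queries are converted.
  QueryParser parser = new QueryParser(Version.LUCENE_40, "contents", analyzer);
  parser.setDateResolution(DateTools.Resolution.DAY);              // default resolution for all fields
  parser.setDateResolution("modified", DateTools.Resolution.HOUR); // per-field override
  Query query = parser.parse("modified:[1/1/2010 TO 6/1/2010]");   // accepted format depends on the parser's Locale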
@ -17,26 +17,24 @@
|
|||
|
||||
package org.apache.lucene.queryParser;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.CachingTokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
||||
import org.apache.lucene.document.DateField;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.Version;
|
||||
import org.apache.lucene.util.VirtualMethod;
|
||||
import org.apache.lucene.queryParser.QueryParser.Operator;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.text.Collator;
|
||||
import java.text.DateFormat;
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.CachingTokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.queryParser.QueryParser.Operator;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
/** This class is overridden by QueryParser in QueryParser.jj
|
||||
* and acts to separate the majority of the Java code from the .jj grammar file.
|
||||
*/
|
||||
|
@ -84,20 +82,6 @@ public abstract class QueryParserBase {
|
|||
// for use when constructing RangeQuerys.
|
||||
Collator rangeCollator = null;
|
||||
|
||||
/** @deprecated remove when getFieldQuery is removed */
|
||||
@Deprecated
|
||||
static final VirtualMethod<QueryParserBase> getFieldQueryMethod =
|
||||
new VirtualMethod<QueryParserBase>(QueryParserBase.class, "getFieldQuery", String.class, String.class);
|
||||
/** @deprecated remove when getFieldQuery is removed */
|
||||
@Deprecated
|
||||
static final VirtualMethod<QueryParserBase> getFieldQueryWithQuotedMethod =
|
||||
new VirtualMethod<QueryParserBase>(QueryParserBase.class, "getFieldQuery", String.class, String.class, boolean.class);
|
||||
/** @deprecated remove when getFieldQuery is removed */
|
||||
@Deprecated
|
||||
final boolean hasNewAPI =
|
||||
VirtualMethod.compareImplementationDistance(getClass(),
|
||||
getFieldQueryWithQuotedMethod, getFieldQueryMethod) >= 0; // its ok for both to be overridden
|
||||
|
||||
boolean autoGeneratePhraseQueries;
|
||||
|
||||
// So the generated QueryParser(CharStream) won't error out
|
||||
|
@ -112,11 +96,6 @@ public abstract class QueryParserBase {
|
|||
public void init(Version matchVersion, String f, Analyzer a) {
|
||||
analyzer = a;
|
||||
field = f;
|
||||
if (matchVersion.onOrAfter(Version.LUCENE_29)) {
|
||||
enablePositionIncrements = true;
|
||||
} else {
|
||||
enablePositionIncrements = false;
|
||||
}
|
||||
if (matchVersion.onOrAfter(Version.LUCENE_31)) {
|
||||
setAutoGeneratePhraseQueries(false);
|
||||
} else {
|
||||
|
@ -190,9 +169,6 @@ public abstract class QueryParserBase {
|
|||
* surrounded by double quotes.
|
||||
*/
|
||||
public final void setAutoGeneratePhraseQueries(boolean value) {
|
||||
if (value == false && !hasNewAPI)
|
||||
throw new IllegalArgumentException("You must implement the new API: getFieldQuery(String,String,boolean)"
|
||||
+ " to use setAutoGeneratePhraseQueries(false)");
|
||||
this.autoGeneratePhraseQueries = value;
|
||||
}
|
||||
|
||||
|
@ -272,7 +248,7 @@ public abstract class QueryParserBase {
|
|||
* Useful when e.g. a StopFilter increases the position increment of
|
||||
* the token that follows an omitted token.
|
||||
* <p>
|
||||
* Default: false.
|
||||
* Default: true.
|
||||
*/
|
||||
public void setEnablePositionIncrements(boolean enable) {
|
||||
this.enablePositionIncrements = enable;
|
||||
|
@ -488,15 +464,6 @@ public abstract class QueryParserBase {
|
|||
throw new RuntimeException("Clause cannot be both required and prohibited");
|
||||
}
|
||||
|
||||
/**
|
||||
* @deprecated Use {@link #getFieldQuery(String,String,boolean)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
protected Query getFieldQuery(String field, String queryText) throws ParseException {
|
||||
// treat the text as if it was quoted, to drive phrase logic with old versions.
|
||||
return getFieldQuery(field, queryText, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* @exception org.apache.lucene.queryParser.ParseException throw in overridden method to disallow
|
||||
*/
|
||||
|
@ -684,7 +651,7 @@ public abstract class QueryParserBase {
|
|||
*/
|
||||
protected Query getFieldQuery(String field, String queryText, int slop)
|
||||
throws ParseException {
|
||||
Query query = hasNewAPI ? getFieldQuery(field, queryText, true) : getFieldQuery(field, queryText);
|
||||
Query query = getFieldQuery(field, queryText, true);
|
||||
|
||||
if (query instanceof PhraseQuery) {
|
||||
((PhraseQuery) query).setSlop(slop);
|
||||
|
@ -696,11 +663,6 @@ public abstract class QueryParserBase {
|
|||
return query;
|
||||
}
|
||||
|
||||
|
||||
@Deprecated
|
||||
protected final Query getRangeQuery(String field, String part1, String part2, boolean inclusive) throws MethodRemovedUseAnother {return null;}
|
||||
|
||||
|
||||
/**
|
||||
*
|
||||
* @exception org.apache.lucene.queryParser.ParseException
|
||||
|
@ -722,15 +684,7 @@ public abstract class QueryParserBase {
|
|||
DateTools.Resolution resolution = getDateResolution(field);
|
||||
|
||||
try {
|
||||
Date d1 = df.parse(part1);
|
||||
if (resolution == null) {
|
||||
// no default or field specific date resolution has been set,
|
||||
// use deprecated DateField to maintain compatibility with
|
||||
// pre-1.9 Lucene versions.
|
||||
part1 = DateField.dateToString(d1);
|
||||
} else {
|
||||
part1 = DateTools.dateToString(d1, resolution);
|
||||
}
|
||||
part1 = DateTools.dateToString(df.parse(part1), resolution);
|
||||
} catch (Exception e) { }
|
||||
|
||||
try {
|
||||
|
@ -747,14 +701,7 @@ public abstract class QueryParserBase {
|
|||
cal.set(Calendar.MILLISECOND, 999);
|
||||
d2 = cal.getTime();
|
||||
}
|
||||
if (resolution == null) {
|
||||
// no default or field specific date resolution has been set,
|
||||
// use deprecated DateField to maintain compatibility with
|
||||
// pre-1.9 Lucene versions.
|
||||
part2 = DateField.dateToString(d2);
|
||||
} else {
|
||||
part2 = DateTools.dateToString(d2, resolution);
|
||||
}
|
||||
part2 = DateTools.dateToString(d2, resolution);
|
||||
} catch (Exception e) { }
|
||||
|
||||
return newRangeQuery(field, part1, part2, startInclusive, endInclusive);
|
||||
|
@ -838,10 +785,6 @@ public abstract class QueryParserBase {
|
|||
return new FuzzyQuery(term,minimumSimilarity,prefixLength);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
protected final Query newRangeQuery(String field, String part1, String part2, boolean inclusive) throws MethodRemovedUseAnother {return null;}
|
||||
|
||||
|
||||
/**
|
||||
* Builds a new TermRangeQuery instance
|
||||
* @param field Field
|
||||
|
@ -1064,7 +1007,7 @@ public abstract class QueryParserBase {
|
|||
}
|
||||
q = getFuzzyQuery(qfield, termImage, fms);
|
||||
} else {
|
||||
q = hasNewAPI ? getFieldQuery(qfield, termImage, false) : getFieldQuery(qfield, termImage);
|
||||
q = getFieldQuery(qfield, termImage, false);
|
||||
}
|
||||
return q;
|
||||
}
|
||||
|
|
|
@ -16,7 +16,6 @@ import org.apache.lucene.analysis.CachingTokenFilter;
|
|||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
||||
import org.apache.lucene.document.DateField;
|
||||
import org.apache.lucene.document.DateTools;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
|
|
|
@ -193,7 +193,7 @@ class BooleanScorer2 extends Scorer {
|
|||
}
|
||||
|
||||
private Scorer dualConjunctionSumScorer(Scorer req1, Scorer req2) throws IOException { // non counting.
|
||||
return new ConjunctionScorer(defaultSimilarity, new Scorer[]{req1, req2});
|
||||
return new ConjunctionScorer(defaultSimilarity, req1, req2);
|
||||
// All scorers match, so defaultSimilarity always has 1 as
|
||||
// the coordination factor.
|
||||
// Therefore the sum of the scores of two scorers
|
||||
|
|
|

@@ -18,15 +18,15 @@ package org.apache.lucene.search;
 */


import java.io.IOException;
import java.io.Serializable;
import java.util.Collection;

import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation.IDFExplanation;
import org.apache.lucene.util.SmallFloat;

import java.io.IOException;
import java.io.Serializable;
import java.util.Collection;


/**
 * Expert: Scoring API.

@@ -562,16 +562,6 @@ public abstract class Similarity implements Serializable {
      NORM_TABLE[i] = SmallFloat.byte315ToFloat((byte)i);
  }

  /**
   * Decodes a normalization factor stored in an index.
   * @see #decodeNormValue(byte)
   * @deprecated Use {@link #decodeNormValue} instead.
   */
  @Deprecated
  public static float decodeNorm(byte b) {
    return NORM_TABLE[b & 0xFF];  // & 0xFF maps negative bytes to positive above 127
  }

  /** Decodes a normalization factor stored in an index.
   * @see #encodeNormValue(float)
   */

@@ -579,17 +569,6 @@ public abstract class Similarity implements Serializable {
    return NORM_TABLE[b & 0xFF];  // & 0xFF maps negative bytes to positive above 127
  }

  /** Returns a table for decoding normalization bytes.
   * @see #encodeNormValue(float)
   * @see #decodeNormValue(byte)
   *
   * @deprecated Use instance methods for encoding/decoding norm values to enable customization.
   */
  @Deprecated
  public static float[] getNormDecoder() {
    return NORM_TABLE;
  }

  /**
   * Compute the normalization value for a field, given the accumulated
   * state of term processing for this field (see {@link FieldInvertState}).

@@ -670,20 +649,6 @@ public abstract class Similarity implements Serializable {
    return SmallFloat.floatToByte315(f);
  }

  /**
   * Static accessor kept for backwards compability reason, use encodeNormValue instead.
   * @param f norm-value to encode
   * @return byte representing the given float
   * @deprecated Use {@link #encodeNormValue} instead.
   *
   * @see #encodeNormValue(float)
   */
  @Deprecated
  public static byte encodeNorm(float f) {
    return SmallFloat.floatToByte315(f);
  }


  /** Computes a score factor based on a term or phrase's frequency in a
   * document. This value is multiplied by the {@link #idf(int, int)}
   * factor for each term in the query and these products are then summed to
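A hypothetical sketch of the instance-method replacements named in the deprecation notes above (DefaultSimilarity stands in for whatever concrete Similarity an application actually uses):

  // Sketch only: norm encode/decode now goes through the Similarity instance,
  // so subclasses can customize the encoding.
  Similarity sim = new DefaultSimilarity();
  byte stored = sim.encodeNormValue(0.5f);    // replaces the removed static encodeNorm(float)
  float norm  = sim.decodeNormValue(stored);  // replaces the removed static decodeNorm(byte)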
@ -21,13 +21,7 @@ import java.io.IOException;
|
|||
import java.io.Serializable;
|
||||
import java.util.Locale;
|
||||
|
||||
import org.apache.lucene.search.cache.ByteValuesCreator;
|
||||
import org.apache.lucene.search.cache.CachedArrayCreator;
|
||||
import org.apache.lucene.search.cache.DoubleValuesCreator;
|
||||
import org.apache.lucene.search.cache.FloatValuesCreator;
|
||||
import org.apache.lucene.search.cache.IntValuesCreator;
|
||||
import org.apache.lucene.search.cache.LongValuesCreator;
|
||||
import org.apache.lucene.search.cache.ShortValuesCreator;
|
||||
import org.apache.lucene.search.cache.*;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
/**
|
||||
|
@ -138,7 +132,7 @@ implements Serializable {
|
|||
* @throws IllegalArgumentException if the parser fails to
|
||||
* subclass an existing numeric parser, or field is null
|
||||
*
|
||||
* @deprecated use EntryCreator version
|
||||
* @deprecated (4.0) use EntryCreator version
|
||||
*/
|
||||
@Deprecated
|
||||
public SortField (String field, FieldCache.Parser parser) {
|
||||
|
@ -156,7 +150,7 @@ implements Serializable {
|
|||
* @throws IllegalArgumentException if the parser fails to
|
||||
* subclass an existing numeric parser, or field is null
|
||||
*
|
||||
* @deprecated use EntryCreator version
|
||||
* @deprecated (4.0) use EntryCreator version
|
||||
*/
|
||||
@Deprecated
|
||||
public SortField (String field, FieldCache.Parser parser, boolean reverse) {
|
||||
|
@ -314,7 +308,7 @@ implements Serializable {
|
|||
/** Returns the instance of a {@link FieldCache} parser that fits to the given sort type.
|
||||
* May return <code>null</code> if no parser was specified. Sorting is using the default parser then.
|
||||
* @return An instance of a {@link FieldCache} parser, or <code>null</code>.
|
||||
* @deprecated use getEntryCreator()
|
||||
* @deprecated (4.0) use getEntryCreator()
|
||||
*/
|
||||
@Deprecated
|
||||
public FieldCache.Parser getParser() {
|
||||
|
|
|
@ -20,10 +20,10 @@ package org.apache.lucene.search;
|
|||
import java.io.IOException;
|
||||
import java.text.Collator;
|
||||
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.util.ToStringUtils;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.util.AttributeSource;
|
||||
import org.apache.lucene.util.ToStringUtils;
|
||||
|
||||
/**
|
||||
* A Query that matches documents within an range of terms.
|
||||
|
@ -143,12 +143,6 @@ public class TermRangeQuery extends MultiTermQuery {
|
|||
lowerTerm, upperTerm, includeLower, includeUpper, collator);
|
||||
}
|
||||
|
||||
/** @deprecated */
|
||||
@Deprecated
|
||||
public String field() {
|
||||
return getField();
|
||||
}
|
||||
|
||||
/** Prints a user-readable version of this query. */
|
||||
@Override
|
||||
public String toString(String field) {
|
||||
|
|
|
@ -19,9 +19,9 @@ package org.apache.lucene.search.function;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.util.ReaderUtil;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.search.Explanation;
|
||||
import org.apache.lucene.util.ReaderUtil;
|
||||
|
||||
/** This class wraps another ValueSource, but protects
|
||||
* against accidental double RAM usage in FieldCache when
|
||||
|
@ -31,7 +31,7 @@ import org.apache.lucene.search.Explanation;
|
|||
* lookup, as it must resolve the incoming document to the
|
||||
* right sub-reader using a binary search.</p>
|
||||
*
|
||||
* @deprecated This class is temporary, to ease the
|
||||
* @deprecated (4.0) This class is temporary, to ease the
|
||||
* migration to segment-based searching. Please change your
|
||||
* code to not pass composite readers to these APIs. */
|
||||
|
||||
|
|
|

@@ -105,59 +105,6 @@ public abstract class DataOutput {
    writeBytes(utf8Result.bytes, 0, utf8Result.length);
  }

  /** Writes a sub sequence of characters from s as the old
   * format (modified UTF-8 encoded bytes).
   * @param s the source of the characters
   * @param start the first character in the sequence
   * @param length the number of characters in the sequence
   * @deprecated -- please pre-convert to utf8 bytes
   * instead or use {@link #writeString}
   */
  @Deprecated
  public void writeChars(String s, int start, int length)
       throws IOException {
    final int end = start + length;
    for (int i = start; i < end; i++) {
      final int code = s.charAt(i);
      if (code >= 0x01 && code <= 0x7F)
        writeByte((byte)code);
      else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
        writeByte((byte)(0xC0 | (code >> 6)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      } else {
        writeByte((byte)(0xE0 | (code >>> 12)));
        writeByte((byte)(0x80 | ((code >> 6) & 0x3F)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      }
    }
  }

  /** Writes a sub sequence of characters from char[] as
   * the old format (modified UTF-8 encoded bytes).
   * @param s the source of the characters
   * @param start the first character in the sequence
   * @param length the number of characters in the sequence
   * @deprecated -- please pre-convert to utf8 bytes instead or use {@link #writeString}
   */
  @Deprecated
  public void writeChars(char[] s, int start, int length)
    throws IOException {
    final int end = start + length;
    for (int i = start; i < end; i++) {
      final int code = s[i];
      if (code >= 0x01 && code <= 0x7F)
        writeByte((byte)code);
      else if (((code >= 0x80) && (code <= 0x7FF)) || code == 0) {
        writeByte((byte)(0xC0 | (code >> 6)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      } else {
        writeByte((byte)(0xE0 | (code >>> 12)));
        writeByte((byte)(0x80 | ((code >> 6) & 0x3F)));
        writeByte((byte)(0x80 | (code & 0x3F)));
      }
    }
  }

  private static int COPY_BUFFER_SIZE = 16384;
  private byte[] copyBuffer;
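The replacement the removed javadoc points at is writeString, which stores standard UTF-8 behind a VInt length prefix. A hypothetical sketch, where "out" and "in" are assumed, already-open IndexOutput/IndexInput instances:

  // Sketch only: write and read back a string without the legacy modified-UTF-8 path.
  out.writeString("some stored text");  // replaces the removed writeChars variants
  String roundTrip = in.readString();   // DataInput counterpart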

@@ -94,19 +94,6 @@ public abstract class Directory implements Closeable {
  public abstract IndexOutput createOutput(String name)
       throws IOException;

  /**
   * Ensure that any writes to this file are moved to
   * stable storage. Lucene uses this to properly commit
   * changes to the index, to prevent a machine/OS crash
   * from corrupting the index.
   * @deprecated use {@link #sync(Collection)} instead.
   * For easy migration you can change your code to call
   * sync(Collections.singleton(name))
   */
  @Deprecated
  public void sync(String name) throws IOException { // TODO 4.0 kill me
  }

  /**
   * Ensure that any writes to these files are moved to
   * stable storage. Lucene uses this to properly commit

@@ -118,10 +105,7 @@ public abstract class Directory implements Closeable {
   * For other impls the operation can be a noop, for various
   * reasons.
   */
  public void sync(Collection<String> names) throws IOException { // TODO 4.0 make me abstract
    for (String name : names)
      sync(name);
  }
  public abstract void sync(Collection<String> names) throws IOException;

  /** Returns a stream reading an existing file. */
  public abstract IndexInput openInput(String name)

@@ -232,41 +216,6 @@ public abstract class Directory implements Closeable {
    }
  }

  /**
   * Copy contents of a directory src to a directory dest. If a file in src
   * already exists in dest then the one in dest will be blindly overwritten.
   * <p>
   * <b>NOTE:</b> the source directory cannot change while this method is
   * running. Otherwise the results are undefined and you could easily hit a
   * FileNotFoundException.
   * <p>
   * <b>NOTE:</b> this method only copies files that look like index files (ie,
   * have extensions matching the known extensions of index files).
   *
   * @param src source directory
   * @param dest destination directory
   * @param closeDirSrc if <code>true</code>, call {@link #close()} method on
   *        source directory
   * @deprecated should be replaced with calls to
   *             {@link #copy(Directory, String, String)} for every file that
   *             needs copying. You can use the following code:
   *
   * <pre>
   * for (String file : src.listAll()) {
   *   src.copy(dest, file, file);
   * }
   * </pre>
   */
  @Deprecated
  public static void copy(Directory src, Directory dest, boolean closeDirSrc) throws IOException {
    for (String file : src.listAll()) {
      src.copy(dest, file, file);
    }
    if (closeDirSrc) {
      src.close();
    }
  }

  /**
   * @throws AlreadyClosedException if this Directory is closed
   */
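A hypothetical sketch of the Collection-based sync that now replaces sync(String) ("dir" is assumed to be an existing Directory; the file name is illustrative):

  // Sketch only: make a freshly written file durable before relying on it.
  IndexOutput out = dir.createOutput("checkpoint.bin");
  out.writeLong(42L);
  out.close();
  dir.sync(Collections.singleton("checkpoint.bin"));  // java.util.Collections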
@ -321,12 +321,6 @@ public abstract class FSDirectory extends Directory {
|
|||
staleFiles.add(io.name);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public void sync(String name) throws IOException {
|
||||
sync(Collections.singleton(name));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sync(Collection<String> names) throws IOException {
|
||||
ensureOpen();
|
||||
|
@ -383,12 +377,6 @@ public abstract class FSDirectory extends Directory {
|
|||
isOpen = false;
|
||||
}
|
||||
|
||||
/** @deprecated Use {@link #getDirectory} instead. */
|
||||
@Deprecated
|
||||
public File getFile() {
|
||||
return getDirectory();
|
||||
}
|
||||
|
||||
/** @return the underlying filesystem directory */
|
||||
public File getDirectory() {
|
||||
ensureOpen();
|
||||
|
|
|
@ -135,12 +135,6 @@ public class FileSwitchDirectory extends Directory {
|
|||
return getDirectory(name).createOutput(name);
|
||||
}
|
||||
|
||||
@Deprecated
|
||||
@Override
|
||||
public void sync(String name) throws IOException {
|
||||
sync(Collections.singleton(name));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void sync(Collection<String> names) throws IOException {
|
||||
List<String> primaryNames = new ArrayList<String>();
|
||||
|
|
|
@ -33,14 +33,7 @@ public class NoLockFactory extends LockFactory {
|
|||
private static NoLock singletonLock = new NoLock();
|
||||
private static NoLockFactory singleton = new NoLockFactory();
|
||||
|
||||
/**
|
||||
* @deprecated This constructor was not intended to be public and should not be used.
|
||||
* It will be made private in Lucene 4.0
|
||||
* @see #getNoLockFactory()
|
||||
*/
|
||||
// make private in 4.0!
|
||||
@Deprecated
|
||||
public NoLockFactory() {}
|
||||
private NoLockFactory() {}
|
||||
|
||||
public static NoLockFactory getNoLockFactory() {
|
||||
return singleton;
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.lucene.store;
|
|||
import java.io.IOException;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.Serializable;
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
@ -187,6 +188,9 @@ public class RAMDirectory extends Directory implements Serializable {
|
|||
return new RAMFile(this);
|
||||
}
|
||||
|
||||
public void sync(Collection<String> names) throws IOException {
|
||||
}
|
||||
|
||||
/** Returns a stream reading an existing file. */
|
||||
@Override
|
||||
public IndexInput openInput(String name) throws IOException {
|
||||
|
|
|
@ -37,19 +37,6 @@ import java.nio.ByteBuffer;
|
|||
* problem, a char is appended, indicating the number of encoded bytes in the
|
||||
* final content char.
|
||||
* <p/>
|
||||
* Some methods in this class are defined over CharBuffers and ByteBuffers, but
|
||||
* these are deprecated in favor of methods that operate directly on byte[] and
|
||||
* char[] arrays. Note that this class calls array() and arrayOffset()
|
||||
* on the CharBuffers and ByteBuffers it uses, so only wrapped arrays may be
|
||||
* used. This class interprets the arrayOffset() and limit() values returned
|
||||
* by its input buffers as beginning and end+1 positions on the wrapped array,
|
||||
* respectively; similarly, on the output buffer, arrayOffset() is the first
|
||||
* position written to, and limit() is set to one past the final output array
|
||||
* position.
|
||||
* <p/>
|
||||
* WARNING: This means that the deprecated Buffer-based methods
|
||||
* only work correctly with buffers that have an offset of 0. For example, they
|
||||
* will not correctly interpret buffers returned by {@link ByteBuffer#slice}.
|
||||
*
|
||||
* @lucene.experimental
|
||||
*/
|
||||
|
@ -71,28 +58,6 @@ public final class IndexableBinaryStringTools {
|
|||
// Export only static methods
|
||||
private IndexableBinaryStringTools() {}
|
||||
|
||||
/**
|
||||
* Returns the number of chars required to encode the given byte sequence.
|
||||
*
|
||||
* @param original The byte sequence to be encoded. Must be backed by an
|
||||
* array.
|
||||
* @return The number of chars required to encode the given byte sequence
|
||||
* @throws IllegalArgumentException If the given ByteBuffer is not backed by
|
||||
* an array
|
||||
* @deprecated Use {@link #getEncodedLength(byte[], int, int)} instead. This
|
||||
* method will be removed in Lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static int getEncodedLength(ByteBuffer original)
|
||||
throws IllegalArgumentException {
|
||||
if (original.hasArray()) {
|
||||
return getEncodedLength(original.array(), original.arrayOffset(),
|
||||
original.limit() - original.arrayOffset());
|
||||
} else {
|
||||
throw new IllegalArgumentException("original argument must have a backing array");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of chars required to encode the given bytes.
|
||||
*
|
||||
|
@ -107,28 +72,6 @@ public final class IndexableBinaryStringTools {
|
|||
return (int)((8L * inputLength + 14L) / 15L) + 1;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns the number of bytes required to decode the given char sequence.
|
||||
*
|
||||
* @param encoded The char sequence to be decoded. Must be backed by an array.
|
||||
* @return The number of bytes required to decode the given char sequence
|
||||
* @throws IllegalArgumentException If the given CharBuffer is not backed by
|
||||
* an array
|
||||
* @deprecated Use {@link #getDecodedLength(char[], int, int)} instead. This
|
||||
* method will be removed in Lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static int getDecodedLength(CharBuffer encoded)
|
||||
throws IllegalArgumentException {
|
||||
if (encoded.hasArray()) {
|
||||
return getDecodedLength(encoded.array(), encoded.arrayOffset(),
|
||||
encoded.limit() - encoded.arrayOffset());
|
||||
} else {
|
||||
throw new IllegalArgumentException("encoded argument must have a backing array");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of bytes required to decode the given char sequence.
|
||||
*
|
||||
|
@ -149,36 +92,6 @@ public final class IndexableBinaryStringTools {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes the input byte sequence into the output char sequence. Before
|
||||
* calling this method, ensure that the output CharBuffer has sufficient
|
||||
* capacity by calling {@link #getEncodedLength(java.nio.ByteBuffer)}.
|
||||
*
|
||||
* @param input The byte sequence to encode
|
||||
* @param output Where the char sequence encoding result will go. The limit is
|
||||
* set to one past the position of the final char.
|
||||
* @throws IllegalArgumentException If either the input or the output buffer
|
||||
* is not backed by an array
|
||||
* @deprecated Use {@link #encode(byte[], int, int, char[], int, int)}
|
||||
* instead. This method will be removed in Lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static void encode(ByteBuffer input, CharBuffer output) {
|
||||
if (input.hasArray() && output.hasArray()) {
|
||||
final int inputOffset = input.arrayOffset();
|
||||
final int inputLength = input.limit() - inputOffset;
|
||||
final int outputOffset = output.arrayOffset();
|
||||
final int outputLength = getEncodedLength(input.array(), inputOffset,
|
||||
inputLength);
|
||||
output.limit(outputLength + outputOffset);
|
||||
output.position(0);
|
||||
encode(input.array(), inputOffset, inputLength, output.array(),
|
||||
outputOffset, outputLength);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Arguments must have backing arrays");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes the input byte sequence into the output char sequence. Before
|
||||
* calling this method, ensure that the output array has sufficient
|
||||
|
@ -233,36 +146,6 @@ public final class IndexableBinaryStringTools {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decodes the input char sequence into the output byte sequence. Before
|
||||
* calling this method, ensure that the output ByteBuffer has sufficient
|
||||
* capacity by calling {@link #getDecodedLength(java.nio.CharBuffer)}.
|
||||
*
|
||||
* @param input The char sequence to decode
|
||||
* @param output Where the byte sequence decoding result will go. The limit is
|
||||
* set to one past the position of the final char.
|
||||
* @throws IllegalArgumentException If either the input or the output buffer
|
||||
* is not backed by an array
|
||||
* @deprecated Use {@link #decode(char[], int, int, byte[], int, int)}
|
||||
* instead. This method will be removed in Lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static void decode(CharBuffer input, ByteBuffer output) {
|
||||
if (input.hasArray() && output.hasArray()) {
|
||||
final int inputOffset = input.arrayOffset();
|
||||
final int inputLength = input.limit() - inputOffset;
|
||||
final int outputOffset = output.arrayOffset();
|
||||
final int outputLength = getDecodedLength(input.array(), inputOffset,
|
||||
inputLength);
|
||||
output.limit(outputLength + outputOffset);
|
||||
output.position(0);
|
||||
decode(input.array(), inputOffset, inputLength, output.array(),
|
||||
outputOffset, outputLength);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Arguments must have backing arrays");
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decodes the input char sequence into the output byte sequence. Before
|
||||
* calling this method, ensure that the output array has sufficient capacity
|
||||
|
@ -330,46 +213,6 @@ public final class IndexableBinaryStringTools {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decodes the given char sequence, which must have been encoded by
|
||||
* {@link #encode(java.nio.ByteBuffer)} or
|
||||
* {@link #encode(java.nio.ByteBuffer, java.nio.CharBuffer)}.
|
||||
*
|
||||
* @param input The char sequence to decode
|
||||
* @return A byte sequence containing the decoding result. The limit is set to
|
||||
* one past the position of the final char.
|
||||
* @throws IllegalArgumentException If the input buffer is not backed by an
|
||||
* array
|
||||
* @deprecated Use {@link #decode(char[], int, int, byte[], int, int)}
|
||||
* instead. This method will be removed in Lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static ByteBuffer decode(CharBuffer input) {
|
||||
byte[] outputArray = new byte[getDecodedLength(input)];
|
||||
ByteBuffer output = ByteBuffer.wrap(outputArray);
|
||||
decode(input, output);
|
||||
return output;
|
||||
}
|
||||
|
||||
/**
|
||||
* Encodes the input byte sequence.
|
||||
*
|
||||
* @param input The byte sequence to encode
|
||||
* @return A char sequence containing the encoding result. The limit is set to
|
||||
* one past the position of the final char.
|
||||
* @throws IllegalArgumentException If the input buffer is not backed by an
|
||||
* array
|
||||
* @deprecated Use {@link #encode(byte[], int, int, char[], int, int)}
|
||||
* instead. This method will be removed in Lucene 4.0
|
||||
*/
|
||||
@Deprecated
|
||||
public static CharBuffer encode(ByteBuffer input) {
|
||||
char[] outputArray = new char[getEncodedLength(input)];
|
||||
CharBuffer output = CharBuffer.wrap(outputArray);
|
||||
encode(input, output);
|
||||
return output;
|
||||
}
|
||||
|
||||
static class CodingCase {
|
||||
int numBytes, initialShift, middleShift, finalShift, advanceBytes = 2;
|
||||
short middleMask, finalMask;
|
||||
|
|
|
@ -17,10 +17,10 @@ package org.apache.lucene.util;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
|
||||
import org.apache.lucene.document.NumericField; // for javadocs
|
||||
import org.apache.lucene.analysis.NumericTokenStream;
|
||||
import org.apache.lucene.document.NumericField;
|
||||
import org.apache.lucene.search.NumericRangeFilter;
|
||||
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
|
||||
import org.apache.lucene.search.NumericRangeFilter; // for javadocs
|
||||
|
||||
// TODO: Remove the commented out methods before release!
|
||||
|
||||
|
@ -130,32 +130,6 @@ public final class NumericUtils {
|
|||
return hash;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
|
||||
* This is method is used by {@link LongRangeBuilder}.
|
||||
* @param val the numeric value
|
||||
* @param shift how many bits to strip from the right
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static String longToPrefixCoded(final long val, final int shift) {
|
||||
final BytesRef buffer = new BytesRef(BUF_SIZE_LONG);
|
||||
longToPrefixCoded(val, shift, buffer);
|
||||
return buffer.utf8ToString();
|
||||
}*/
|
||||
|
||||
/*
|
||||
* This is a convenience method, that returns prefix coded bits of a long without
|
||||
* reducing the precision. It can be used to store the full precision value as a
|
||||
* stored field in index.
|
||||
* <p>To decode, use {@link #prefixCodedToLong}.
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static String longToPrefixCoded(final long val) {
|
||||
return longToPrefixCoded(val, 0);
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
|
||||
* This is method is used by {@link NumericTokenStream}.
|
||||
|
@ -190,46 +164,6 @@ public final class NumericUtils {
|
|||
return hash;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
|
||||
* This is method is used by {@link IntRangeBuilder}.
|
||||
* @param val the numeric value
|
||||
* @param shift how many bits to strip from the right
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static String intToPrefixCoded(final int val, final int shift) {
|
||||
final BytesRef buffer = new BytesRef(BUF_SIZE_INT);
|
||||
intToPrefixCoded(val, shift, buffer);
|
||||
return buffer.utf8ToString();
|
||||
}*/
|
||||
|
||||
/*
|
||||
* This is a convenience method, that returns prefix coded bits of an int without
|
||||
* reducing the precision. It can be used to store the full precision value as a
|
||||
* stored field in index.
|
||||
* <p>To decode, use {@link #prefixCodedToInt}.
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static String intToPrefixCoded(final int val) {
|
||||
return intToPrefixCoded(val, 0);
|
||||
}*/
|
||||
|
||||
/*
|
||||
* Returns a long from prefixCoded characters.
|
||||
* Rightmost bits will be zero for lower precision codes.
|
||||
* This method can be used to decode e.g. a stored field.
|
||||
* @throws NumberFormatException if the supplied string is
|
||||
* not correctly prefix encoded.
|
||||
* @see #longToPrefixCoded(long)
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static long prefixCodedToLong(final String prefixCoded) {
|
||||
return prefixCodedToLong(new BytesRef(prefixCoded));
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Returns the shift value from a prefix encoded {@code long}.
|
||||
* @throws NumberFormatException if the supplied {@link BytesRef} is
|
||||
|
@ -278,21 +212,7 @@ public final class NumericUtils {
|
|||
return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns an int from prefixCoded characters.
|
||||
* Rightmost bits will be zero for lower precision codes.
|
||||
* This method can be used to decode a term's value.
|
||||
* @throws NumberFormatException if the supplied string is
|
||||
* not correctly prefix encoded.
|
||||
* @see #intToPrefixCoded(int)
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static int prefixCodedToInt(final String prefixCoded) {
|
||||
return prefixCodedToInt(new BytesRef(prefixCoded));
|
||||
}*/
|
||||
|
||||
/*
|
||||
/**
|
||||
* Returns an int from prefixCoded bytes.
|
||||
* Rightmost bits will be zero for lower precision codes.
|
||||
* This method can be used to decode a term's value.
|
||||
|
@ -329,16 +249,6 @@ public final class NumericUtils {
|
|||
return f;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience method: this just returns:
|
||||
* longToPrefixCoded(doubleToSortableLong(val))
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static String doubleToPrefixCoded(double val) {
|
||||
return longToPrefixCoded(doubleToSortableLong(val));
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Converts a sortable <code>long</code> back to a <code>double</code>.
|
||||
* @see #doubleToSortableLong
|
||||
|
@ -348,16 +258,6 @@ public final class NumericUtils {
|
|||
return Double.longBitsToDouble(val);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience method: this just returns:
|
||||
* sortableLongToDouble(prefixCodedToLong(val))
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static double prefixCodedToDouble(String val) {
|
||||
return sortableLongToDouble(prefixCodedToLong(val));
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Converts a <code>float</code> value to a sortable signed <code>int</code>.
|
||||
* The value is converted by getting their IEEE 754 floating-point "float format"
|
||||
|
@ -371,16 +271,6 @@ public final class NumericUtils {
|
|||
return f;
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience method: this just returns:
|
||||
* intToPrefixCoded(floatToSortableInt(val))
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static String floatToPrefixCoded(float val) {
|
||||
return intToPrefixCoded(floatToSortableInt(val));
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Converts a sortable <code>int</code> back to a <code>float</code>.
|
||||
* @see #floatToSortableInt
|
||||
|
@ -390,16 +280,6 @@ public final class NumericUtils {
|
|||
return Float.intBitsToFloat(val);
|
||||
}
|
||||
|
||||
/*
|
||||
* Convenience method: this just returns:
|
||||
* sortableIntToFloat(prefixCodedToInt(val))
|
||||
* @deprecated This method is no longer needed!
|
||||
*
|
||||
@Deprecated
|
||||
public static float prefixCodedToFloat(String val) {
|
||||
return sortableIntToFloat(prefixCodedToInt(val));
|
||||
}*/
|
||||
|
||||
/**
|
||||
* Splits a long range recursively.
|
||||
* You may implement a builder that adds clauses to a
|
||||
|
|
|
@ -44,7 +44,7 @@ public abstract class StringHelper {
|
|||
* @param bytes2 The second byte[] to compare
|
||||
* @return The number of common elements.
|
||||
*/
|
||||
public static final int bytesDifference(byte[] bytes1, int len1, byte[] bytes2, int len2) {
|
||||
public static int bytesDifference(byte[] bytes1, int len1, byte[] bytes2, int len2) {
|
||||
int len = len1 < len2 ? len1 : len2;
|
||||
for (int i = 0; i < len; i++)
|
||||
if (bytes1[i] != bytes2[i])
|
||||
|
@ -52,29 +52,6 @@ public abstract class StringHelper {
|
|||
return len;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compares two strings, character by character, and returns the
|
||||
* first position where the two strings differ from one another.
|
||||
*
|
||||
* @param s1 The first string to compare
|
||||
* @param s2 The second string to compare
|
||||
* @return The first position where the two strings differ.
|
||||
*
|
||||
* @deprecated This method cannot handle supplementary characters.
|
||||
*/
|
||||
@Deprecated
|
||||
public static final int stringDifference(String s1, String s2) {
|
||||
int len1 = s1.length();
|
||||
int len2 = s2.length();
|
||||
int len = len1 < len2 ? len1 : len2;
|
||||
for (int i = 0; i < len; i++) {
|
||||
if (s1.charAt(i) != s2.charAt(i)) {
|
||||
return i;
|
||||
}
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
private StringHelper() {
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,29 +28,18 @@ package org.apache.lucene.util;
|
|||
* your indexing code to match, and re-index.
|
||||
*/
|
||||
public enum Version {
|
||||
|
||||
/** Match settings and bugs in Lucene's 2.0 release. */
|
||||
LUCENE_20,
|
||||
|
||||
/** Match settings and bugs in Lucene's 2.1 release. */
|
||||
LUCENE_21,
|
||||
|
||||
/** Match settings and bugs in Lucene's 2.2 release. */
|
||||
LUCENE_22,
|
||||
|
||||
/** Match settings and bugs in Lucene's 2.3 release. */
|
||||
LUCENE_23,
|
||||
|
||||
/** Match settings and bugs in Lucene's 2.4 release. */
|
||||
LUCENE_24,
|
||||
|
||||
/** Match settings and bugs in Lucene's 2.9 release. */
|
||||
LUCENE_29,
|
||||
|
||||
/** Match settings and bugs in Lucene's 3.0 release. */
|
||||
/**
|
||||
* Match settings and bugs in Lucene's 3.0 release.
|
||||
* @deprecated (4.0) Use latest
|
||||
*/
|
||||
@Deprecated
|
||||
LUCENE_30,
|
||||
|
||||
/** Match settings and bugs in Lucene's 3.1 release. */
|
||||
/**
|
||||
* Match settings and bugs in Lucene's 3.1 release.
|
||||
* @deprecated (4.0) Use latest
|
||||
*/
|
||||
@Deprecated
|
||||
LUCENE_31,
|
||||
|
||||
/** Match settings and bugs in Lucene's 4.0 release.
|
||||
|
|
|
@ -620,10 +620,12 @@ public class TestExternalCodecs extends LuceneTestCase {
|
|||
|
||||
final int NUM_DOCS = 173;
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, true)).setCodecProvider(provider));
|
||||
|
||||
w.setMergeFactor(3);
|
||||
IndexWriter w = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, true, true)).
|
||||
setCodecProvider(provider).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
Document doc = new Document();
|
||||
// uses default codec:
|
||||
doc.add(newField("field1", "this field uses the standard codec as the test", Field.Store.NO, Field.Index.ANALYZED));
|
||||
|
|
|
@ -114,29 +114,15 @@ public class TestSearch extends LuceneTestCase {
|
|||
Query query = parser.parse(queries[j]);
|
||||
out.println("Query: " + query.toString("contents"));
|
||||
|
||||
//DateFilter filter =
|
||||
// new DateFilter("modified", Time(1997,0,1), Time(1998,0,1));
|
||||
//DateFilter filter = DateFilter.Before("modified", Time(1997,00,01));
|
||||
//System.out.println(filter);
|
||||
|
||||
hits = searcher.search(query, null, 1000).scoreDocs;
|
||||
|
||||
out.println(hits.length + " total results");
|
||||
for (int i = 0 ; i < hits.length && i < 10; i++) {
|
||||
Document d = searcher.doc(hits[i].doc);
|
||||
out.println(i + " " + hits[i].score
|
||||
// + " " + DateField.stringToDate(d.get("modified"))
|
||||
+ " " + d.get("contents"));
|
||||
out.println(i + " " + hits[i].score + " " + d.get("contents"));
|
||||
}
|
||||
}
|
||||
searcher.close();
|
||||
directory.close();
|
||||
}
|
||||
|
||||
static long Time(int year, int month, int day) {
|
||||
GregorianCalendar calendar = new GregorianCalendar();
|
||||
calendar.clear();
|
||||
calendar.set(year, month, day);
|
||||
return calendar.getTime().getTime();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,10 +18,8 @@ package org.apache.lucene.analysis;
|
|||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.Reader;
|
||||
import java.io.StringReader;
|
||||
|
||||
import org.apache.lucene.util.Version;
|
||||
|
||||
/**
|
||||
* Testcase for {@link CharTokenizer} subclasses
|
||||
|
@@ -92,96 +90,4 @@ public class TestCharTokenizers extends BaseTokenStreamTestCase {
    MockTokenizer tokenizer = new MockTokenizer(new StringReader(builder.toString() + builder.toString()), MockTokenizer.SIMPLE, true);
    assertTokenStreamContents(tokenizer, new String[] {builder.toString().toLowerCase(), builder.toString().toLowerCase()});
  }

  public void testIsTokenCharCharInSubclass() {
    new TestingCharTokenizer(Version.LUCENE_30, new StringReader(""));
    try {
      new TestingCharTokenizer(TEST_VERSION_CURRENT, new StringReader(""));
      fail("version 3.1 is not permitted if char based method is implemented");
    } catch (IllegalArgumentException e) {
      // expected
    }
  }

  public void testNormalizeCharInSubclass() {
    new TestingCharTokenizerNormalize(Version.LUCENE_30, new StringReader(""));
    try {
      new TestingCharTokenizerNormalize(TEST_VERSION_CURRENT,
          new StringReader(""));
      fail("version 3.1 is not permitted if char based method is implemented");
    } catch (IllegalArgumentException e) {
      // expected
    }
  }

  public void testNormalizeAndIsTokenCharCharInSubclass() {
    new TestingCharTokenizerNormalizeIsTokenChar(Version.LUCENE_30,
        new StringReader(""));
    try {
      new TestingCharTokenizerNormalizeIsTokenChar(TEST_VERSION_CURRENT,
          new StringReader(""));
      fail("version 3.1 is not permitted if char based method is implemented");
    } catch (IllegalArgumentException e) {
      // expected
    }
  }

  static final class TestingCharTokenizer extends CharTokenizer {
    public TestingCharTokenizer(Version matchVersion, Reader input) {
      super(matchVersion, input);
    }

    @Override
    protected boolean isTokenChar(int c) {
      return Character.isLetter(c);
    }

    @Deprecated @Override
    protected boolean isTokenChar(char c) {
      return Character.isLetter(c);
    }
  }

  static final class TestingCharTokenizerNormalize extends CharTokenizer {
    public TestingCharTokenizerNormalize(Version matchVersion, Reader input) {
      super(matchVersion, input);
    }

    @Deprecated @Override
    protected char normalize(char c) {
      return c;
    }

    @Override
    protected int normalize(int c) {
      return c;
    }
  }

  static final class TestingCharTokenizerNormalizeIsTokenChar extends CharTokenizer {
    public TestingCharTokenizerNormalizeIsTokenChar(Version matchVersion,
        Reader input) {
      super(matchVersion, input);
    }

    @Deprecated @Override
    protected char normalize(char c) {
      return c;
    }

    @Override
    protected int normalize(int c) {
      return c;
    }

    @Override
    protected boolean isTokenChar(int c) {
      return Character.isLetter(c);
    }

    @Deprecated @Override
    protected boolean isTokenChar(char c) {
      return Character.isLetter(c);
    }
  }
}
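The three nested classes above exist to prove that, from 3.1 on, a CharTokenizer subclass may not override the deprecated char-based hooks with the current version; only the int (code point) based methods are accepted. A minimal non-deprecated subclass, modeled on TestingCharTokenizer, would look like this:

import java.io.Reader;
import org.apache.lucene.analysis.CharTokenizer;
import org.apache.lucene.util.Version;

// Letter-only tokenizer using the code-point API; because it overrides neither
// isTokenChar(char) nor normalize(char), it is valid with Version 3.1 and later.
final class LetterOnlyTokenizer extends CharTokenizer {
  LetterOnlyTokenizer(Version matchVersion, Reader input) {
    super(matchVersion, input);
  }

  @Override
  protected boolean isTokenChar(int c) {
    return Character.isLetter(c);
  }

  @Override
  protected int normalize(int c) {
    return Character.toLowerCase(c);
  }
}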
@@ -176,20 +176,20 @@ public class TestToken extends LuceneTestCase {
    char[] content = "hello".toCharArray();
    t.copyBuffer(content, 0, 5);
    char[] buf = t.buffer();
    Token copy = (Token) TestSimpleAttributeImpls.assertCloneIsEqual(t);
    Token copy = assertCloneIsEqual(t);
    assertEquals(t.toString(), copy.toString());
    assertNotSame(buf, copy.buffer());

    Payload pl = new Payload(new byte[]{1,2,3,4});
    t.setPayload(pl);
    copy = (Token) TestSimpleAttributeImpls.assertCloneIsEqual(t);
    copy = assertCloneIsEqual(t);
    assertEquals(pl, copy.getPayload());
    assertNotSame(pl, copy.getPayload());
  }

  public void testCopyTo() throws Exception {
    Token t = new Token();
    Token copy = (Token) TestSimpleAttributeImpls.assertCopyIsEqual(t);
    Token copy = assertCopyIsEqual(t);
    assertEquals("", t.toString());
    assertEquals("", copy.toString());

@@ -197,13 +197,13 @@ public class TestToken extends LuceneTestCase {
    char[] content = "hello".toCharArray();
    t.copyBuffer(content, 0, 5);
    char[] buf = t.buffer();
    copy = (Token) TestSimpleAttributeImpls.assertCopyIsEqual(t);
    copy = assertCopyIsEqual(t);
    assertEquals(t.toString(), copy.toString());
    assertNotSame(buf, copy.buffer());

    Payload pl = new Payload(new byte[]{1,2,3,4});
    t.setPayload(pl);
    copy = (Token) TestSimpleAttributeImpls.assertCopyIsEqual(t);
    copy = assertCopyIsEqual(t);
    assertEquals(pl, copy.getPayload());
    assertNotSame(pl, copy.getPayload());
  }
@@ -240,4 +240,21 @@ public class TestToken extends LuceneTestCase {
    assertTrue("TypeAttribute is not implemented by Token",
      ts.addAttribute(TypeAttribute.class) instanceof Token);
  }

  public static <T extends AttributeImpl> T assertCloneIsEqual(T att) {
    @SuppressWarnings("unchecked")
    T clone = (T) att.clone();
    assertEquals("Clone must be equal", att, clone);
    assertEquals("Clone's hashcode must be equal", att.hashCode(), clone.hashCode());
    return clone;
  }

  public static <T extends AttributeImpl> T assertCopyIsEqual(T att) throws Exception {
    @SuppressWarnings("unchecked")
    T copy = (T) att.getClass().newInstance();
    att.copyTo(copy);
    assertEquals("Copied instance must be equal", att, copy);
    assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
    return copy;
  }
}
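Because TestSimpleAttributeImpls is deleted later in this commit, its clone/copy assertions move here as generic helpers, so callers get back the concrete type and can drop the casts. A small usage sketch (hypothetical test code, not part of the diff):

import org.apache.lucene.analysis.TestToken;
import org.apache.lucene.analysis.Token;

public class CloneCopyUsageSketch {
  public static void exercise() throws Exception {
    Token t = new Token();
    // The generic signatures infer Token, so no (Token) cast is required,
    // unlike the old AttributeImpl-returning helpers being removed.
    Token cloned = TestToken.assertCloneIsEqual(t);
    Token copied = TestToken.assertCopyIsEqual(t);
  }
}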
@@ -17,6 +17,7 @@ package org.apache.lucene.analysis.tokenattributes;
 * limitations under the License.
 */

import org.apache.lucene.analysis.TestToken;
import org.apache.lucene.util.LuceneTestCase;
import java.nio.CharBuffer;
import java.util.Formatter;
@@ -91,7 +92,7 @@ public class TestCharTermAttributeImpl extends LuceneTestCase {
    char[] content = "hello".toCharArray();
    t.copyBuffer(content, 0, 5);
    char[] buf = t.buffer();
    CharTermAttributeImpl copy = (CharTermAttributeImpl) TestSimpleAttributeImpls.assertCloneIsEqual(t);
    CharTermAttributeImpl copy = TestToken.assertCloneIsEqual(t);
    assertEquals(t.toString(), copy.toString());
    assertNotSame(buf, copy.buffer());
  }
@@ -113,7 +114,7 @@ public class TestCharTermAttributeImpl extends LuceneTestCase {

  public void testCopyTo() throws Exception {
    CharTermAttributeImpl t = new CharTermAttributeImpl();
    CharTermAttributeImpl copy = (CharTermAttributeImpl) TestSimpleAttributeImpls.assertCopyIsEqual(t);
    CharTermAttributeImpl copy = TestToken.assertCopyIsEqual(t);
    assertEquals("", t.toString());
    assertEquals("", copy.toString());

@@ -121,7 +122,7 @@ public class TestCharTermAttributeImpl extends LuceneTestCase {
    char[] content = "hello".toCharArray();
    t.copyBuffer(content, 0, 5);
    char[] buf = t.buffer();
    copy = (CharTermAttributeImpl) TestSimpleAttributeImpls.assertCopyIsEqual(t);
    copy = TestToken.assertCopyIsEqual(t);
    assertEquals(t.toString(), copy.toString());
    assertNotSame(buf, copy.buffer());
  }
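TestCharTermAttributeImpl exercises CharTermAttribute, the trunk replacement for the old TermAttribute. For orientation, a minimal consumer loop over a TokenStream, assuming the standard addAttribute/reset/incrementToken contract:

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TermAttributeConsumerSketch {
  public static void printTerms(String text) throws IOException {
    TokenStream ts = new MockAnalyzer().tokenStream("field", new StringReader(text));
    // The attribute instance is reused; each incrementToken() call refills it.
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(termAtt.toString());
    }
    ts.end();
    ts.close();
  }
}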
@@ -1,153 +0,0 @@
package org.apache.lucene.analysis.tokenattributes;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.index.Payload;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.AttributeSource.AttributeFactory;

@Deprecated
public class TestSimpleAttributeImpls extends LuceneTestCase {

  public void testFlagsAttribute() throws Exception {
    FlagsAttributeImpl att = new FlagsAttributeImpl();
    assertEquals(0, att.getFlags());

    att.setFlags(1234);
    assertEquals("flags=1234", att.toString());

    FlagsAttributeImpl att2 = (FlagsAttributeImpl) assertCloneIsEqual(att);
    assertEquals(1234, att2.getFlags());

    att2 = (FlagsAttributeImpl) assertCopyIsEqual(att);
    assertEquals(1234, att2.getFlags());

    att.clear();
    assertEquals(0, att.getFlags());
  }

  public void testPositionIncrementAttribute() throws Exception {
    PositionIncrementAttributeImpl att = new PositionIncrementAttributeImpl();
    assertEquals(1, att.getPositionIncrement());

    att.setPositionIncrement(1234);
    assertEquals("positionIncrement=1234", att.toString());

    PositionIncrementAttributeImpl att2 = (PositionIncrementAttributeImpl) assertCloneIsEqual(att);
    assertEquals(1234, att2.getPositionIncrement());

    att2 = (PositionIncrementAttributeImpl) assertCopyIsEqual(att);
    assertEquals(1234, att2.getPositionIncrement());

    att.clear();
    assertEquals(1, att.getPositionIncrement());
  }

  public void testTypeAttribute() throws Exception {
    TypeAttributeImpl att = new TypeAttributeImpl();
    assertEquals(TypeAttribute.DEFAULT_TYPE, att.type());

    att.setType("hallo");
    assertEquals("type=hallo", att.toString());

    TypeAttributeImpl att2 = (TypeAttributeImpl) assertCloneIsEqual(att);
    assertEquals("hallo", att2.type());

    att2 = (TypeAttributeImpl) assertCopyIsEqual(att);
    assertEquals("hallo", att2.type());

    att.clear();
    assertEquals(TypeAttribute.DEFAULT_TYPE, att.type());
  }

  public void testPayloadAttribute() throws Exception {
    PayloadAttributeImpl att = new PayloadAttributeImpl();
    assertNull(att.getPayload());

    Payload pl = new Payload(new byte[]{1,2,3,4});
    att.setPayload(pl);

    PayloadAttributeImpl att2 = (PayloadAttributeImpl) assertCloneIsEqual(att);
    assertEquals(pl, att2.getPayload());
    assertNotSame(pl, att2.getPayload());

    att2 = (PayloadAttributeImpl) assertCopyIsEqual(att);
    assertEquals(pl, att2.getPayload());
    assertNotSame(pl, att2.getPayload());

    att.clear();
    assertNull(att.getPayload());
  }

  public void testOffsetAttribute() throws Exception {
    OffsetAttributeImpl att = new OffsetAttributeImpl();
    assertEquals(0, att.startOffset());
    assertEquals(0, att.endOffset());

    att.setOffset(12, 34);
    // no string test here, because order unknown

    OffsetAttributeImpl att2 = (OffsetAttributeImpl) assertCloneIsEqual(att);
    assertEquals(12, att2.startOffset());
    assertEquals(34, att2.endOffset());

    att2 = (OffsetAttributeImpl) assertCopyIsEqual(att);
    assertEquals(12, att2.startOffset());
    assertEquals(34, att2.endOffset());

    att.clear();
    assertEquals(0, att.startOffset());
    assertEquals(0, att.endOffset());
  }

  public void testKeywordAttribute() {
    AttributeImpl attrImpl = AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY.createAttributeInstance(KeywordAttribute.class);
    assertSame(KeywordAttributeImpl.class, attrImpl.getClass());
    KeywordAttributeImpl att = (KeywordAttributeImpl) attrImpl;
    assertFalse(att.isKeyword());
    att.setKeyword(true);
    assertTrue(att.isKeyword());

    KeywordAttributeImpl assertCloneIsEqual = (KeywordAttributeImpl) assertCloneIsEqual(att);
    assertTrue(assertCloneIsEqual.isKeyword());
    assertCloneIsEqual.clear();
    assertFalse(assertCloneIsEqual.isKeyword());
    assertTrue(att.isKeyword());

    att.copyTo(assertCloneIsEqual);
    assertTrue(assertCloneIsEqual.isKeyword());
    assertTrue(att.isKeyword());
  }

  public static final AttributeImpl assertCloneIsEqual(AttributeImpl att) {
    AttributeImpl clone = (AttributeImpl) att.clone();
    assertEquals("Clone must be equal", att, clone);
    assertEquals("Clone's hashcode must be equal", att.hashCode(), clone.hashCode());
    return clone;
  }

  public static final AttributeImpl assertCopyIsEqual(AttributeImpl att) throws Exception {
    AttributeImpl copy = att.getClass().newInstance();
    att.copyTo(copy);
    assertEquals("Copied instance must be equal", att, copy);
    assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
    return copy;
  }

}
@@ -37,14 +37,6 @@ public class TestBinaryDocument extends LuceneTestCase {
    Fieldable binaryFldStored = new Field("binaryStored", binaryValStored.getBytes());
    Fieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO);

    try {
      // binary fields with store off are not allowed
      new Field("fail", binaryValStored.getBytes(), Field.Store.NO);
      fail();
    }
    catch (IllegalArgumentException iae) {
    }

    Document doc = new Document();

    doc.add(binaryFldStored);
@@ -1,82 +0,0 @@
package org.apache.lucene.document;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.util.LuceneTestCase;

public class TestNumberTools extends LuceneTestCase {
  public void testNearZero() {
    for (int i = -100; i <= 100; i++) {
      for (int j = -100; j <= 100; j++) {
        subtestTwoLongs(i, j);
      }
    }
  }

  public void testMax() {
    // make sure the constants convert to their equivalents
    assertEquals(Long.MAX_VALUE, NumberTools
        .stringToLong(NumberTools.MAX_STRING_VALUE));
    assertEquals(NumberTools.MAX_STRING_VALUE, NumberTools
        .longToString(Long.MAX_VALUE));

    // test near MAX, too
    for (long l = Long.MAX_VALUE; l > Long.MAX_VALUE - 10000; l--) {
      subtestTwoLongs(l, l - 1);
    }
  }

  public void testMin() {
    // make sure the constants convert to their equivalents
    assertEquals(Long.MIN_VALUE, NumberTools
        .stringToLong(NumberTools.MIN_STRING_VALUE));
    assertEquals(NumberTools.MIN_STRING_VALUE, NumberTools
        .longToString(Long.MIN_VALUE));

    // test near MIN, too
    for (long l = Long.MIN_VALUE; l < Long.MIN_VALUE + 10000; l++) {
      subtestTwoLongs(l, l + 1);
    }
  }

  private static void subtestTwoLongs(long i, long j) {
    // convert to strings
    String a = NumberTools.longToString(i);
    String b = NumberTools.longToString(j);

    // are they the right length?
    assertEquals(NumberTools.STR_SIZE, a.length());
    assertEquals(NumberTools.STR_SIZE, b.length());

    // are they the right order?
    if (i < j) {
      assertTrue(a.compareTo(b) < 0);
    } else if (i > j) {
      assertTrue(a.compareTo(b) > 0);
    } else {
      assertEquals(a, b);
    }

    // can we convert them back to longs?
    long i2 = NumberTools.stringToLong(a);
    long j2 = NumberTools.stringToLong(b);

    assertEquals(i, i2);
    assertEquals(j, j2);
  }
}
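TestNumberTools can be dropped because NumberTools' fixed-width string encoding of longs was superseded by the trie-based numeric field support. A rough sketch of indexing a long the newer way, assuming the NumericField API available in this era:

import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericField;

public class NumericFieldSketch {
  public static Document docWithLong(long value) {
    Document doc = new Document();
    // NumericField trie-encodes the value for range queries, replacing the
    // sortable string form that NumberTools.longToString produced.
    doc.add(new NumericField("modified").setLongValue(value));
    return doc;
  }
}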
@@ -132,13 +132,14 @@ public class Test2BTerms extends LuceneTestCase {
    int TERMS_PER_DOC = 1000000;

    Directory dir = FSDirectory.open(_TestUtil.getTempDir("2BTerms"));
    IndexWriter w = new IndexWriter(dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
            .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)
            .setRAMBufferSizeMB(256.0).setMergeScheduler(new ConcurrentMergeScheduler()));
    ((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundFile(false);
    ((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
    ((LogMergePolicy) w.getConfig().getMergePolicy()).setMergeFactor(10);
    IndexWriter w = new IndexWriter(
        dir,
        new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
            setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH).
            setRAMBufferSizeMB(256.0).
            setMergeScheduler(new ConcurrentMergeScheduler()).
            setMergePolicy(newLogMergePolicy(false, 10))
    );

    Document doc = new Document();
    Field field = new Field("field", new MyTokenStream(TERMS_PER_DOC));
@ -58,9 +58,12 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
writer.close();
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
||||
writer = newWriter(aux, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
|
||||
writer = newWriter(
|
||||
aux,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
// add 40 documents in separate files
|
||||
addDocs(writer, 40);
|
||||
assertEquals(40, writer.maxDoc());
|
||||
|
@ -75,7 +78,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
// test doc count before segments are merged
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
assertEquals(100, writer.maxDoc());
|
||||
writer.addIndexes(new Directory[] { aux, aux2 });
|
||||
writer.addIndexes(aux, aux2);
|
||||
assertEquals(190, writer.maxDoc());
|
||||
writer.close();
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
@ -97,7 +100,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
// test doc count before segments are merged/index is optimized
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
assertEquals(190, writer.maxDoc());
|
||||
writer.addIndexes(new Directory[] { aux3 });
|
||||
writer.addIndexes(aux3);
|
||||
assertEquals(230, writer.maxDoc());
|
||||
writer.close();
|
||||
|
||||
|
@ -128,7 +131,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
assertEquals(230, writer.maxDoc());
|
||||
writer.addIndexes(new Directory[] { aux4 });
|
||||
writer.addIndexes(aux4);
|
||||
assertEquals(231, writer.maxDoc());
|
||||
writer.close();
|
||||
|
||||
|
@ -150,7 +153,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
setUpDirs(dir, aux);
|
||||
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
writer.addIndexes(new Directory[] {aux});
|
||||
writer.addIndexes(aux);
|
||||
|
||||
// Adds 10 docs, then replaces them with another 10
|
||||
// docs, so 10 pending deletes:
|
||||
|
@ -197,7 +200,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
writer.updateDocument(new Term("id", "" + (i%10)), doc);
|
||||
}
|
||||
|
||||
writer.addIndexes(new Directory[] {aux});
|
||||
writer.addIndexes(aux);
|
||||
|
||||
// Deletes one of the 10 added docs, leaving 9:
|
||||
PhraseQuery q = new PhraseQuery();
|
||||
|
@ -242,7 +245,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
q.add(new Term("content", "14"));
|
||||
writer.deleteDocuments(q);
|
||||
|
||||
writer.addIndexes(new Directory[] {aux});
|
||||
writer.addIndexes(aux);
|
||||
|
||||
writer.optimize();
|
||||
writer.commit();
|
||||
|
@ -271,22 +274,30 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
assertEquals(100, writer.maxDoc());
|
||||
writer.close();
|
||||
|
||||
writer = newWriter(aux, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
|
||||
writer = newWriter(
|
||||
aux,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(1000).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
// add 140 documents in separate files
|
||||
addDocs(writer, 40);
|
||||
writer.close();
|
||||
writer = newWriter(aux, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(1000));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
|
||||
writer = newWriter(
|
||||
aux,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(1000).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
addDocs(writer, 100);
|
||||
writer.close();
|
||||
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
try {
|
||||
// cannot add self
|
||||
writer.addIndexes(new Directory[] { aux, dir });
|
||||
writer.addIndexes(aux, dir);
|
||||
assertTrue(false);
|
||||
}
|
||||
catch (IllegalArgumentException e) {
|
||||
|
@ -311,13 +322,16 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
setUpDirs(dir, aux);
|
||||
|
||||
IndexWriter writer = newWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
IndexWriter writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(10).
|
||||
setMergePolicy(newLogMergePolicy(4))
|
||||
);
|
||||
addDocs(writer, 10);
|
||||
|
||||
writer.addIndexes(new Directory[] { aux });
|
||||
writer.addIndexes(aux);
|
||||
assertEquals(1040, writer.maxDoc());
|
||||
assertEquals(1000, writer.getDocCount(0));
|
||||
writer.close();
|
||||
|
@ -337,11 +351,16 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
setUpDirs(dir, aux);
|
||||
|
||||
IndexWriter writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(9));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
IndexWriter writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(9).
|
||||
setMergePolicy(newLogMergePolicy(4))
|
||||
);
|
||||
addDocs(writer, 2);
|
||||
|
||||
writer.addIndexes(new Directory[] { aux });
|
||||
writer.addIndexes(aux);
|
||||
assertEquals(1032, writer.maxDoc());
|
||||
assertEquals(1000, writer.getDocCount(0));
|
||||
writer.close();
|
||||
|
@ -361,12 +380,15 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
setUpDirs(dir, aux);
|
||||
|
||||
IndexWriter writer = newWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
IndexWriter writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(10).
|
||||
setMergePolicy(newLogMergePolicy(4))
|
||||
);
|
||||
|
||||
writer.addIndexes(new Directory[] { aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)) });
|
||||
writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)));
|
||||
assertEquals(1060, writer.maxDoc());
|
||||
assertEquals(1000, writer.getDocCount(0));
|
||||
writer.close();
|
||||
|
@ -393,12 +415,15 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
assertEquals(10, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
IndexWriter writer = newWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(4));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
IndexWriter writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(4).
|
||||
setMergePolicy(newLogMergePolicy(4))
|
||||
);
|
||||
|
||||
writer.addIndexes(new Directory[] { aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)) });
|
||||
writer.addIndexes(aux, new MockDirectoryWrapper(random, new RAMDirectory(aux)));
|
||||
assertEquals(1060, writer.maxDoc());
|
||||
assertEquals(1000, writer.getDocCount(0));
|
||||
writer.close();
|
||||
|
@ -416,11 +441,14 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
|
||||
setUpDirs(dir, aux);
|
||||
|
||||
IndexWriter writer = newWriter(aux2, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
|
||||
writer.addIndexes(new Directory[] { aux });
|
||||
IndexWriter writer = newWriter(
|
||||
aux2,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(100).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
writer.addIndexes(aux);
|
||||
assertEquals(30, writer.maxDoc());
|
||||
assertEquals(3, writer.getSegmentCount());
|
||||
writer.close();
|
||||
|
@ -439,11 +467,15 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
assertEquals(22, reader.numDocs());
|
||||
reader.close();
|
||||
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(6));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(4);
|
||||
writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(6).
|
||||
setMergePolicy(newLogMergePolicy(4))
|
||||
);
|
||||
|
||||
writer.addIndexes(new Directory[] { aux, aux2 });
|
||||
writer.addIndexes(aux, aux2);
|
||||
assertEquals(1060, writer.maxDoc());
|
||||
assertEquals(1000, writer.getDocCount(0));
|
||||
writer.close();
|
||||
|
@ -505,18 +537,24 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
assertEquals(1, writer.getSegmentCount());
|
||||
writer.close();
|
||||
|
||||
writer = newWriter(aux, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(100));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
|
||||
writer = newWriter(
|
||||
aux,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(1000).
|
||||
setMergePolicy(newLogMergePolicy(false, 10))
|
||||
);
|
||||
// add 30 documents in 3 segments
|
||||
for (int i = 0; i < 3; i++) {
|
||||
addDocs(writer, 10);
|
||||
writer.close();
|
||||
writer = newWriter(aux, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(100));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
|
||||
writer = newWriter(
|
||||
aux,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(1000).
|
||||
setMergePolicy(newLogMergePolicy(false, 10))
|
||||
);
|
||||
}
|
||||
assertEquals(30, writer.maxDoc());
|
||||
assertEquals(3, writer.getSegmentCount());
|
||||
|
@ -563,7 +601,7 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
writer = new IndexWriter(dir2, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer())
|
||||
.setMergeScheduler(new SerialMergeScheduler()).setMergePolicy(lmp));
|
||||
writer.addIndexes(new Directory[] {dir});
|
||||
writer.addIndexes(dir);
|
||||
writer.close();
|
||||
dir.close();
|
||||
dir2.close();
|
||||
|
@ -920,22 +958,26 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
writer.close();
|
||||
_TestUtil.checkIndex(dir, provider);
|
||||
|
||||
writer = newWriter(aux, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setCodecProvider(
|
||||
provider));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy())
|
||||
.setUseCompoundFile(false); // use one without a compound file
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy())
|
||||
.setUseCompoundDocStore(false); // use one without a compound file
|
||||
writer = newWriter(
|
||||
aux,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setCodecProvider(provider).
|
||||
setMaxBufferedDocs(10).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
// add 40 documents in separate files
|
||||
addDocs(writer, 40);
|
||||
assertEquals(40, writer.maxDoc());
|
||||
writer.commit();
|
||||
writer.close();
|
||||
|
||||
writer = newWriter(aux2, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer()).setOpenMode(OpenMode.CREATE).setCodecProvider(
|
||||
provider));
|
||||
writer = newWriter(
|
||||
aux2,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setCodecProvider(provider)
|
||||
);
|
||||
// add 40 documents in compound files
|
||||
addDocs2(writer, 50);
|
||||
assertEquals(50, writer.maxDoc());
|
||||
|
@ -943,11 +985,14 @@ public class TestAddIndexes extends LuceneTestCase {
|
|||
writer.close();
|
||||
|
||||
// test doc count before segments are merged
|
||||
writer = newWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer()).setOpenMode(OpenMode.APPEND).setCodecProvider(
|
||||
provider));
|
||||
writer = newWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setCodecProvider(provider)
|
||||
);
|
||||
assertEquals(100, writer.maxDoc());
|
||||
writer.addIndexes(new Directory[] { aux, aux2 });
|
||||
writer.addIndexes(aux, aux2);
|
||||
assertEquals(190, writer.maxDoc());
|
||||
writer.close();
|
||||
_TestUtil.checkIndex(dir, provider);
|
||||
|
|
|
@ -235,7 +235,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
Directory targetDir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
w.addIndexes(new Directory[] { dir });
|
||||
w.addIndexes(dir);
|
||||
w.close();
|
||||
|
||||
_TestUtil.checkIndex(targetDir);
|
||||
|
@ -256,7 +256,7 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
Directory targetDir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(targetDir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
w.addIndexes(new IndexReader[] { reader });
|
||||
w.addIndexes(reader);
|
||||
w.close();
|
||||
reader.close();
|
||||
|
||||
|
@ -527,9 +527,13 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
|
|||
try {
|
||||
Directory dir = FSDirectory.open(new File(fullDir(outputDir)));
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(-1).setRAMBufferSizeMB(16.0));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(-1).
|
||||
setRAMBufferSizeMB(16.0).
|
||||
setMergePolicy(newLogMergePolicy(true, 10))
|
||||
);
|
||||
for(int i=0;i<35;i++) {
|
||||
addDoc(writer, i);
|
||||
}
|
||||
|
|
|
@ -184,8 +184,12 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
|
|||
Field idField = newField("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED);
|
||||
doc.add(idField);
|
||||
|
||||
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(100))
|
||||
);
|
||||
|
||||
for(int iter=0;iter<10;iter++) {
|
||||
|
||||
|
@ -213,8 +217,12 @@ public class TestConcurrentMergeScheduler extends LuceneTestCase {
|
|||
reader.close();
|
||||
|
||||
// Reopen
|
||||
writer = new IndexWriter(directory, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
|
||||
writer = new IndexWriter(
|
||||
directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMergePolicy(newLogMergePolicy(100))
|
||||
);
|
||||
}
|
||||
writer.close();
|
||||
|
||||
|
|
|
@ -373,10 +373,13 @@ public class TestDeletionPolicy extends LuceneTestCase {
|
|||
Directory dir = newDirectory();
|
||||
policy.dir = dir;
|
||||
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setIndexDeletionPolicy(policy).setMaxBufferedDocs(2));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setIndexDeletionPolicy(policy).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
for(int i=0;i<10;i++) {
|
||||
addDoc(writer);
|
||||
if ((1+i)%2 == 0)
|
||||
|
|
|
@ -111,10 +111,13 @@ public class TestDoc extends LuceneTestCase {
|
|||
PrintWriter out = new PrintWriter(sw, true);
|
||||
|
||||
Directory directory = FSDirectory.open(indexDir);
|
||||
IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(-1));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(-1).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
|
||||
SegmentInfo si1 = indexDoc(writer, "test.txt");
|
||||
printSegment(out, si1);
|
||||
|
@ -142,10 +145,13 @@ public class TestDoc extends LuceneTestCase {
|
|||
out = new PrintWriter(sw, true);
|
||||
|
||||
directory = FSDirectory.open(indexDir);
|
||||
writer = new IndexWriter(directory, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(-1));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
writer = new IndexWriter(
|
||||
directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.CREATE).
|
||||
setMaxBufferedDocs(-1).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
|
||||
si1 = indexDoc(writer, "test.txt");
|
||||
printSegment(out, si1);
|
||||
|
@ -188,7 +194,7 @@ public class TestDoc extends LuceneTestCase {
|
|||
SegmentReader r1 = SegmentReader.get(true, si1, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
SegmentReader r2 = SegmentReader.get(true, si2, IndexReader.DEFAULT_TERMS_INDEX_DIVISOR);
|
||||
|
||||
SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriter.DEFAULT_TERM_INDEX_INTERVAL, merged, null, CodecProvider.getDefault(), null);
|
||||
SegmentMerger merger = new SegmentMerger(si1.dir, IndexWriterConfig.DEFAULT_TERM_INDEX_INTERVAL, merged, null, CodecProvider.getDefault(), null);
|
||||
|
||||
merger.add(r1);
|
||||
merger.add(r2);
|
||||
|
|
|
@ -314,7 +314,7 @@ public class TestDocumentWriter extends LuceneTestCase {
|
|||
|
||||
_TestUtil.checkIndex(dir);
|
||||
|
||||
SegmentReader reader = SegmentReader.getOnlySegmentReader(dir);
|
||||
SegmentReader reader = getOnlySegmentReader(IndexReader.open(dir, false));
|
||||
FieldInfos fi = reader.fieldInfos();
|
||||
// f1
|
||||
assertFalse("f1 should have no norms", reader.hasNorms("f1"));
|
||||
|
|
|
@ -19,10 +19,7 @@ package org.apache.lucene.index;
|
|||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
|
@ -429,6 +426,10 @@ public class TestFieldsReader extends LuceneTestCase {
|
|||
return fsDir.createOutput(name);
|
||||
}
|
||||
@Override
|
||||
public void sync(Collection<String> names) throws IOException {
|
||||
fsDir.sync(names);
|
||||
}
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
fsDir.close();
|
||||
}
|
||||
|
|
|
@ -30,12 +30,14 @@ public class TestFlex extends LuceneTestCase {
|
|||
|
||||
final int DOC_COUNT = 177;
|
||||
|
||||
IndexWriter w = new IndexWriter(d, new MockAnalyzer(),
|
||||
IndexWriter.MaxFieldLength.UNLIMITED);
|
||||
IndexWriter w = new IndexWriter(
|
||||
d,
|
||||
new IndexWriterConfig(Version.LUCENE_31, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(7)
|
||||
);
|
||||
|
||||
for(int iter=0;iter<2;iter++) {
|
||||
if (iter == 0) {
|
||||
w.setMaxBufferedDocs(7);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("field1", "this is field1", Field.Store.NO, Field.Index.ANALYZED));
|
||||
doc.add(newField("field2", "this is field2", Field.Store.NO, Field.Index.ANALYZED));
|
||||
|
|
|
@ -40,11 +40,12 @@ public class TestIndexFileDeleter extends LuceneTestCase {
|
|||
public void testDeleteLeftoverFiles() throws IOException {
|
||||
MockDirectoryWrapper dir = newDirectory();
|
||||
dir.setPreventDoubleWrite(false);
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(10));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(10).
|
||||
setMergePolicy(newLogMergePolicy(true, 10))
|
||||
);
|
||||
int i;
|
||||
for(i=0;i<35;i++) {
|
||||
addDoc(writer, i);
|
||||
|
|
|
@ -146,8 +146,10 @@ public class TestIndexReader extends LuceneTestCase
|
|||
public void testGetFieldNames() throws Exception {
|
||||
Directory d = newDirectory();
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
IndexWriter writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
);
|
||||
|
||||
Document doc = new Document();
|
||||
doc.add(new Field("keyword","test1", Field.Store.YES, Field.Index.NOT_ANALYZED));
|
||||
|
@ -166,8 +168,12 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTrue(fieldNames.contains("unstored"));
|
||||
reader.close();
|
||||
// add more documents
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer()).setOpenMode(OpenMode.APPEND));
|
||||
writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMergePolicy(newLogMergePolicy())
|
||||
);
|
||||
// want to get some more segments here
|
||||
int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
|
||||
for (int i = 0; i < 5*mergeFactor; i++) {
|
||||
|
@ -261,8 +267,11 @@ public class TestIndexReader extends LuceneTestCase
|
|||
public void testTermVectors() throws Exception {
|
||||
Directory d = newDirectory();
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
IndexWriter writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMergePolicy(newLogMergePolicy())
|
||||
);
|
||||
// want to get some more segments here
|
||||
// new termvector fields
|
||||
int mergeFactor = ((LogMergePolicy) writer.getConfig().getMergePolicy()).getMergeFactor();
|
||||
|
@ -566,9 +575,11 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Term searchTerm = new Term("content", "aaa");
|
||||
|
||||
// add 1 documents with term : aaa
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(false);
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
addDoc(writer, searchTerm.text());
|
||||
writer.close();
|
||||
|
||||
|
@ -1410,10 +1421,12 @@ public class TestIndexReader extends LuceneTestCase
|
|||
Directory d = newDirectory();
|
||||
|
||||
// set up writer
|
||||
IndexWriter writer = new IndexWriter(d, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer())
|
||||
.setMaxBufferedDocs(2));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
for(int i=0;i<27;i++)
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
@ -1428,10 +1441,13 @@ public class TestIndexReader extends LuceneTestCase
|
|||
assertTrue(c.equals(r.getIndexCommit()));
|
||||
|
||||
// Change the index
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer()).setOpenMode(
|
||||
OpenMode.APPEND).setMaxBufferedDocs(2));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(2).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
for(int i=0;i<7;i++)
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
@ -1471,11 +1487,13 @@ public class TestIndexReader extends LuceneTestCase
|
|||
} catch (UnsupportedOperationException uoe) {
|
||||
// expected
|
||||
}
|
||||
|
||||
writer = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT,
|
||||
new MockAnalyzer())
|
||||
.setOpenMode(OpenMode.APPEND));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
|
||||
writer = new IndexWriter(
|
||||
d,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
addDocumentWithFields(writer);
|
||||
writer.close();
|
||||
|
||||
|
@ -1619,7 +1637,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.close();
|
||||
|
||||
// Open reader
|
||||
IndexReader r = SegmentReader.getOnlySegmentReader(dir);
|
||||
IndexReader r = getOnlySegmentReader(IndexReader.open(dir, false));
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r, "number");
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
@ -1643,16 +1661,19 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// FieldCache
|
||||
public void testFieldCacheReuseAfterReopen() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Open reader1
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
@ -1676,8 +1697,12 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// reopen switches readOnly
|
||||
public void testReopenChangeReadonly() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(-1));
|
||||
((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMaxBufferedDocs(-1).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("number", "17", Field.Store.NO, Field.Index.NOT_ANALYZED));
|
||||
writer.addDocument(doc);
|
||||
|
@ -1686,7 +1711,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
// Open reader1
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
assertTrue(r instanceof DirectoryReader);
|
||||
IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
final int[] ints = FieldCache.DEFAULT.getInts(r1, "number");
|
||||
assertEquals(1, ints.length);
|
||||
assertEquals(17, ints[0]);
|
||||
|
@ -1727,7 +1752,7 @@ public class TestIndexReader extends LuceneTestCase
|
|||
writer.commit();
|
||||
|
||||
IndexReader r = IndexReader.open(dir, false);
|
||||
IndexReader r1 = SegmentReader.getOnlySegmentReader(r);
|
||||
IndexReader r1 = getOnlySegmentReader(r);
|
||||
assertEquals(36, r1.getUniqueTermCount());
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
@ -1768,8 +1793,12 @@ public class TestIndexReader extends LuceneTestCase
|
|||
}
|
||||
|
||||
assertEquals(-1, ((SegmentReader) r.getSequentialSubReaders()[0]).getTermInfosIndexDivisor());
|
||||
writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setCodecProvider(_TestUtil.alwaysCodec("Standard")));
|
||||
((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
|
||||
writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setCodecProvider(_TestUtil.alwaysCodec("Standard")).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
writer.addDocument(doc);
|
||||
writer.close();
|
||||
|
||||
|
|
|
@ -302,7 +302,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
IndexReader r1 = IndexReader.open(dir1, false);
|
||||
IndexReader r2 = IndexReader.open(dir2, false);
|
||||
|
||||
MultiReader multiReader = new MultiReader(new IndexReader[] { r1, r2 });
|
||||
MultiReader multiReader = new MultiReader(r1, r2);
|
||||
performDefaultTests(multiReader);
|
||||
multiReader.close();
|
||||
dir1.close();
|
||||
|
@ -312,7 +312,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
public void testSegmentReaderUndeleteall() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(dir1);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
origSegmentReader.deleteDocument(10);
|
||||
assertDelDocsRefCountEquals(1, origSegmentReader);
|
||||
origSegmentReader.undeleteAll();
|
||||
|
@ -325,7 +325,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
public void testSegmentReaderCloseReferencing() throws Exception {
|
||||
final Directory dir1 = newDirectory();
|
||||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(dir1);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(IndexReader.open(dir1, false));
|
||||
origSegmentReader.deleteDocument(1);
|
||||
origSegmentReader.setNorm(4, "field1", 0.5f);
|
||||
|
||||
|
@ -346,7 +346,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
TestIndexReaderReopen.createIndex(random, dir1, false);
|
||||
|
||||
IndexReader origReader = IndexReader.open(dir1, false);
|
||||
SegmentReader origSegmentReader = SegmentReader.getOnlySegmentReader(origReader);
|
||||
SegmentReader origSegmentReader = getOnlySegmentReader(origReader);
|
||||
// deletedDocsRef should be null because nothing has updated yet
|
||||
assertNull(origSegmentReader.deletedDocsRef);
|
||||
|
||||
|
@ -358,7 +358,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
// the cloned segmentreader should have 2 references, 1 to itself, and 1 to
|
||||
// the original segmentreader
|
||||
IndexReader clonedReader = (IndexReader) origReader.clone();
|
||||
SegmentReader clonedSegmentReader = SegmentReader.getOnlySegmentReader(clonedReader);
|
||||
SegmentReader clonedSegmentReader = getOnlySegmentReader(clonedReader);
|
||||
assertDelDocsRefCountEquals(2, origSegmentReader);
|
||||
// deleting a document creates a new deletedDocs bitvector, the refs goes to
|
||||
// 1
|
||||
|
@ -395,7 +395,7 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
// test a reopened reader
|
||||
IndexReader reopenedReader = clonedReader.reopen();
|
||||
IndexReader cloneReader2 = (IndexReader) reopenedReader.clone();
|
||||
SegmentReader cloneSegmentReader2 = SegmentReader.getOnlySegmentReader(cloneReader2);
|
||||
SegmentReader cloneSegmentReader2 = getOnlySegmentReader(cloneReader2);
|
||||
assertDelDocsRefCountEquals(2, cloneSegmentReader2);
|
||||
clonedReader.close();
|
||||
reopenedReader.close();
|
||||
|
@ -490,10 +490,11 @@ public class TestIndexReaderClone extends LuceneTestCase {
|
|||
|
||||
public void testCloseStoredFields() throws Exception {
|
||||
final Directory dir = newDirectory();
|
||||
IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, new MockAnalyzer()));
|
||||
((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundFile(false);
|
||||
((LogMergePolicy) w.getConfig().getMergePolicy()).setUseCompoundDocStore(false);
|
||||
IndexWriter w = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
|
||||
setMergePolicy(newLogMergePolicy(false))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(newField("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED));
|
||||
w.addDocument(doc);
|
||||
|
|
|
@ -109,11 +109,14 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
|
|||
Directory dir3 = FSDirectory.open(indexDir3);
|
||||
|
||||
createIndex(random, dir3);
|
||||
IndexWriter iw = new IndexWriter(dir3, newIndexWriterConfig(
|
||||
TEST_VERSION_CURRENT, anlzr).setOpenMode(OpenMode.APPEND)
|
||||
.setMaxBufferedDocs(5));
|
||||
((LogMergePolicy) iw.getConfig().getMergePolicy()).setMergeFactor(3);
|
||||
iw.addIndexes(new Directory[] { dir1, dir2 });
|
||||
IndexWriter iw = new IndexWriter(
|
||||
dir3,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
|
||||
setOpenMode(OpenMode.APPEND).
|
||||
setMaxBufferedDocs(5).
|
||||
setMergePolicy(newLogMergePolicy(3))
|
||||
);
|
||||
iw.addIndexes(dir1, dir2);
|
||||
iw.optimize();
|
||||
iw.close();
|
||||
|
||||
|
@@ -128,9 +131,13 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
     doTestNorms(random, dir3);

     // now with optimize
-    iw = new IndexWriter(dir3, newIndexWriterConfig( TEST_VERSION_CURRENT,
-        anlzr).setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(5));
-    ((LogMergePolicy) iw.getConfig().getMergePolicy()).setMergeFactor(3);
+    iw = new IndexWriter(
+        dir3,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, anlzr).
+            setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(5).
+            setMergePolicy(newLogMergePolicy(3))
+    );
     iw.optimize();
     iw.close();
     verifyIndex(dir3);

@@ -162,7 +169,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
   public void testNormsClose() throws IOException {
     Directory dir1 = newDirectory();
     TestIndexReaderReopen.createIndex(random, dir1, false);
-    SegmentReader reader1 = SegmentReader.getOnlySegmentReader(dir1);
+    SegmentReader reader1 = getOnlySegmentReader(IndexReader.open(dir1, false));
     reader1.norms("field1");
     Norm r1norm = reader1.norms.get("field1");
     AtomicInteger r1BytesRef = r1norm.bytesRef();

@@ -181,7 +188,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
     IndexReader reader1 = IndexReader.open(dir1, false);

     IndexReader reader2C = (IndexReader) reader1.clone();
-    SegmentReader segmentReader2C = SegmentReader.getOnlySegmentReader(reader2C);
+    SegmentReader segmentReader2C = getOnlySegmentReader(reader2C);
     segmentReader2C.norms("field1"); // load the norms for the field
     Norm reader2CNorm = segmentReader2C.norms.get("field1");
     assertTrue("reader2CNorm.bytesRef()=" + reader2CNorm.bytesRef(), reader2CNorm.bytesRef().get() == 2);

@@ -189,13 +196,13 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {


     IndexReader reader3C = (IndexReader) reader2C.clone();
-    SegmentReader segmentReader3C = SegmentReader.getOnlySegmentReader(reader3C);
+    SegmentReader segmentReader3C = getOnlySegmentReader(reader3C);
     Norm reader3CCNorm = segmentReader3C.norms.get("field1");
     assertEquals(3, reader3CCNorm.bytesRef().get());

     // edit a norm and the refcount should be 1
     IndexReader reader4C = (IndexReader) reader3C.clone();
-    SegmentReader segmentReader4C = SegmentReader.getOnlySegmentReader(reader4C);
+    SegmentReader segmentReader4C = getOnlySegmentReader(reader4C);
     assertEquals(4, reader3CCNorm.bytesRef().get());
     reader4C.setNorm(5, "field1", 0.33f);

@@ -215,7 +222,7 @@ public class TestIndexReaderCloneNorms extends LuceneTestCase {
     assertEquals(1, reader4CCNorm.bytesRef().get());

     IndexReader reader5C = (IndexReader) reader4C.clone();
-    SegmentReader segmentReader5C = SegmentReader.getOnlySegmentReader(reader5C);
+    SegmentReader segmentReader5C = getOnlySegmentReader(reader5C);
     Norm reader5CCNorm = segmentReader5C.norms.get("field1");
     reader5C.setNorm(5, "field1", 0.7f);
     assertEquals(1, reader5CCNorm.bytesRef().get());
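All of the SegmentReader.getOnlySegmentReader(...) call sites above move to a getOnlySegmentReader(...) helper that the tests inherit from LuceneTestCase. That helper is not part of this diff; presumably it unwraps a single-segment reader along these lines (an assumption, not code from the commit):

    // Assumption: a reader-unwrapping helper with the same name might look roughly like this.
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.SegmentReader;

    final class SegmentReaderHelperSketch {
      static SegmentReader getOnlySegmentReader(IndexReader reader) {
        if (reader instanceof SegmentReader) {
          return (SegmentReader) reader;
        }
        // A reader over a single-segment index exposes that segment as its only sub-reader.
        IndexReader[] subReaders = reader.getSequentialSubReaders();
        if (subReaders == null || subReaders.length != 1) {
          throw new IllegalArgumentException("reader does not wrap exactly one SegmentReader");
        }
        return (SegmentReader) subReaders[0];
      }
    }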
@@ -230,9 +230,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {

       @Override
       protected IndexReader openReader() throws IOException {
-        return new MultiReader(new IndexReader[]
-            {IndexReader.open(dir1, false),
-             IndexReader.open(dir2, false)});
+        return new MultiReader(IndexReader.open(dir1, false),
+                               IndexReader.open(dir2, false));
       }

     });

@@ -256,12 +255,11 @@ public class TestIndexReaderReopen extends LuceneTestCase {

       @Override
       protected IndexReader openReader() throws IOException {
-        return new MultiReader(new IndexReader[]
-            {IndexReader.open(dir3, false),
-             IndexReader.open(dir4, false),
-             // Does not implement reopen, so
-             // hits exception:
-             new FilterIndexReader(IndexReader.open(dir3, false))});
+        return new MultiReader(IndexReader.open(dir3, false),
+                               IndexReader.open(dir4, false),
+                               // Does not implement reopen, so
+                               // hits exception:
+                               new FilterIndexReader(IndexReader.open(dir3, false)));
       }

     });
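Both openReader() hunks drop the explicit IndexReader[] array in favour of MultiReader's varargs constructor. A minimal sketch of the call shape, with the two directories standing in for indexes opened elsewhere:

    // Sketch only: dir1 and dir2 are placeholders for directories that already contain indexes.
    import java.io.IOException;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.MultiReader;
    import org.apache.lucene.store.Directory;

    final class MultiReaderSketch {
      static MultiReader openBoth(Directory dir1, Directory dir2) throws IOException {
        // Old style: new MultiReader(new IndexReader[] { ... })
        return new MultiReader(IndexReader.open(dir1, false),
                               IndexReader.open(dir2, false));
      }
    }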
@@ -297,10 +295,8 @@ public class TestIndexReaderReopen extends LuceneTestCase {
         ParallelReader pr = new ParallelReader();
         pr.add(IndexReader.open(dir1, false));
         pr.add(IndexReader.open(dir2, false));
-        MultiReader mr = new MultiReader(new IndexReader[] {
-            IndexReader.open(dir3, false), IndexReader.open(dir4, false)});
-        return new MultiReader(new IndexReader[] {
-            pr, mr, IndexReader.open(dir5, false)});
+        MultiReader mr = new MultiReader(IndexReader.open(dir3, false), IndexReader.open(dir4, false));
+        return new MultiReader(pr, mr, IndexReader.open(dir5, false));
       }
     });
     dir1.close();

@@ -612,7 +608,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
     createIndex(random, dir1, false);

     IndexReader reader1 = IndexReader.open(dir1, false);
-    SegmentReader segmentReader1 = SegmentReader.getOnlySegmentReader(reader1);
+    SegmentReader segmentReader1 = getOnlySegmentReader(reader1);
     IndexReader modifier = IndexReader.open(dir1, false);
     modifier.deleteDocument(0);
     modifier.close();

@@ -624,7 +620,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {
     modifier.close();

     IndexReader reader3 = reader2.reopen();
-    SegmentReader segmentReader3 = SegmentReader.getOnlySegmentReader(reader3);
+    SegmentReader segmentReader3 = getOnlySegmentReader(reader3);
     modifier = IndexReader.open(dir1, false);
     modifier.deleteDocument(2);
     modifier.close();

@@ -1167,7 +1163,7 @@ public class TestIndexReaderReopen extends LuceneTestCase {

     IndexReader[] rs2 = r2.getSequentialSubReaders();

-    SegmentReader sr1 = SegmentReader.getOnlySegmentReader(r1);
+    SegmentReader sr1 = getOnlySegmentReader(r1);
     SegmentReader sr2 = (SegmentReader) rs2[0];

     // At this point they share the same BitVector

@@ -1190,9 +1186,13 @@ public class TestIndexReaderReopen extends LuceneTestCase {

   public void testReopenOnCommit() throws Throwable {
     Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setIndexDeletionPolicy(new KeepAllCommits()).setMaxBufferedDocs(-1));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setIndexDeletionPolicy(new KeepAllCommits()).
+            setMaxBufferedDocs(-1).
+            setMergePolicy(newLogMergePolicy(10))
+    );
     for(int i=0;i<4;i++) {
       Document doc = new Document();
       doc.add(newField("id", ""+i, Field.Store.NO, Field.Index.NOT_ANALYZED));
@@ -495,8 +495,13 @@ public class TestIndexWriter extends LuceneTestCase {
    */
   public void testCommitOnCloseDiskUsage() throws IOException {
     MockDirectoryWrapper dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setReaderPooling(false));
-    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(10).
+            setReaderPooling(false).
+            setMergePolicy(newLogMergePolicy(10))
+    );
     for(int j=0;j<30;j++) {
       addDocWithIndex(writer, j);
     }

@@ -505,10 +510,16 @@ public class TestIndexWriter extends LuceneTestCase {

     dir.setTrackDiskUsage(true);
     long startDiskUsage = dir.getMaxUsedSizeInBytes();
-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setOpenMode(OpenMode.APPEND).setMaxBufferedDocs(10).setMergeScheduler(
-            new SerialMergeScheduler()).setReaderPooling(false));
-    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
+            .setOpenMode(OpenMode.APPEND).
+            setMaxBufferedDocs(10).
+            setMergeScheduler(new SerialMergeScheduler()).
+            setReaderPooling(false).
+            setMergePolicy(newLogMergePolicy(10))
+
+    );
     for(int j=0;j<1470;j++) {
       addDocWithIndex(writer, j);
     }

@@ -546,8 +557,12 @@ public class TestIndexWriter extends LuceneTestCase {
     // test uses IW.rollback which easily results in
     // writing to same file more than once
     dir.setPreventDoubleWrite(false);
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(10);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy(10))
+    );
     for(int j=0;j<17;j++) {
       addDocWithIndex(writer, j);
     }

@@ -657,8 +672,12 @@ public class TestIndexWriter extends LuceneTestCase {

   public void testSmallRAMBuffer() throws IOException {
     MockDirectoryWrapper dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.000001));
-    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setRAMBufferSizeMB(0.000001).
+            setMergePolicy(newLogMergePolicy(10))
+    );
     int lastNumFile = dir.listAll().length;
     for(int j=0;j<9;j++) {
       Document doc = new Document();
@@ -674,133 +693,6 @@ public class TestIndexWriter extends LuceneTestCase {
     dir.close();
   }

-  /**
-   * Make sure it's OK to change RAM buffer size and // maxBufferedDocs in a
-   * write session
-   *
-   * @deprecated after all the setters on IW go away (4.0), this test can be
-   *             removed because changing ram buffer settings during a write
-   *             session won't be possible.
-   */
-  @Deprecated
-  public void testChangingRAMBuffer() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setRAMBufferSizeMB(
-        IndexWriterConfig.DISABLE_AUTO_FLUSH));
-
-    int lastFlushCount = -1;
-    for(int j=1;j<52;j++) {
-      Document doc = new Document();
-      doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-      writer.addDocument(doc);
-      _TestUtil.syncConcurrentMerges(writer);
-      int flushCount = writer.getFlushCount();
-      if (j == 1)
-        lastFlushCount = flushCount;
-      else if (j < 10)
-        // No new files should be created
-        assertEquals(flushCount, lastFlushCount);
-      else if (10 == j) {
-        assertTrue(flushCount > lastFlushCount);
-        lastFlushCount = flushCount;
-        writer.setRAMBufferSizeMB(0.000001);
-        writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-      } else if (j < 20) {
-        assertTrue(flushCount > lastFlushCount);
-        lastFlushCount = flushCount;
-      } else if (20 == j) {
-        writer.setRAMBufferSizeMB(16);
-        writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        lastFlushCount = flushCount;
-      } else if (j < 30) {
-        assertEquals(flushCount, lastFlushCount);
-      } else if (30 == j) {
-        writer.setRAMBufferSizeMB(0.000001);
-        writer.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-      } else if (j < 40) {
-        assertTrue(flushCount> lastFlushCount);
-        lastFlushCount = flushCount;
-      } else if (40 == j) {
-        writer.setMaxBufferedDocs(10);
-        writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        lastFlushCount = flushCount;
-      } else if (j < 50) {
-        assertEquals(flushCount, lastFlushCount);
-        writer.setMaxBufferedDocs(10);
-        writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-      } else if (50 == j) {
-        assertTrue(flushCount > lastFlushCount);
-      }
-    }
-    writer.close();
-    dir.close();
-  }
-
-  /**
-   * @deprecated after setters on IW go away, this test can be deleted because
-   *             changing those settings on IW won't be possible.
-   */
-  @Deprecated
-  public void testChangingRAMBuffer2() throws IOException {
-    MockDirectoryWrapper dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(10).setMaxBufferedDeleteTerms(
-        10).setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH));
-
-    for(int j=1;j<52;j++) {
-      Document doc = new Document();
-      doc.add(newField("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
-      writer.addDocument(doc);
-    }
-
-    int lastFlushCount = -1;
-    for(int j=1;j<52;j++) {
-      writer.deleteDocuments(new Term("field", "aaa" + j));
-      _TestUtil.syncConcurrentMerges(writer);
-      int flushCount = writer.getFlushCount();
-      if (j == 1)
-        lastFlushCount = flushCount;
-      else if (j < 10) {
-        // No new files should be created
-        assertEquals(flushCount, lastFlushCount);
-      } else if (10 == j) {
-        assertTrue(flushCount > lastFlushCount);
-        lastFlushCount = flushCount;
-        writer.setRAMBufferSizeMB(0.000001);
-        writer.setMaxBufferedDeleteTerms(1);
-      } else if (j < 20) {
-        assertTrue(flushCount > lastFlushCount);
-        lastFlushCount = flushCount;
-      } else if (20 == j) {
-        writer.setRAMBufferSizeMB(16);
-        writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        lastFlushCount = flushCount;
-      } else if (j < 30) {
-        assertEquals(flushCount, lastFlushCount);
-      } else if (30 == j) {
-        writer.setRAMBufferSizeMB(0.000001);
-        writer.setMaxBufferedDeleteTerms(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        writer.setMaxBufferedDeleteTerms(1);
-      } else if (j < 40) {
-        assertTrue(flushCount> lastFlushCount);
-        lastFlushCount = flushCount;
-      } else if (40 == j) {
-        writer.setMaxBufferedDeleteTerms(10);
-        writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-        lastFlushCount = flushCount;
-      } else if (j < 50) {
-        assertEquals(flushCount, lastFlushCount);
-        writer.setMaxBufferedDeleteTerms(10);
-        writer.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
-      } else if (50 == j) {
-        assertTrue(flushCount > lastFlushCount);
-      }
-    }
-    writer.close();
-    dir.close();
-  }
-
   public void testDiverseDocs() throws IOException {
     MockDirectoryWrapper dir = newDirectory();
     IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.5));
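The two tests deleted above relied on changing setRAMBufferSizeMB, setMaxBufferedDocs and setMaxBufferedDeleteTerms on a live IndexWriter; with those setters removed, the flush triggers are fixed on IndexWriterConfig when the writer is created. A sketch of that style with plain Lucene classes (RAMDirectory, StandardAnalyzer and Version.LUCENE_CURRENT are placeholders, not values from the commit):

    // Sketch only: flush triggers set once on IndexWriterConfig at construction time.
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class FlushConfigSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        // Flush every 10 buffered documents; disable the RAM-size trigger.
        IndexWriterConfig conf =
            new IndexWriterConfig(Version.LUCENE_CURRENT, new StandardAnalyzer(Version.LUCENE_CURRENT))
                .setMaxBufferedDocs(10)
                .setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
        IndexWriter writer = new IndexWriter(dir, conf);
        for (int j = 1; j < 52; j++) {
          Document doc = new Document();
          doc.add(new Field("field", "aaa" + j, Field.Store.YES, Field.Index.ANALYZED));
          writer.addDocument(doc);
        }
        writer.close();
        dir.close();
      }
    }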
@@ -968,11 +860,14 @@ public class TestIndexWriter extends LuceneTestCase {

   public void testFlushWithNoMerging() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
-    ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(10))
+    );
     Document doc = new Document();
-    doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+    doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
     for(int i=0;i<19;i++)
       writer.addDocument(doc);
     writer.flush(false, true, true);

@@ -1010,12 +905,15 @@ public class TestIndexWriter extends LuceneTestCase {

     Directory dir = newDirectory();
     for(int pass=0;pass<2;pass++) {
-      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer())
-          .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(2));
-      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(101);
+      IndexWriter writer = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+              setOpenMode(OpenMode.CREATE).
+              setMaxBufferedDocs(2).
+              setMergePolicy(newLogMergePolicy(101))
+      );
       Document doc = new Document();
-      doc.add(newField("field", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
+      doc.add(newField("field", "aaa", Store.YES, Index.ANALYZED, TermVector.WITH_POSITIONS_OFFSETS));
       for(int i=0;i<200;i++)
         writer.addDocument(doc);
       writer.optimize(false);

@@ -1200,20 +1098,17 @@ public class TestIndexWriter extends LuceneTestCase {

     for(int pass=0;pass<2;pass++) {

-      IndexWriterConfig conf = newIndexWriterConfig(
-          TEST_VERSION_CURRENT, new MockAnalyzer()).setOpenMode(OpenMode.CREATE)
-          .setMaxBufferedDocs(2);
-      if (pass == 2) {
-        conf.setMergeScheduler(new SerialMergeScheduler());
-      }
-      IndexWriter writer = new IndexWriter(directory, conf);
-      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(100);
+      IndexWriter writer = new IndexWriter(
+          directory,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+              setOpenMode(OpenMode.CREATE).
+              setMaxBufferedDocs(2).
+              // have to use compound file to prevent running out of
+              // descripters when newDirectory returns a file-system
+              // backed directory:
+              setMergePolicy(newLogMergePolicy(false, 10))
+      );

-      // have to use compound file to prevent running out of
-      // descripters when newDirectory returns a file-system
-      // backed directory:
-      ((LogMergePolicy) writer.getConfig().getMergePolicy()).setUseCompoundFile(true);

       //System.out.println("TEST: pass=" + pass + " cms=" + (pass >= 2));
       for(int iter=0;iter<10;iter++) {
         //System.out.println("TEST: iter=" + iter);

@@ -1309,10 +1204,12 @@ public class TestIndexWriter extends LuceneTestCase {
   public void testForceCommit() throws IOException {
     Directory dir = newDirectory();

-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
     writer.commit();

     for (int i = 0; i < 23; i++)

@@ -1412,21 +1309,23 @@ public class TestIndexWriter extends LuceneTestCase {
   // LUCENE-325: test expungeDeletes, when many adjacent merges are required
   public void testExpungeDeletes2() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
-            IndexWriterConfig.DISABLE_AUTO_FLUSH));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
+            setMergePolicy(newLogMergePolicy(50))
+    );

     Document document = new Document();

     document = new Document();
-    Field storedField = newField("stored", "stored", Field.Store.YES,
-        Field.Index.NO);
+    Field storedField = newField("stored", "stored", Store.YES,
+        Index.NO);
     document.add(storedField);
     Field termVectorField = newField("termVector", "termVector",
-        Field.Store.NO, Field.Index.NOT_ANALYZED,
-        Field.TermVector.WITH_POSITIONS_OFFSETS);
+        Store.NO, Index.NOT_ANALYZED,
+        TermVector.WITH_POSITIONS_OFFSETS);
     document.add(termVectorField);
     for(int i=0;i<98;i++)
       writer.addDocument(document);

@@ -1440,9 +1339,11 @@ public class TestIndexWriter extends LuceneTestCase {
     assertEquals(49, ir.numDocs());
     ir.close();

-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT,
-        new MockAnalyzer()));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMergePolicy(newLogMergePolicy(3))
+    );
     assertEquals(49, writer.numDocs());
     writer.expungeDeletes();
     writer.close();

@@ -1457,11 +1358,13 @@ public class TestIndexWriter extends LuceneTestCase {
   // many adjacent merges are required
   public void testExpungeDeletes3() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2).setRAMBufferSizeMB(
-            IndexWriterConfig.DISABLE_AUTO_FLUSH));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(50);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH).
+            setMergePolicy(newLogMergePolicy(50))
+    );

     Document document = new Document();


@@ -1485,9 +1388,11 @@ public class TestIndexWriter extends LuceneTestCase {
     assertEquals(49, ir.numDocs());
     ir.close();

-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-    // Force many merges to happen
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(3);
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMergePolicy(newLogMergePolicy(3))
+    );
     writer.expungeDeletes(false);
     writer.close();
     ir = IndexReader.open(dir, true);

@@ -1836,8 +1741,12 @@ public class TestIndexWriter extends LuceneTestCase {
   public void testPrepareCommit() throws IOException {
     Directory dir = newDirectory();

-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
     writer.commit();

     for (int i = 0; i < 23; i++)

@@ -1888,8 +1797,12 @@ public class TestIndexWriter extends LuceneTestCase {
     MockDirectoryWrapper dir = newDirectory();
     dir.setPreventDoubleWrite(false);

-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()).setMaxBufferedDocs(2));
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setMergePolicy(newLogMergePolicy(5))
+    );
     writer.commit();

     for (int i = 0; i < 23; i++)

@@ -2091,7 +2004,7 @@ public class TestIndexWriter extends LuceneTestCase {

     IndexReader r1 = IndexReader.open(dir2, true);
     IndexReader r2 = (IndexReader) r1.clone();
-    writer.addIndexes(new IndexReader[] {r1, r2});
+    writer.addIndexes(r1, r2);
     writer.close();

     IndexReader r3 = IndexReader.open(dir, true);

@@ -2564,11 +2477,13 @@ public class TestIndexWriter extends LuceneTestCase {
   }

   public void testDeleteUnusedFiles() throws Exception {

     for(int iter=0;iter<2;iter++) {
       Directory dir = newDirectory();
-      IndexWriter w = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-      ((LogMergePolicy) w.getMergePolicy()).setUseCompoundFile(true);
+      IndexWriter w = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+              setMergePolicy(newLogMergePolicy(true))
+      );
       Document doc = new Document();
       doc.add(newField("field", "go", Field.Store.NO, Field.Index.ANALYZED));
       w.addDocument(doc);

@@ -2965,7 +2880,7 @@ public class TestIndexWriter extends LuceneTestCase {

     Directory dir = newDirectory();
     IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).setRAMBufferSizeMB(0.01));
-    ((LogMergePolicy) indexWriter.getMergePolicy()).setUseCompoundFile(false);
+    ((LogMergePolicy) indexWriter.getConfig().getMergePolicy()).setUseCompoundFile(false);

     String BIG="alskjhlaksjghlaksjfhalksvjepgjioefgjnsdfjgefgjhelkgjhqewlrkhgwlekgrhwelkgjhwelkgrhwlkejg";
     BIG=BIG+BIG+BIG+BIG;
@@ -249,52 +249,4 @@ public class TestIndexWriterConfig extends LuceneTestCase {
     conf.setMergePolicy(null);
     assertEquals(LogByteSizeMergePolicy.class, conf.getMergePolicy().getClass());
   }
-
-  /**
-   * @deprecated should be removed once all the deprecated setters are removed
-   *             from IndexWriter.
-   */
-  @Test @Deprecated
-  public void testIndexWriterSetters() throws Exception {
-    // This test intentionally tests deprecated methods. The purpose is to pass
-    // whatever the user set on IW to IWC, so that if the user calls
-    // iw.getConfig().getXYZ(), he'll get the same value he passed to
-    // iw.setXYZ().
-    IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer());
-    Directory dir = newDirectory();
-    IndexWriter writer = new IndexWriter(dir, conf);
-
-    writer.setSimilarity(new MySimilarity());
-    assertEquals(MySimilarity.class, writer.getConfig().getSimilarity().getClass());
-
-    writer.setMaxBufferedDeleteTerms(4);
-    assertEquals(4, writer.getConfig().getMaxBufferedDeleteTerms());
-
-    writer.setMaxBufferedDocs(10);
-    assertEquals(10, writer.getConfig().getMaxBufferedDocs());
-
-    writer.setMaxFieldLength(10);
-    assertEquals(10, writer.getConfig().getMaxFieldLength());
-
-    writer.setMergeScheduler(new SerialMergeScheduler());
-    assertEquals(SerialMergeScheduler.class, writer.getConfig().getMergeScheduler().getClass());
-
-    writer.setRAMBufferSizeMB(1.5);
-    assertEquals(1.5, writer.getConfig().getRAMBufferSizeMB(), 0.0);
-
-    writer.setTermIndexInterval(40);
-    assertEquals(40, writer.getConfig().getTermIndexInterval());
-
-    writer.setWriteLockTimeout(100);
-    assertEquals(100, writer.getConfig().getWriteLockTimeout());
-
-    writer.setMergedSegmentWarmer(new MyWarmer());
-    assertEquals(MyWarmer.class, writer.getConfig().getMergedSegmentWarmer().getClass());
-
-    writer.setMergePolicy(new LogDocMergePolicy());
-    assertEquals(LogDocMergePolicy.class, writer.getConfig().getMergePolicy().getClass());
-    writer.close();
-    dir.close();
-  }
-
 }
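The deleted testIndexWriterSetters exercised the deprecated IndexWriter setters and verified that their values showed up in getConfig(). After this commit the flow is one-way: settings are made on IndexWriterConfig and only read back through writer.getConfig(). A sketch of that usage (names other than the Lucene classes are placeholders, not anything from the commit):

    // Sketch only: configure via IndexWriterConfig, read back via writer.getConfig().
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LogDocMergePolicy;
    import org.apache.lucene.index.SerialMergeScheduler;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class ReadBackConfigSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriterConfig conf =
            new IndexWriterConfig(Version.LUCENE_CURRENT, new StandardAnalyzer(Version.LUCENE_CURRENT))
                .setMaxBufferedDeleteTerms(4)
                .setRAMBufferSizeMB(1.5)
                .setMergeScheduler(new SerialMergeScheduler())
                .setMergePolicy(new LogDocMergePolicy());
        IndexWriter writer = new IndexWriter(dir, conf);

        // There are no corresponding setters left on IndexWriter itself.
        System.out.println(writer.getConfig().getMaxBufferedDeleteTerms()); // 4
        System.out.println(writer.getConfig().getRAMBufferSizeMB());        // 1.5
        System.out.println(writer.getConfig().getMergePolicy().getClass().getSimpleName());

        writer.close();
        dir.close();
      }
    }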
@@ -794,7 +794,7 @@ public class TestIndexWriterDelete extends LuceneTestCase {

   public void testDeleteNullQuery() throws IOException {
     Directory dir = newDirectory();
-    IndexWriter modifier = new IndexWriter(dir, new MockAnalyzer(MockTokenizer.WHITESPACE, false), IndexWriter.MaxFieldLength.UNLIMITED);
+    IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(MockTokenizer.WHITESPACE, false)));

     for (int i = 0; i < 5; i++) {
       addDoc(modifier, i, 2*i);
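This one-line hunk is the constructor migration itself: the deprecated IndexWriter(Directory, Analyzer, MaxFieldLength) constructor gives way to IndexWriter(Directory, IndexWriterConfig). A sketch with a placeholder directory and analyzer:

    // Sketch only: the constructor change; RAMDirectory and StandardAnalyzer are placeholders.
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class ConstructorMigrationSketch {
      public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        StandardAnalyzer analyzer = new StandardAnalyzer(Version.LUCENE_CURRENT);

        // Old (removed): new IndexWriter(dir, analyzer, IndexWriter.MaxFieldLength.UNLIMITED);
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer));
        w.close();
        dir.close();
      }
    }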
@@ -612,8 +612,12 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     MockDirectoryWrapper dir = newDirectory();

     {
-      final IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, analyzer).setMaxBufferedDocs(-1));
-      ((LogMergePolicy) writer.getMergePolicy()).setMergeFactor(10);
+      final IndexWriter writer = new IndexWriter(
+          dir,
+          newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).
+              setMaxBufferedDocs(-1).
+              setMergePolicy(newLogMergePolicy(10))
+      );
       final int finalI = i;

       Thread[] threads = new Thread[NUM_THREAD];

@@ -740,10 +744,14 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
     FailOnlyInSync failure = new FailOnlyInSync();
     dir.failOn(failure);

-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(2).setMergeScheduler(new ConcurrentMergeScheduler()));
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(2).
+            setMergeScheduler(new ConcurrentMergeScheduler()).
+            setMergePolicy(newLogMergePolicy(5))
+    );
     failure.setDoFail();
-    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setMergeFactor(5);

     for (int i = 0; i < 23; i++) {
       addDoc(writer);

@@ -1005,9 +1013,12 @@ public class TestIndexWriterExceptions extends LuceneTestCase {

     IndexWriter writer = null;

-    writer = new IndexWriter(dir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer()));
-    ((LogMergePolicy) writer.getMergePolicy()).setUseCompoundFile(true);
-    ((LogMergePolicy) writer.getMergePolicy()).setNoCFSRatio(1.0);
+    writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMergePolicy(newLogMergePolicy(true))
+    );
+    ((LogMergePolicy) writer.getConfig().getMergePolicy()).setNoCFSRatio(1.0);

     // add 100 documents
     for (int i = 0; i < 100; i++) {

@@ -100,9 +100,12 @@ public class TestIndexWriterMergePolicy extends LuceneTestCase {
   public void testMergeFactorChange() throws IOException {
     Directory dir = newDirectory();

-    IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(
-        TEST_VERSION_CURRENT, new MockAnalyzer())
-        .setMaxBufferedDocs(10).setMergePolicy(new LogDocMergePolicy()));
+    IndexWriter writer = new IndexWriter(
+        dir,
+        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer()).
+            setMaxBufferedDocs(10).
+            setMergePolicy(newLogMergePolicy())
+    );

     for (int i = 0; i < 250; i++) {
       addDoc(writer);