LUCENE-5900: More Version cleanup

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1620509 13f79535-47bb-0310-9956-ffa450edef68
Ryan Ernst 2014-08-26 05:19:34 +00:00
parent 751bc33ea9
commit f566386416
39 changed files with 140 additions and 126 deletions

@@ -105,6 +105,12 @@ New Features
PushPostingsWriterBase for single-pass push of docs/positions to the
postings format. (Mike McCandless)

API Changes

* LUCENE-5900: Deprecated more constructors taking Version in *InfixSuggester and
ICUCollationKeyAnalyzer, and removed TEST_VERSION_CURRENT from the test framework.
(Ryan Ernst)

Bug Fixes

* LUCENE-5650: Enforce read-only access to any path outside the temporary
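To see the LUCENE-5900 change in code, a hypothetical migration sketch (not taken from this commit; assumes a Directory dir and Analyzer analyzer are already set up):

// Deprecated: constructors that take an explicit Version
AnalyzingInfixSuggester old = new AnalyzingInfixSuggester(Version.LATEST, dir, analyzer);
// Preferred replacement: no Version argument; it is taken from the analyzer
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(dir, analyzer);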

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ClasspathResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -38,7 +39,7 @@ public class TestCommonGramsFilterFactory extends BaseTokenStreamFactoryTestCase
public void testInform() throws Exception {
ResourceLoader loader = new ClasspathResourceLoader(TestStopFilter.class);
assertTrue("loader is null and it shouldn't be", loader != null);
CommonGramsFilterFactory factory = (CommonGramsFilterFactory) tokenFilterFactory("CommonGrams", TEST_VERSION_CURRENT, loader,
CommonGramsFilterFactory factory = (CommonGramsFilterFactory) tokenFilterFactory("CommonGrams", Version.LATEST, loader,
"words", "stop-1.txt",
"ignoreCase", "true");
CharArraySet words = factory.getCommonWords();
@@ -48,7 +49,7 @@ public class TestCommonGramsFilterFactory extends BaseTokenStreamFactoryTestCase
assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory
.isIgnoreCase() == true);
factory = (CommonGramsFilterFactory) tokenFilterFactory("CommonGrams", TEST_VERSION_CURRENT, loader,
factory = (CommonGramsFilterFactory) tokenFilterFactory("CommonGrams", Version.LATEST, loader,
"words", "stop-1.txt, stop-2.txt",
"ignoreCase", "true");
words = factory.getCommonWords();
@@ -58,7 +59,7 @@ public class TestCommonGramsFilterFactory extends BaseTokenStreamFactoryTestCase
assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory
.isIgnoreCase() == true);
factory = (CommonGramsFilterFactory) tokenFilterFactory("CommonGrams", TEST_VERSION_CURRENT, loader,
factory = (CommonGramsFilterFactory) tokenFilterFactory("CommonGrams", Version.LATEST, loader,
"words", "stop-snowball.txt",
"format", "snowball",
"ignoreCase", "true");

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.CharArraySet;
import org.apache.lucene.analysis.util.ClasspathResourceLoader;
import org.apache.lucene.analysis.util.ResourceLoader;
import org.apache.lucene.util.Version;
import java.io.StringReader;
@@ -38,7 +39,7 @@ public class TestCommonGramsQueryFilterFactory extends BaseTokenStreamFactoryTes
public void testInform() throws Exception {
ResourceLoader loader = new ClasspathResourceLoader(TestStopFilter.class);
assertTrue("loader is null and it shouldn't be", loader != null);
CommonGramsQueryFilterFactory factory = (CommonGramsQueryFilterFactory) tokenFilterFactory("CommonGramsQuery", TEST_VERSION_CURRENT, loader,
CommonGramsQueryFilterFactory factory = (CommonGramsQueryFilterFactory) tokenFilterFactory("CommonGramsQuery", Version.LATEST, loader,
"words", "stop-1.txt",
"ignoreCase", "true");
CharArraySet words = factory.getCommonWords();
@@ -48,7 +49,7 @@ public class TestCommonGramsQueryFilterFactory extends BaseTokenStreamFactoryTes
assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory
.isIgnoreCase() == true);
factory = (CommonGramsQueryFilterFactory) tokenFilterFactory("CommonGramsQuery", TEST_VERSION_CURRENT, loader,
factory = (CommonGramsQueryFilterFactory) tokenFilterFactory("CommonGramsQuery", Version.LATEST, loader,
"words", "stop-1.txt, stop-2.txt",
"ignoreCase", "true");
words = factory.getCommonWords();
@@ -58,7 +59,7 @@ public class TestCommonGramsQueryFilterFactory extends BaseTokenStreamFactoryTes
assertTrue(factory.isIgnoreCase() + " does not equal: " + true, factory
.isIgnoreCase() == true);
factory = (CommonGramsQueryFilterFactory) tokenFilterFactory("CommonGramsQuery", TEST_VERSION_CURRENT, loader,
factory = (CommonGramsQueryFilterFactory) tokenFilterFactory("CommonGramsQuery", Version.LATEST, loader,
"words", "stop-snowball.txt",
"format", "snowball",
"ignoreCase", "true");

@@ -54,6 +54,7 @@ import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
/**
* Tests that any newly added Tokenizers/TokenFilters/CharFilters have a
@@ -121,7 +122,7 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
}
Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("luceneMatchVersion", Version.LATEST.toString());
if (Tokenizer.class.isAssignableFrom(c)) {
String clazzName = c.getSimpleName();

@@ -36,6 +36,7 @@ import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.Version;
/**
* Sanity check some things about all factories,
@@ -123,7 +124,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
/** tries to initialize a factory with no arguments */
private AbstractAnalysisFactory initialize(Class<? extends AbstractAnalysisFactory> factoryClazz) throws IOException {
Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("luceneMatchVersion", Version.LATEST.toString());
Constructor<? extends AbstractAnalysisFactory> ctor;
try {
ctor = factoryClazz.getConstructor(Map.class);

@@ -74,7 +74,7 @@ public class TestKeywordAnalyzer extends BaseTokenStreamTestCase {
PerFieldAnalyzerWrapper analyzer = new PerFieldAnalyzerWrapper(new SimpleAnalyzer());
analyzer.addAnalyzer("partnum", new KeywordAnalyzer());
QueryParser queryParser = new QueryParser(TEST_VERSION_CURRENT, "description", analyzer);
QueryParser queryParser = new QueryParser("description", analyzer);
Query query = queryParser.parse("partnum:Q36 AND SPACE");
ScoreDoc[] hits = searcher.search(query, null, 1000).scoreDocs;

@@ -352,7 +352,7 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
put(Version.class, new ArgProducer() {
@Override public Object create(Random random) {
// we expect bugs in emulating old versions
return TEST_VERSION_CURRENT;
return Version.LATEST;
}
});
put(AttributeFactory.class, new ArgProducer() {

@@ -23,6 +23,7 @@ import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.util.Version;
/**
* Simple tests to ensure the keyword marker filter factory is working.
@@ -32,7 +33,7 @@ public class TestKeywordMarkerFilterFactory extends BaseTokenStreamFactoryTestCa
public void testKeywords() throws Exception {
Reader reader = new StringReader("dogs cats");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("KeywordMarker", Version.LATEST,
new StringMockResourceLoader("cats"),
"protected", "protwords.txt").create(stream);
stream = tokenFilterFactory("PorterStem").create(stream);
@@ -51,8 +52,7 @@ public class TestKeywordMarkerFilterFactory extends BaseTokenStreamFactoryTestCa
public void testKeywordsMixed() throws Exception {
Reader reader = new StringReader("dogs cats birds");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
new StringMockResourceLoader("cats"),
stream = tokenFilterFactory("KeywordMarker", Version.LATEST, new StringMockResourceLoader("cats"),
"protected", "protwords.txt",
"pattern", "birds|Dogs").create(stream);
stream = tokenFilterFactory("PorterStem").create(stream);
@@ -62,8 +62,7 @@ public class TestKeywordMarkerFilterFactory extends BaseTokenStreamFactoryTestCa
public void testKeywordsCaseInsensitive() throws Exception {
Reader reader = new StringReader("dogs cats Cats");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
new StringMockResourceLoader("cats"),
stream = tokenFilterFactory("KeywordMarker", Version.LATEST, new StringMockResourceLoader("cats"),
"protected", "protwords.txt",
"ignoreCase", "true").create(stream);
stream = tokenFilterFactory("PorterStem").create(stream);
@@ -83,7 +82,7 @@ public class TestKeywordMarkerFilterFactory extends BaseTokenStreamFactoryTestCa
public void testKeywordsCaseInsensitiveMixed() throws Exception {
Reader reader = new StringReader("dogs cats Cats Birds birds");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("KeywordMarker", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("KeywordMarker", Version.LATEST,
new StringMockResourceLoader("cats"),
"protected", "protwords.txt",
"pattern", "birds",

@@ -23,6 +23,7 @@ import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.util.Version;
/**
* Simple tests to ensure the stemmer override filter factory is working.
@@ -32,7 +33,7 @@ public class TestStemmerOverrideFilterFactory extends BaseTokenStreamFactoryTest
// our stemdict stems dogs to 'cat'
Reader reader = new StringReader("testing dogs");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("StemmerOverride", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("StemmerOverride", Version.LATEST,
new StringMockResourceLoader("dogs\tcat"),
"dictionary", "stemdict.txt").create(stream);
stream = tokenFilterFactory("PorterStem").create(stream);
@@ -43,7 +44,7 @@ public class TestStemmerOverrideFilterFactory extends BaseTokenStreamFactoryTest
public void testKeywordsCaseInsensitive() throws Exception {
Reader reader = new StringReader("testing DoGs");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("StemmerOverride", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("StemmerOverride", Version.LATEST,
new StringMockResourceLoader("dogs\tcat"),
"dictionary", "stemdict.txt",
"ignoreCase", "true").create(stream);

@@ -20,6 +20,7 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.util.Version;
import org.tartarus.snowball.ext.EnglishStemmer;
import java.io.Reader;
@@ -50,7 +51,7 @@ public class TestSnowballPorterFilterFactory extends BaseTokenStreamFactoryTestC
public void testProtected() throws Exception {
Reader reader = new StringReader("ridding of some stemming");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("SnowballPorter", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("SnowballPorter", Version.LATEST,
new StringMockResourceLoader("ridding"),
"protected", "protwords.txt",
"language", "English").create(stream);

@@ -20,6 +20,7 @@ package org.apache.lucene.analysis.synonym;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.util.Version;
import java.io.Reader;
import java.io.StringReader;
@@ -32,7 +33,7 @@ public class TestMultiWordSynonyms extends BaseTokenStreamFactoryTestCase {
public void testMultiWordSynonyms() throws Exception {
Reader reader = new StringReader("a e");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("Synonym", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("Synonym", Version.LATEST,
new StringMockResourceLoader("a b c,d"),
"synonyms", "synonyms.txt").create(stream);
// This fails because ["e","e"] is the value of the token stream

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.StringMockResourceLoader;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.util.Version;
public class TestSynonymFilterFactory extends BaseTokenStreamFactoryTestCase {
@@ -59,7 +60,7 @@ public class TestSynonymFilterFactory extends BaseTokenStreamFactoryTestCase {
public void testEmptySynonyms() throws Exception {
Reader reader = new StringReader("GB");
TokenStream stream = whitespaceMockTokenizer(reader);
stream = tokenFilterFactory("Synonym", TEST_VERSION_CURRENT,
stream = tokenFilterFactory("Synonym", Version.LATEST,
new StringMockResourceLoader(""), // empty file!
"synonyms", "synonyms.txt").create(stream);
assertTokenStreamContents(stream, new String[] { "GB" });

@@ -79,7 +79,7 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
* be on the test classpath.
*/
protected TokenizerFactory tokenizerFactory(String name, String... keysAndValues) throws Exception {
return tokenizerFactory(name, TEST_VERSION_CURRENT, keysAndValues);
return tokenizerFactory(name, Version.LATEST, keysAndValues);
}
/**
@@ -114,7 +114,7 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
* be on the test classpath.
*/
protected TokenFilterFactory tokenFilterFactory(String name, String... keysAndValues) throws Exception {
return tokenFilterFactory(name, TEST_VERSION_CURRENT, keysAndValues);
return tokenFilterFactory(name, Version.LATEST, keysAndValues);
}
/**
@@ -131,7 +131,7 @@ public abstract class BaseTokenStreamFactoryTestCase extends BaseTokenStreamTest
* be on the test classpath.
*/
protected CharFilterFactory charFilterFactory(String name, String... keysAndValues) throws Exception {
return charFilterFactory(name, TEST_VERSION_CURRENT, new ClasspathResourceLoader(getClass()), keysAndValues);
return charFilterFactory(name, Version.LATEST, new ClasspathResourceLoader(getClass()), keysAndValues);
}
/**

@@ -25,12 +25,13 @@ import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
import org.apache.lucene.analysis.miscellaneous.RemoveDuplicatesTokenFilterFactory;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
public class TestAnalysisSPILoader extends LuceneTestCase {
private Map<String,String> versionArgOnly() {
return new HashMap<String,String>() {{
put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
put("luceneMatchVersion", Version.LATEST.toString());
}};
}

@@ -71,10 +71,17 @@ public final class ICUCollationKeyAnalyzer extends Analyzer {
/**
* Create a new ICUCollationKeyAnalyzer, using the specified collator.
*
* @param matchVersion compatibility version
*
* @param collator CollationKey generator
*/
public ICUCollationKeyAnalyzer(Collator collator) {
this.factory = new ICUCollationAttributeFactory(collator);
}
/**
* @deprecated Use {@link #ICUCollationKeyAnalyzer(Collator)}
*/
@Deprecated
public ICUCollationKeyAnalyzer(Version matchVersion, Collator collator) {
this.factory = new ICUCollationAttributeFactory(collator);
}
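In code, a minimal sketch of the preferred versus deprecated form (hypothetical locale; the package documentation below gives fuller, end-to-end examples):

Collator collator = Collator.getInstance(new ULocale("da", "DK"));
Analyzer analyzer = new ICUCollationKeyAnalyzer(collator); // preferred
Analyzer legacy = new ICUCollationKeyAnalyzer(Version.LATEST, collator); // deprecated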

@@ -14,7 +14,6 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- :Post-Release-Update-Version.LUCENE_XY: - several mentions in this file -->
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
@@ -115,9 +114,9 @@ algorithm.
<h3>Farsi Range Queries</h3>
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("ar"));
ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_5_0, collator);
ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(collator);
RAMDirectory ramDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628",
Field.Store.YES, Field.Index.ANALYZED));
@@ -125,7 +124,7 @@ algorithm.
writer.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParser aqp = new QueryParser(Version.LUCENE_5_0, "content", analyzer);
QueryParser aqp = new QueryParser("content", analyzer);
aqp.setAnalyzeRangeTerms(true);
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -141,9 +140,9 @@ algorithm.
<h3>Danish Sorting</h3>
<pre class="prettyprint">
Analyzer analyzer
= new ICUCollationKeyAnalyzer(Version.LUCENE_5_0, Collator.getInstance(new ULocale("da", "dk")));
= new ICUCollationKeyAnalyzer(Collator.getInstance(new ULocale("da", "dk")));
RAMDirectory indexStore = new RAMDirectory();
IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(analyzer));
String[] tracer = new String[] { "A", "B", "C", "D", "E" };
String[] data = new String[] { "HAT", "HUT", "H\u00C5T", "H\u00D8T", "HOT" };
String[] sortedTracerOrder = new String[] { "A", "E", "B", "D", "C" };
@@ -169,15 +168,15 @@ algorithm.
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("tr", "TR"));
collator.setStrength(Collator.PRIMARY);
Analyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_5_0, collator);
Analyzer analyzer = new ICUCollationKeyAnalyzer(collator);
RAMDirectory ramDir = new RAMDirectory();
IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(analyzer));
Document doc = new Document();
doc.add(new Field("contents", "DIGY", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
QueryParser parser = new QueryParser(Version.LUCENE_5_0, "contents", analyzer);
QueryParser parser = new QueryParser("contents", analyzer);
Query query = parser.parse("d\u0131gy"); // U+0131: dotless i
ScoreDoc[] result = is.search(query, null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);

@@ -29,7 +29,7 @@ import java.util.Locale;
public class TestICUCollationKeyAnalyzer extends CollationTestBase {
private Collator collator = Collator.getInstance(new Locale("fa"));
private Analyzer analyzer = new ICUCollationKeyAnalyzer(TEST_VERSION_CURRENT, collator);
private Analyzer analyzer = new ICUCollationKeyAnalyzer(collator);
private BytesRef firstRangeBeginning = new BytesRef
(collator.getCollationKey(firstRangeBeginningOriginal).toByteArray());
@@ -62,7 +62,7 @@ public class TestICUCollationKeyAnalyzer extends CollationTestBase {
Locale locale = Locale.GERMAN;
Collator collator = Collator.getInstance(locale);
collator.setStrength(Collator.IDENTICAL);
assertThreadSafe(new ICUCollationKeyAnalyzer(TEST_VERSION_CURRENT, collator));
assertThreadSafe(new ICUCollationKeyAnalyzer(collator));
}
}
}

@@ -25,6 +25,7 @@ import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.util.Version;
/**
* Simple tests for {@link JapanesePartOfSpeechStopFilterFactory}
@@ -40,7 +41,7 @@ public class TestJapanesePartOfSpeechStopFilterFactory extends BaseTokenStreamTe
TokenStream ts = tokenizerFactory.create();
((Tokenizer)ts).setReader(new StringReader("私は制限スピードを超える。"));
Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("luceneMatchVersion", Version.LATEST.toString());
args.put("tags", "stoptags.txt");
JapanesePartOfSpeechStopFilterFactory factory = new JapanesePartOfSpeechStopFilterFactory(args);
factory.inform(new StringMockResourceLoader(tags));
@@ -54,7 +55,7 @@ public class TestJapanesePartOfSpeechStopFilterFactory extends BaseTokenStreamTe
public void testBogusArguments() throws Exception {
try {
new JapanesePartOfSpeechStopFilterFactory(new HashMap<String,String>() {{
put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
put("luceneMatchVersion", Version.LATEST.toString());
put("bogusArg", "bogusValue");
}});
fail();

@@ -58,7 +58,7 @@ import java.util.regex.Pattern;
* <li>zero or more TokenFilterFactory's</li>
* </ol>
*
* Each component analysis factory map specify <tt>luceneMatchVersion</tt> (defaults to
* Each component analysis factory may specify <tt>luceneMatchVersion</tt> (defaults to
* {@link Version#LATEST}) and any of the args understood by the specified
* *Factory class, in the above-describe param format.
* <p/>

@@ -30,6 +30,7 @@ import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.junit.BeforeClass;
/** Tests the functionality of {@link AddIndexesTask}. */
@@ -57,7 +58,7 @@ public class AddIndexesTaskTest extends BenchmarkTestCase {
private PerfRunData createPerfRunData() throws Exception {
Properties props = new Properties();
props.setProperty("writer.version", TEST_VERSION_CURRENT.toString());
props.setProperty("writer.version", Version.LATEST.toString());
props.setProperty("print.props", "false"); // don't print anything
props.setProperty("directory", "RAMDirectory");
props.setProperty(AddIndexesTask.ADDINDEXES_INPUT_DIR, inputDir.getAbsolutePath());

@@ -23,13 +23,14 @@ import org.apache.lucene.benchmark.BenchmarkTestCase;
import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.util.Version;
/** Tests the functionality of {@link CreateIndexTask}. */
public class CommitIndexTaskTest extends BenchmarkTestCase {
private PerfRunData createPerfRunData() throws Exception {
Properties props = new Properties();
props.setProperty("writer.version", TEST_VERSION_CURRENT.toString());
props.setProperty("writer.version", Version.LATEST.toString());
props.setProperty("print.props", "false"); // don't print anything
props.setProperty("directory", "RAMDirectory");
Config config = new Config(props);

@@ -37,8 +37,7 @@ public class CreateIndexTaskTest extends BenchmarkTestCase {
private PerfRunData createPerfRunData(String infoStreamValue) throws Exception {
Properties props = new Properties();
// :Post-Release-Update-Version.LUCENE_XY:
props.setProperty("writer.version", Version.LUCENE_5_0_0.toString());
props.setProperty("writer.version", Version.LATEST.toString());
props.setProperty("print.props", "false"); // don't print anything
props.setProperty("directory", "RAMDirectory");
if (infoStreamValue != null) {

@@ -68,7 +68,6 @@ public class DatasetSplitter {
Analyzer analyzer, String... fieldNames) throws IOException {
// create IWs for train / test / cv IDXs
// :Post-Release-Update-Version.LUCENE_XY:
IndexWriter testWriter = new IndexWriter(testIndex, new IndexWriterConfig(analyzer));
IndexWriter cvWriter = new IndexWriter(crossValidationIndex, new IndexWriterConfig(analyzer));
IndexWriter trainingWriter = new IndexWriter(trainingIndex, new IndexWriterConfig(analyzer));

@@ -31,7 +31,6 @@ import org.apache.lucene.util.IOUtils;
// - let subclass dictate policy...?
// - rename to MergeCacheingDir? NRTCachingDir
// :Post-Release-Update-Version.LUCENE_X_Y: (in <pre> block in javadoc below)
/**
* Wraps a {@link RAMDirectory}
* around any provided delegate directory, to
@@ -52,7 +51,7 @@ import org.apache.lucene.util.IOUtils;
* <pre class="prettyprint">
* Directory fsDir = FSDirectory.open(new File("/path/to/index"));
* NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
* IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_5_0_0, analyzer);
* IndexWriterConfig conf = new IndexWriterConfig(analyzer);
* IndexWriter writer = new IndexWriter(cachedFSDir, conf);
* </pre>
*

@@ -489,7 +489,7 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
// LUCENE-2593
public void testCorruptionAfterDiskFullDuringMerge() throws IOException {
MockDirectoryWrapper dir = newMockDirectory();
//IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).setReaderPooling(true));
//IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)).setReaderPooling(true));
IndexWriter w = new IndexWriter(
dir,
newIndexWriterConfig(new MockAnalyzer(random()))

@@ -30,7 +30,6 @@ import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import java.io.BufferedReader;
import java.io.File;
@@ -87,7 +86,6 @@ public class IndexFiles {
System.out.println("Indexing to directory '" + indexPath + "'...");
Directory dir = FSDirectory.open(new File(indexPath));
// :Post-Release-Update-Version.LUCENE_XY:
Analyzer analyzer = new StandardAnalyzer();
IndexWriterConfig iwc = new IndexWriterConfig(analyzer);

@@ -1,33 +0,0 @@
package org.apache.lucene.demo.facet;
import org.apache.lucene.util.Version;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Hold various constants used by facet examples.
*
* @lucene.experimental
*/
public interface FacetExamples {
// :Post-Release-Update-Version.LUCENE_XY:
/** The Lucene {@link Version} used by the example code. */
public static final Version EXAMPLES_VER = Version.LUCENE_5_0_0;
}

@@ -282,7 +282,6 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// TODO: should we use a more optimized Codec, e.g. Pulsing (or write custom)?
// The taxonomy has a unique structure, where each term is associated with one document
// :Post-Release-Update-Version.LUCENE_XY:
// Make sure we use a MergePolicy which always merges adjacent segments and thus
// keeps the doc IDs ordered as well (this is crucial for the taxonomy index).
return new IndexWriterConfig(null).setOpenMode(openMode).setMergePolicy(

@@ -23,6 +23,7 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.Version;
public class TestMultiPassIndexSplitter extends LuceneTestCase {
IndexReader input;
@@ -65,7 +66,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
newDirectory(),
newDirectory()
};
splitter.split(TEST_VERSION_CURRENT, input, dirs, false);
splitter.split(Version.LATEST, input, dirs, false);
IndexReader ir;
ir = DirectoryReader.open(dirs[0]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1); // rounding error
@@ -110,7 +111,7 @@ public class TestMultiPassIndexSplitter extends LuceneTestCase {
newDirectory(),
newDirectory()
};
splitter.split(TEST_VERSION_CURRENT, input, dirs, true);
splitter.split(Version.LATEST, input, dirs, true);
IndexReader ir;
ir = DirectoryReader.open(dirs[0]);
assertTrue(ir.numDocs() - NUM_DOCS / 3 <= 1);

@@ -1048,7 +1048,7 @@ public class TestQPHelper extends LuceneTestCase {
// Todo: Convert from DateField to DateUtil
// public void testLocalDateFormat() throws IOException, QueryNodeException {
// Directory ramDir = newDirectory();
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
// iw.close();

@@ -941,7 +941,7 @@ public abstract class QueryParserTestBase extends LuceneTestCase {
// Todo: convert this from DateField to DateUtil
// public void testLocalDateFormat() throws IOException, ParseException {
// Directory ramDir = newDirectory();
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig( TEST_VERSION_CURRENT, new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
// IndexWriter iw = new IndexWriter(ramDir, newIndexWriterConfig(new MockAnalyzer(random, MockTokenizer.WHITESPACE, false)));
// addDateDoc("a", 2005, 12, 2, 10, 15, 33, iw);
// addDateDoc("b", 2005, 12, 4, 22, 15, 00, iw);
// iw.close();

@@ -146,10 +146,19 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
* private to the infix suggester (i.e., not an external
* Lucene index). Note that {@link #close}
* will also close the provided directory. */
public AnalyzingInfixSuggester(Directory dir, Analyzer analyzer) throws IOException {
this(dir, analyzer, analyzer, DEFAULT_MIN_PREFIX_CHARS, false);
}
/**
* @deprecated Use {@link #AnalyzingInfixSuggester(Directory, Analyzer)}
*/
@Deprecated
public AnalyzingInfixSuggester(Version matchVersion, Directory dir, Analyzer analyzer) throws IOException {
this(matchVersion, dir, analyzer, analyzer, DEFAULT_MIN_PREFIX_CHARS, false);
}
/** Create a new instance, loading from a previously built
* AnalyzingInfixSuggester directory, if it exists. This directory must be
* private to the infix suggester (i.e., not an external
@@ -165,6 +174,15 @@ public class AnalyzingInfixSuggester extends Lookup implements Closeable {
* @param commitOnBuild Call commit after the index has finished building. This would persist the
* suggester index to disk and future instances of this suggester can use this pre-built dictionary.
*/
public AnalyzingInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars,
boolean commitOnBuild) throws IOException {
this(indexAnalyzer.getVersion(), dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild);
}
/**
* @deprecated Use {@link #AnalyzingInfixSuggester(Directory, Analyzer, Analyzer, int, boolean)}
*/
@Deprecated
public AnalyzingInfixSuggester(Version matchVersion, Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer, int minPrefixChars,
boolean commitOnBuild) throws IOException {

@@ -94,6 +94,14 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
* Create a new instance, loading from a previously built
* directory, if it exists.
*/
public BlendedInfixSuggester(Directory dir, Analyzer analyzer) throws IOException {
this(analyzer.getVersion(), dir, analyzer);
}
/**
* @deprecated Use {@link #BlendedInfixSuggester(Directory, Analyzer)}
*/
@Deprecated
public BlendedInfixSuggester(Version matchVersion, Directory dir, Analyzer analyzer) throws IOException {
super(matchVersion, dir, analyzer);
this.blenderType = BlenderType.POSITION_LINEAR;
@@ -110,6 +118,15 @@ public class BlendedInfixSuggester extends AnalyzingInfixSuggester {
* suggester index to disk and future instances of this suggester can use this pre-built dictionary.
* @throws IOException If there are problems opening the underlying Lucene index.
*/
public BlendedInfixSuggester(Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer,
int minPrefixChars, BlenderType blenderType, int numFactor, boolean commitOnBuild) throws IOException {
this(indexAnalyzer.getVersion(), dir, indexAnalyzer, queryAnalyzer, minPrefixChars, blenderType, numFactor, commitOnBuild);
}
/**
* @deprecated Use {@link #BlendedInfixSuggester(Directory, Analyzer, Analyzer, int, BlendedInfixSuggester.BlenderType, int, boolean)}
*/
@Deprecated
public BlendedInfixSuggester(Version matchVersion, Directory dir, Analyzer indexAnalyzer, Analyzer queryAnalyzer,
int minPrefixChars, BlenderType blenderType, int numFactor, boolean commitOnBuild) throws IOException {
super(matchVersion, dir, indexAnalyzer, queryAnalyzer, minPrefixChars, commitOnBuild);
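The blended variant follows the same pattern; a sketch (blender type and factor are illustrative, matching the tests below):

BlendedInfixSuggester blended = new BlendedInfixSuggester(FSDirectory.open(new File("/tmp/blended")), a, a,
AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false);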

@@ -163,7 +163,7 @@ public class LookupBenchmarkTest extends LuceneTestCase {
} catch (InstantiationException e) {
Analyzer a = new MockAnalyzer(random, MockTokenizer.KEYWORD, false);
if (cls == AnalyzingInfixSuggester.class) {
lookup = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, FSDirectory.open(createTempDir("LookupBenchmarkTest")), a);
lookup = new AnalyzingInfixSuggester(FSDirectory.open(createTempDir("LookupBenchmarkTest")), a);
} else {
Constructor<? extends Lookup> ctor = cls.getConstructor(Analyzer.class);
lookup = ctor.newInstance(a);

@@ -51,7 +51,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true);
@@ -94,12 +94,12 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
File tempDir = createTempDir("AnalyzingInfixSuggesterTest");
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
assertEquals(2, suggester.getCount());
suggester.close();
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, 3, false);
suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true);
assertEquals(2, results.size());
assertEquals("a penny saved is a penny <b>ear</b>ned", results.get(0).key);
@@ -138,7 +138,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false) {
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false) {
@Override
protected Object highlight(String text, Set<String> matchedTokens, String prefixToken) throws IOException {
try (TokenStream ts = queryAnalyzer.tokenStream("text", new StringReader(text))) {
@@ -214,7 +214,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
int minPrefixLength = random().nextInt(10);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, minPrefixLength, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixLength, false);
suggester.build(new InputArrayIterator(keys));
for(int i=0;i<2;i++) {
@@ -271,7 +271,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
// Make sure things still work after close and reopen:
suggester.close();
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, minPrefixLength, false);
suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixLength, false);
}
suggester.close();
}
@@ -282,7 +282,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true);
assertEquals(1, results.size());
@@ -296,7 +296,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, true);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("penn", random()), 10, true, true);
assertEquals(1, results.size());
@@ -305,7 +305,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
// Try again, but overriding addPrefixMatch to highlight
// the entire hit:
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false) {
suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false) {
@Override
protected void addPrefixMatch(StringBuilder sb, String surface, String analyzed, String prefixToken) {
sb.append("<b>");
@@ -326,7 +326,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
suggester.close();
suggester.close();
@@ -352,7 +352,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
}
};
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), indexAnalyzer, queryAnalyzer, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), indexAnalyzer, queryAnalyzer, 3, false);
Input keys[] = new Input[] {
new Input("a bob for apples", 10, new BytesRef("foobaz")),
@@ -367,7 +367,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
public void testEmptyAtStart() throws Exception {
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(new Input[0]));
suggester.add(new BytesRef("a penny saved is a penny earned"), null, 10, new BytesRef("foobaz"));
suggester.add(new BytesRef("lend me your ear"), null, 8, new BytesRef("foobar"));
@@ -405,7 +405,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
public void testBothExactAndPrefix() throws Exception {
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(new Input[0]));
suggester.add(new BytesRef("the pen is pretty"), null, 10, new BytesRef("foobaz"));
suggester.refresh();
@@ -478,7 +478,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
System.out.println(" minPrefixChars=" + minPrefixChars);
}
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, minPrefixChars, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixChars, false);
// Initial suggester built with nothing:
suggester.build(new InputArrayIterator(new Input[0]));
@@ -558,7 +558,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
}
lookupThread.finish();
suggester.close();
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, minPrefixChars, false);
suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, minPrefixChars, false);
lookupThread = new LookupThread(suggester);
lookupThread.start();
@@ -730,7 +730,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newDirectory(), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newDirectory(), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
List<LookupResult> results = suggester.lookup(TestUtil.stringToCharSequence("ear", random()), 10, true, true);
@@ -794,7 +794,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
String[] keys = new String[] {"python", "java", "c", "scala", "ruby", "clojure", "erlang", "go", "swift", "lisp"};
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
File tempDir = createTempDir("AIS_NRT_PERSIST_TEST");
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, 3, false);
AnalyzingInfixSuggester suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
Thread[] multiAddThreads = new Thread[10];
try {
suggester.refresh();
@@ -822,7 +822,7 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
suggester.commit();
suggester.close();
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, 3, false);
suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
results = suggester.lookup(TestUtil.stringToCharSequence("python", random()), 10, true, false);
assertEquals(1, results.size());
assertEquals("python", results.get(0).key);
@@ -871,11 +871,11 @@ public class AnalyzingInfixSuggesterTest extends LuceneTestCase {
AnalyzingInfixSuggester suggester;
Analyzer a = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
if (iter == 0) {
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, 3, false);
suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
suggester.build(new InputArrayIterator(keys));
} else {
// Test again, after close/reopen:
suggester = new AnalyzingInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a, 3, false);
suggester = new AnalyzingInfixSuggester(newFSDirectory(tempDir), a, a, 3, false);
}
// No context provided, all results returned

@@ -47,7 +47,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
File tempDir = createTempDir("BlendedInfixSuggesterTest");
Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
BlendedInfixSuggester.BlenderType.POSITION_LINEAR,
BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false);
@@ -87,7 +87,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
// BlenderType.LINEAR is used by default (remove position*10%)
BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a);
BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a);
suggester.build(new InputArrayIterator(keys));
assertEquals(w, getInResults(suggester, "top", pl, 1));
@@ -97,7 +97,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
suggester.close();
// BlenderType.RECIPROCAL is using 1/(1+p) * w where w is weight and p the position of the word
suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false);
suggester.build(new InputArrayIterator(keys));
@@ -129,7 +129,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
// if factor is small, we don't get the expected element
BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 1, false);
@@ -149,7 +149,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
suggester.close();
// if we increase the factor we have it
suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL, 2, false);
suggester.build(new InputArrayIterator(keys));
@@ -181,7 +181,7 @@ public class BlendedInfixSuggesterTest extends LuceneTestCase {
Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
// if factor is small, we don't get the expected element
BlendedInfixSuggester suggester = new BlendedInfixSuggester(TEST_VERSION_CURRENT, newFSDirectory(tempDir), a, a,
BlendedInfixSuggester suggester = new BlendedInfixSuggester(newFSDirectory(tempDir), a, a,
AnalyzingInfixSuggester.DEFAULT_MIN_PREFIX_CHARS,
BlendedInfixSuggester.BlenderType.POSITION_RECIPROCAL,
BlendedInfixSuggester.DEFAULT_NUM_FACTOR, false);

@@ -371,13 +371,6 @@ public abstract class LuceneTestCase extends Assert {
// for all suites ever since.
// -----------------------------------------------------------------
// :Post-Release-Update-Version.LUCENE_XY:
/**
* Use this constant when creating Analyzers and any other version-dependent stuff.
* <p><b>NOTE:</b> Change this when development starts for new Lucene version:
*/
public static final Version TEST_VERSION_CURRENT = Version.LUCENE_5_0_0;
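With the constant removed, call sites name a version explicitly; in miniature, the substitution applied throughout this commit:

// before: args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("luceneMatchVersion", Version.LATEST.toString());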
/**
* True if and only if tests are run in verbose mode. If this flag is false
* tests are not expected to print any messages. Enforced with {@link TestRuleLimitSysouts}.

@@ -24,6 +24,7 @@ import java.util.Map;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.util.Version;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilterFactory;
import org.apache.lucene.analysis.util.ResourceLoader;
@@ -200,7 +201,7 @@ public class TestWordDelimiterFilterFactory extends SolrTestCaseJ4 {
String testText = "I borrowed $5,400.00 at 25% interest-rate";
ResourceLoader loader = new SolrResourceLoader("solr/collection1");
Map<String,String> args = new HashMap<>();
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("luceneMatchVersion", Version.LATEST.toString());
args.put("generateWordParts", "1");
args.put("generateNumberParts", "1");
args.put("catenateWords", "1");
@@ -224,7 +225,7 @@ public class TestWordDelimiterFilterFactory extends SolrTestCaseJ4 {
/* custom behavior */
args = new HashMap<>();
// use a custom type mapping
args.put("luceneMatchVersion", TEST_VERSION_CURRENT.toString());
args.put("luceneMatchVersion", Version.LATEST.toString());
args.put("generateWordParts", "1");
args.put("generateNumberParts", "1");
args.put("catenateWords", "1");

@@ -101,7 +101,7 @@ public class TestStressLucene extends TestRTGBase {
// RAMDirectory dir = new RAMDirectory();
// final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
// final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new WhitespaceAnalyzer()));
Directory dir = newDirectory();