LUCENE-5364: Replace hard-coded Version.LUCENE_XY constants that don't have to be hard-coded (i.e., those not required for back-compat testing, version-dependent behavior, or demo code that should exemplify pinning versions in user code) with Version.LUCENE_CURRENT in non-test code, or with LuceneTestCase.TEST_VERSION_CURRENT in test code; upgrade hard-coded Version.LUCENE_XY constants that should track the next release version to that version if they aren't already there, and put a token near them so that they can be found and upgraded when the next release version changes: ':Post-Release-Update-Version.LUCENE_XY:'

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1549701 13f79535-47bb-0310-9956-ffa450edef68
Steven Rowe 2013-12-09 22:53:38 +00:00
parent 0991a217d1
commit a1461ad9b1
32 changed files with 72 additions and 66 deletions
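
The commit message boils down to three conventions. A minimal sketch in Java (the VersionConventions class and its field names are hypothetical, invented for illustration; the constructors and constants are the Lucene 4.x-era APIs):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.analysis.util.CharArraySet;
    import org.apache.lucene.util.Version;

    // Hypothetical holder class; not part of this commit.
    class VersionConventions {
      // 1) Internal (non-test) code with no version-dependent behavior:
      //    track whatever version is current.
      static final CharArraySet INTERNAL_SET =
          new CharArraySet(Version.LUCENE_CURRENT, 16, true);

      // 2) Test code uses LuceneTestCase.TEST_VERSION_CURRENT instead
      //    (not shown here, since this sketch is not a test class).

      // 3) Demo code that should exemplify pinning a version in user code:
      //    hard-code the next release version and tag it so the release
      //    process can find and bump it.
      // :Post-Release-Update-Version.LUCENE_XY:
      static final StandardAnalyzer DEMO_ANALYZER =
          new StandardAnalyzer(Version.LUCENE_50);
    }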

View File

@@ -73,7 +73,7 @@ CharacterEntities = ( "AElig" | "Aacute" | "Acirc" | "Agrave" | "Alpha"
upperCaseVariantsAccepted.put("amp", "AMP");
}
private static final CharArrayMap<Character> entityValues
- = new CharArrayMap<Character>(Version.LUCENE_40, 253, false);
+ = new CharArrayMap<Character>(Version.LUCENE_CURRENT, 253, false);
static {
String[] entities = {
"AElig", "\u00C6", "Aacute", "\u00C1", "Acirc", "\u00C2",

View File

@@ -30673,7 +30673,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
upperCaseVariantsAccepted.put("amp", "AMP");
}
private static final CharArrayMap<Character> entityValues
- = new CharArrayMap<Character>(Version.LUCENE_40, 253, false);
+ = new CharArrayMap<Character>(Version.LUCENE_CURRENT, 253, false);
static {
String[] entities = {
"AElig", "\u00C6", "Aacute", "\u00C1", "Acirc", "\u00C2",
@@ -30812,7 +30812,7 @@ public final class HTMLStripCharFilter extends BaseCharFilter {
escapeSTYLE = true;
} else {
if (null == this.escapedTags) {
- this.escapedTags = new CharArraySet(Version.LUCENE_40, 16, true);
+ this.escapedTags = new CharArraySet(Version.LUCENE_CURRENT, 16, true);
}
this.escapedTags.add(tag);
}

View File

@@ -197,7 +197,7 @@ InlineElment = ( [aAbBiIqQsSuU] |
escapeSTYLE = true;
} else {
if (null == this.escapedTags) {
- this.escapedTags = new CharArraySet(Version.LUCENE_40, 16, true);
+ this.escapedTags = new CharArraySet(Version.LUCENE_CURRENT, 16, true);
}
this.escapedTags.add(tag);
}

View File

@@ -61,7 +61,7 @@ def main():
print ' upperCaseVariantsAccepted.put("amp", "AMP");'
print ' }'
print ' private static final CharArrayMap<Character> entityValues'
- print ' = new CharArrayMap<Character>(Version.LUCENE_40, %i, false);' % len(keys)
+ print ' = new CharArrayMap<Character>(Version.LUCENE_CURRENT, %i, false);' % len(keys)
print ' static {'
print ' String[] entities = {'
output_line = ' '

View File

@@ -196,7 +196,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc4 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc4 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("θ", "δ", "ελ", "γαλ", "ν", "π", "ιδ", "παρ"),
false);
@@ -222,7 +222,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc6 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc6 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("αλ", "αδ", "ενδ", "αμαν", "αμμοχαλ", "ηθ", "ανηθ",
"αντιδ", "φυσ", "βρωμ", "γερ", "εξωδ", "καλπ", "καλλιν", "καταδ",
"μουλ", "μπαν", "μπαγιατ", "μπολ", "μποσ", "νιτ", "ξικ", "συνομηλ",
@@ -247,7 +247,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc7 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc7 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("αναπ", "αποθ", "αποκ", "αποστ", "βουβ", "ξεθ", "ουλ",
"πεθ", "πικρ", "ποτ", "σιχ", "χ"),
false);
@@ -274,11 +274,11 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc8a = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc8a = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("τρ", "τσ"),
false);
- private static final CharArraySet exc8b = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc8b = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("βετερ", "βουλκ", "βραχμ", "γ", "δραδουμ", "θ", "καλπουζ",
"καστελ", "κορμορ", "λαοπλ", "μωαμεθ", "μ", "μουσουλμ", "ν", "ουλ",
"π", "πελεκ", "πλ", "πολισ", "πορτολ", "σαρακατσ", "σουλτ",
@@ -337,7 +337,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc9 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc9 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("αβαρ", "βεν", "εναρ", "αβρ", "αδ", "αθ", "αν", "απλ",
"βαρον", "ντρ", "σκ", "κοπ", "μπορ", "νιφ", "παγ", "παρακαλ", "σερπ",
"σκελ", "συρφ", "τοκ", "υ", "δ", "εμ", "θαρρ", "θ"),
@@ -425,11 +425,11 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc12a = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc12a = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("π", "απ", "συμπ", "ασυμπ", "ακαταπ", "αμεταμφ"),
false);
- private static final CharArraySet exc12b = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc12b = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("αλ", "αρ", "εκτελ", "ζ", "μ", "ξ", "παρακαλ", "αρ", "προ", "νισ"),
false);
@@ -449,7 +449,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc13 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc13 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("διαθ", "θ", "παρακαταθ", "προσθ", "συνθ"),
false);
@@ -483,7 +483,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc14 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc14 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("φαρμακ", "χαδ", "αγκ", "αναρρ", "βρομ", "εκλιπ", "λαμπιδ",
"λεχ", "μ", "πατ", "ρ", "λ", "μεδ", "μεσαζ", "υποτειν", "αμ", "αιθ",
"ανηκ", "δεσποζ", "ενδιαφερ", "δε", "δευτερευ", "καθαρευ", "πλε",
@@ -521,7 +521,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc15a = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc15a = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("αβαστ", "πολυφ", "αδηφ", "παμφ", "ρ", "ασπ", "αφ", "αμαλ",
"αμαλλι", "ανυστ", "απερ", "ασπαρ", "αχαρ", "δερβεν", "δροσοπ",
"ξεφ", "νεοπ", "νομοτ", "ολοπ", "ομοτ", "προστ", "προσωποπ", "συμπ",
@@ -530,7 +530,7 @@
"ουλαμ", "ουρ", "π", "τρ", "μ"),
false);
- private static final CharArraySet exc15b = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc15b = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("ψοφ", "ναυλοχ"),
false);
@@ -567,7 +567,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc16 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc16 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("ν", "χερσον", "δωδεκαν", "ερημον", "μεγαλον", "επταν"),
false);
@@ -587,7 +587,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc17 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc17 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("ασβ", "σβ", "αχρ", "χρ", "απλ", "αειμν", "δυσχρ", "ευχρ", "κοινοχρ", "παλιμψ"),
false);
@@ -601,7 +601,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc18 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc18 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("ν", "ρ", "σπι", "στραβομουτσ", "κακομουτσ", "εξων"),
false);
@@ -625,7 +625,7 @@ public class GreekStemmer {
return len;
}
- private static final CharArraySet exc19 = new CharArraySet(Version.LUCENE_50,
+ private static final CharArraySet exc19 = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList("παρασουσ", "φ", "χ", "ωριοπλ", "αζ", "αλλοσουσ", "ασουσ"),
false);

View File

@@ -280,10 +280,7 @@ public class KStemmer {
DictEntry defaultEntry;
DictEntry entry;
- CharArrayMap<DictEntry> d = new CharArrayMap<DictEntry>(
- Version.LUCENE_50, 1000, false);
- d = new CharArrayMap<DictEntry>(Version.LUCENE_50, 1000, false);
+ CharArrayMap<DictEntry> d = new CharArrayMap<DictEntry>(Version.LUCENE_CURRENT, 1000, false);
for (int i = 0; i < exceptionWords.length; i++) {
if (!d.containsKey(exceptionWords[i])) {
entry = new DictEntry(exceptionWords[i], true);

View File

@@ -34,7 +34,7 @@ public class HunspellStemmer {
private final int recursionCap;
private final HunspellDictionary dictionary;
private final StringBuilder segment = new StringBuilder();
- private CharacterUtils charUtils = CharacterUtils.getInstance(Version.LUCENE_40);
+ private CharacterUtils charUtils = CharacterUtils.getInstance(Version.LUCENE_CURRENT);
/**
* Constructs a new HunspellStemmer which will use the provided HunspellDictionary to create its stems. Uses the
@@ -324,7 +324,8 @@ public class HunspellStemmer {
InputStream affixInputStream = new FileInputStream(args[offset++]);
InputStream dicInputStream = new FileInputStream(args[offset++]);
- HunspellDictionary dictionary = new HunspellDictionary(affixInputStream, dicInputStream, Version.LUCENE_40, ignoreCase);
+ // :Post-Release-Update-Version.LUCENE_XY:
+ HunspellDictionary dictionary = new HunspellDictionary(affixInputStream, dicInputStream, Version.LUCENE_50, ignoreCase);
affixInputStream.close();
dicInputStream.close();

View File

@@ -35,7 +35,7 @@ public final class RemoveDuplicatesTokenFilter extends TokenFilter {
private final PositionIncrementAttribute posIncAttribute = addAttribute(PositionIncrementAttribute.class);
// use a fixed version, as we don't care about case sensitivity.
- private final CharArraySet previous = new CharArraySet(Version.LUCENE_50, 8, false);
+ private final CharArraySet previous = new CharArraySet(Version.LUCENE_CURRENT, 8, false);
/**
* Creates a new RemoveDuplicatesTokenFilter

View File

@@ -134,7 +134,7 @@ public abstract class RSLPStemmerBase {
if (!exceptions[i].endsWith(suffix))
throw new RuntimeException("useless exception '" + exceptions[i] + "' does not end with '" + suffix + "'");
}
- this.exceptions = new CharArraySet(Version.LUCENE_50,
+ this.exceptions = new CharArraySet(Version.LUCENE_CURRENT,
Arrays.asList(exceptions), false);
}

View File

@@ -133,8 +133,8 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
analyzer = new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
- Tokenizer tokenizer = factory == null ? new WhitespaceTokenizer(Version.LUCENE_50, reader) : factory.create(reader);
- TokenStream stream = ignoreCase ? new LowerCaseFilter(Version.LUCENE_50, tokenizer) : tokenizer;
+ Tokenizer tokenizer = factory == null ? new WhitespaceTokenizer(Version.LUCENE_CURRENT, reader) : factory.create(reader);
+ TokenStream stream = ignoreCase ? new LowerCaseFilter(Version.LUCENE_CURRENT, tokenizer) : tokenizer;
return new TokenStreamComponents(tokenizer, stream);
}
};
@@ -201,7 +201,7 @@ public class SynonymFilterFactory extends TokenFilterFactory implements Resource
private Analyzer loadAnalyzer(ResourceLoader loader, String cname) throws IOException {
Class<? extends Analyzer> clazz = loader.findClass(cname, Analyzer.class);
try {
- Analyzer analyzer = clazz.getConstructor(Version.class).newInstance(Version.LUCENE_50);
+ Analyzer analyzer = clazz.getConstructor(Version.class).newInstance(Version.LUCENE_CURRENT);
if (analyzer instanceof ResourceLoaderAware) {
((ResourceLoaderAware) analyzer).inform(loader);
}

View File

@@ -60,7 +60,7 @@ public class TestStopAnalyzer extends BaseTokenStreamTestCase {
public void testStopList() throws IOException {
CharArraySet stopWordsSet = new CharArraySet(TEST_VERSION_CURRENT, asSet("good", "test", "analyzer"), false);
- StopAnalyzer newStop = new StopAnalyzer(Version.LUCENE_40, stopWordsSet);
+ StopAnalyzer newStop = new StopAnalyzer(TEST_VERSION_CURRENT, stopWordsSet);
try (TokenStream stream = newStop.tokenStream("test", "This is a good test of the english stop analyzer")) {
assertNotNull(stream);
CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);

View File

@@ -94,7 +94,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
// LUCENE-3849: make sure after .end() we see the "ending" posInc
public void testEndStopword() throws Exception {
CharArraySet stopSet = StopFilter.makeStopSet(TEST_VERSION_CURRENT, "of");
- StopFilter stpf = new StopFilter(Version.LUCENE_40, new MockTokenizer(new StringReader("test of"), MockTokenizer.WHITESPACE, false), stopSet);
+ StopFilter stpf = new StopFilter(TEST_VERSION_CURRENT, new MockTokenizer(new StringReader("test of"), MockTokenizer.WHITESPACE, false), stopSet);
assertTokenStreamContents(stpf, new String[] { "test" },
new int[] {0},
new int[] {4},

View File

@@ -14,6 +14,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
+ <!-- :Post-Release-Update-Version.LUCENE_XY: - several mentions in this file -->
<html>
<head>
<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
@@ -114,9 +115,9 @@ algorithm.
<h3>Farsi Range Queries</h3>
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("ar"));
- ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_40, collator);
+ ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_50, collator);
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_40, analyzer));
+ IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_50, analyzer));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628",
Field.Store.YES, Field.Index.ANALYZED));
@@ -124,7 +125,7 @@ algorithm.
writer.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
- QueryParser aqp = new QueryParser(Version.LUCENE_40, "content", analyzer);
+ QueryParser aqp = new QueryParser(Version.LUCENE_50, "content", analyzer);
aqp.setAnalyzeRangeTerms(true);
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -140,9 +141,9 @@ algorithm.
<h3>Danish Sorting</h3>
<pre class="prettyprint">
Analyzer analyzer
- = new ICUCollationKeyAnalyzer(Version.LUCENE_40, Collator.getInstance(new ULocale("da", "dk")));
+ = new ICUCollationKeyAnalyzer(Version.LUCENE_50, Collator.getInstance(new ULocale("da", "dk")));
RAMDirectory indexStore = new RAMDirectory();
- IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(Version.LUCENE_40, analyzer));
+ IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(Version.LUCENE_50, analyzer));
String[] tracer = new String[] { "A", "B", "C", "D", "E" };
String[] data = new String[] { "HAT", "HUT", "H\u00C5T", "H\u00D8T", "HOT" };
String[] sortedTracerOrder = new String[] { "A", "E", "B", "D", "C" };
@@ -168,15 +169,15 @@
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("tr", "TR"));
collator.setStrength(Collator.PRIMARY);
- Analyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_40, collator);
+ Analyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_50, collator);
RAMDirectory ramDir = new RAMDirectory();
- IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_40, analyzer));
+ IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_50, analyzer));
Document doc = new Document();
doc.add(new Field("contents", "DIGY", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
- QueryParser parser = new QueryParser(Version.LUCENE_40, "contents", analyzer);
+ QueryParser parser = new QueryParser(Version.LUCENE_50, "contents", analyzer);
Query query = parser.parse("d\u0131gy"); // U+0131: dotless i
ScoreDoc[] result = is.search(query, null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);

View File

@@ -97,7 +97,8 @@ public class CreateIndexTask extends PerfTask {
}
public static IndexWriterConfig createWriterConfig(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit) {
- Version version = Version.valueOf(config.get("writer.version", Version.LUCENE_40.toString()));
+ // :Post-Release-Update-Version.LUCENE_XY:
+ Version version = Version.valueOf(config.get("writer.version", Version.LUCENE_50.toString()));
IndexWriterConfig iwConf = new IndexWriterConfig(version, runData.getAnalyzer());
iwConf.setOpenMode(mode);
IndexDeletionPolicy indexDeletionPolicy = getIndexDeletionPolicy(config);

View File

@@ -37,7 +37,8 @@ public class CreateIndexTaskTest extends BenchmarkTestCase {
private PerfRunData createPerfRunData(String infoStreamValue) throws Exception {
Properties props = new Properties();
props.setProperty("writer.version", Version.LUCENE_40.toString());
// :Post-Release-Update-Version.LUCENE_XY:
props.setProperty("writer.version", Version.LUCENE_50.toString());
props.setProperty("print.props", "false"); // don't print anything
props.setProperty("directory", "RAMDirectory");
if (infoStreamValue != null) {

View File

@@ -69,6 +69,7 @@ public class DatasetSplitter {
Analyzer analyzer, String... fieldNames) throws IOException {
// create IWs for train / test / cv IDXs
+ // :Post-Release-Update-Version.LUCENE_XY:
IndexWriter testWriter = new IndexWriter(testIndex, new IndexWriterConfig(Version.LUCENE_50, analyzer));
IndexWriter cvWriter = new IndexWriter(crossValidationIndex, new IndexWriterConfig(Version.LUCENE_50, analyzer));
IndexWriter trainingWriter = new IndexWriter(trainingIndex, new IndexWriterConfig(Version.LUCENE_50, analyzer));

View File

@@ -50,7 +50,7 @@ import org.apache.lucene.util.IOUtils;
* <pre class="prettyprint">
* Directory fsDir = FSDirectory.open(new File("/path/to/index"));
* NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
- * IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_32, analyzer);
+ * IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_50, analyzer);
* IndexWriter writer = new IndexWriter(cachedFSDir, conf);
* </pre>
*

View File

@@ -217,7 +217,7 @@ public class TestPhraseQuery extends LuceneTestCase {
Directory directory = newDirectory();
Analyzer stopAnalyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
- newIndexWriterConfig( Version.LUCENE_40, stopAnalyzer));
+ newIndexWriterConfig(TEST_VERSION_CURRENT, stopAnalyzer));
Document doc = new Document();
doc.add(newTextField("field", "the stop words are here", Field.Store.YES));
writer.addDocument(doc);

View File

@@ -86,8 +86,9 @@ public class IndexFiles {
System.out.println("Indexing to directory '" + indexPath + "'...");
Directory dir = FSDirectory.open(new File(indexPath));
- Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
- IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_40, analyzer);
+ // :Post-Release-Update-Version.LUCENE_XY:
+ Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_50);
+ IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_50, analyzer);
if (create) {
// Create a new index in the directory, removing any

View File

@@ -90,7 +90,8 @@ public class SearchFiles {
IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
IndexSearcher searcher = new IndexSearcher(reader);
- Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
+ // :Post-Release-Update-Version.LUCENE_XY:
+ Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_50);
BufferedReader in = null;
if (queries != null) {
@@ -98,7 +99,8 @@
} else {
in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
}
- QueryParser parser = new QueryParser(Version.LUCENE_40, field, analyzer);
+ // :Post-Release-Update-Version.LUCENE_XY:
+ QueryParser parser = new QueryParser(Version.LUCENE_50, field, analyzer);
while (true) {
if (queries == null && queryString == null) { // prompt the user
System.out.println("Enter query: ");

View File

@@ -25,7 +25,8 @@ import org.apache.lucene.util.Version;
* @lucene.experimental
*/
public interface FacetExamples {
+ // :Post-Release-Update-Version.LUCENE_XY:
/** The Lucene {@link Version} used by the example code. */
public static final Version EXAMPLES_VER = Version.LUCENE_50;

View File

@@ -133,7 +133,7 @@ public class FormBasedXmlQueryDemo extends HttpServlet {
private void openExampleIndex() throws IOException {
//Create a RAM-based index from our test data file
RAMDirectory rd = new RAMDirectory();
- IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_40, analyzer);
+ IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_CURRENT, analyzer);
IndexWriter writer = new IndexWriter(rd, iwConfig);
InputStream dataIn = getServletContext().getResourceAsStream("/WEB-INF/data.tsv");
BufferedReader br = new BufferedReader(new InputStreamReader(dataIn, IOUtils.CHARSET_UTF_8));

View File

@@ -299,7 +299,8 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
protected IndexWriterConfig createIndexWriterConfig(OpenMode openMode) {
// TODO: should we use a more optimized Codec, e.g. Pulsing (or write custom)?
// The taxonomy has a unique structure, where each term is associated with one document
+ // :Post-Release-Update-Version.LUCENE_XY:
// Make sure we use a MergePolicy which always merges adjacent segments and thus
// keeps the doc IDs ordered as well (this is crucial for the taxonomy index).
return new IndexWriterConfig(Version.LUCENE_50, null).setOpenMode(openMode).setMergePolicy(

View File

@@ -65,7 +65,7 @@ public class TestParser extends LuceneTestCase {
BufferedReader d = new BufferedReader(new InputStreamReader(
TestParser.class.getResourceAsStream("reuters21578.txt"), "US-ASCII"));
dir = newDirectory();
- IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(Version.LUCENE_40, analyzer));
+ IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
String line = d.readLine();
while (line != null) {
int endOfDate = line.indexOf('\t');

View File

@@ -301,7 +301,7 @@ public class FreeTextSuggester extends Lookup {
Directory dir = FSDirectory.open(tempIndexPath);
- IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_46, indexAnalyzer);
+ IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_CURRENT, indexAnalyzer);
iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
iwc.setRAMBufferSizeMB(ramBufferSizeMB);
IndexWriter writer = new IndexWriter(dir, iwc);

View File

@@ -228,6 +228,7 @@ public abstract class LuceneTestCase extends Assert {
// for all suites ever since.
// -----------------------------------------------------------------
+ // :Post-Release-Update-Version.LUCENE_XY:
/**
* Use this constant when creating Analyzers and any other version-dependent stuff.
* <p><b>NOTE:</b> Change this when development starts for new Lucene version:

View File

@@ -187,8 +187,7 @@ public class ICUCollationField extends FieldType {
rbc.setVariableTop(variableTop);
}
- // we use 4.0 because it ensures we just encode the pure byte[] keys.
- analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_40, collator);
+ analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_CURRENT, collator);
}
/**

View File

@@ -147,8 +147,7 @@ public class CollationField extends FieldType {
else
throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid decomposition: " + decomposition);
}
- // we use 4.0 because it ensures we just encode the pure byte[] keys.
- analyzer = new CollationKeyAnalyzer(Version.LUCENE_40, collator);
+ analyzer = new CollationKeyAnalyzer(Version.LUCENE_CURRENT, collator);
}
/**

View File

@@ -43,7 +43,7 @@ public class SolrCoreCheckLockOnStartupTest extends SolrTestCaseJ4 {
//explicitly creates the temp dataDir so we know where the index will be located
createTempDir();
- IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_40, null);
+ IndexWriterConfig indexWriterConfig = new IndexWriterConfig(TEST_VERSION_CURRENT, null);
Directory directory = newFSDirectory(new File(dataDir, "index"));
//creates a new index on the known location
new IndexWriter(
@@ -58,7 +58,7 @@ public class SolrCoreCheckLockOnStartupTest extends SolrTestCaseJ4 {
Directory directory = newFSDirectory(new File(dataDir, "index"), new SimpleFSLockFactory());
//creates a new IndexWriter without releasing the lock yet
- IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_40, null));
+ IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
ignoreException("locked");
try {
@@ -84,7 +84,7 @@ public class SolrCoreCheckLockOnStartupTest extends SolrTestCaseJ4 {
log.info("Acquiring lock on {}", indexDir.getAbsolutePath());
Directory directory = newFSDirectory(indexDir, new NativeFSLockFactory());
//creates a new IndexWriter without releasing the lock yet
- IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(Version.LUCENE_40, null));
+ IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(TEST_VERSION_CURRENT, null));
ignoreException("locked");
try {

View File

@@ -118,7 +118,7 @@ public class TestArbitraryIndexDir extends AbstractSolrTestCase{
Directory dir = newFSDirectory(newDir);
IndexWriter iw = new IndexWriter(
dir,
- new IndexWriterConfig(Version.LUCENE_40, new StandardAnalyzer(Version.LUCENE_40))
+ new IndexWriterConfig(TEST_VERSION_CURRENT, new StandardAnalyzer(TEST_VERSION_CURRENT))
);
Document doc = new Document();
doc.add(new TextField("id", "2", Field.Store.YES));

View File

@@ -101,7 +101,7 @@ public class TestStressLucene extends TestRTGBase {
// RAMDirectory dir = new RAMDirectory();
- // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Version.LUCENE_40, new WhitespaceAnalyzer(Version.LUCENE_40)));
+ // final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(TEST_VERSION_CURRENT, new WhitespaceAnalyzer(TEST_VERSION_CURRENT)));
Directory dir = newDirectory();

View File

@@ -25,7 +25,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
- import org.apache.lucene.util.Version;
+ import org.apache.lucene.util.LuceneTestCase;
import java.util.Collection;
import java.util.HashSet;
@@ -41,7 +41,7 @@ class SimpleQueryConverter extends SpellingQueryConverter {
@Override
public Collection<Token> convert(String origQuery) {
Collection<Token> result = new HashSet<Token>();
- WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_40);
+ WhitespaceAnalyzer analyzer = new WhitespaceAnalyzer(LuceneTestCase.TEST_VERSION_CURRENT);
try (TokenStream ts = analyzer.tokenStream("", origQuery)) {
// TODO: support custom attributes