Merged revision(s) 1591333 from lucene/dev/branches/branch_4x:

LUCENE-5632: Transition Version constants from LUCENE_MN to LUCENE_M_N

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1591365 13f79535-47bb-0310-9956-ffa450edef68
Uwe Schindler 2014-04-30 15:50:35 +00:00
parent 13767516ca
commit bc0d7bbb25
51 changed files with 104 additions and 92 deletions

View File

@@ -107,6 +107,13 @@ API Changes
longer receives the number of fields that will be added (Robert
Muir, Mike McCandless)
+* LUCENE-5632: In preparation for coming Lucene versions, the Version
+  enum constants were renamed to make them more readable. The constant
+  for Lucene 4.9 is now "LUCENE_4_9". Version.parseLeniently() is still
+  able to parse the old strings ("LUCENE_49"). The old identifiers have
+  been deprecated and will be removed in Lucene 5.0. (Uwe Schindler,
+  Robert Muir)
Optimizations
* LUCENE-5603: hunspell stemmer more efficiently strips prefixes
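To make the compatibility contract above concrete, here is a minimal, hypothetical sketch (the demo class is invented; the constants and parseLeniently() are the ones renamed by this commit) showing that the old and new spellings resolve to the same constant:

    import org.apache.lucene.util.Version;

    // Minimal sketch: the renamed constant and the lenient parser agree.
    public class VersionRenameDemo {
      public static void main(String[] args) {
        Version v = Version.LUCENE_4_9;
        System.out.println(Version.parseLeniently("LUCENE_49") == v);  // true: old identifier
        System.out.println(Version.parseLeniently("LUCENE_4_9") == v); // true: new identifier
        System.out.println(Version.parseLeniently("4.9") == v);        // true: plain "X.Y" form
      }
    }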

View File

@@ -157,7 +157,7 @@ public abstract class CompoundWordTokenFilterBase extends TokenFilter {
int startOff = CompoundWordTokenFilterBase.this.offsetAtt.startOffset();
int endOff = CompoundWordTokenFilterBase.this.offsetAtt.endOffset();
-if (matchVersion.onOrAfter(Version.LUCENE_44) ||
+if (matchVersion.onOrAfter(Version.LUCENE_4_4) ||
endOff - startOff != CompoundWordTokenFilterBase.this.termAtt.length()) {
// if length by start + end offsets doesn't match the term text then assume
// this is a synonym and don't adjust the offsets.

View File

@@ -208,7 +208,7 @@ public final class WordDelimiterFilter extends TokenFilter {
*/
public WordDelimiterFilter(Version matchVersion, TokenStream in, byte[] charTypeTable, int configurationFlags, CharArraySet protWords) {
super(in);
-if (!matchVersion.onOrAfter(Version.LUCENE_48)) {
+if (!matchVersion.onOrAfter(Version.LUCENE_4_8)) {
throw new IllegalArgumentException("This class only works with Lucene 4.8+. To emulate the old (broken) behavior of WordDelimiterFilter, use Lucene47WordDelimiterFilter");
}
this.flags = configurationFlags;

View File

@@ -118,7 +118,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
@Override
public TokenFilter create(TokenStream input) {
-if (luceneMatchVersion.onOrAfter(Version.LUCENE_48)) {
+if (luceneMatchVersion.onOrAfter(Version.LUCENE_4_8)) {
return new WordDelimiterFilter(luceneMatchVersion, input, typeTable == null ? WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE : typeTable,
flags, protectedWords);
} else {

View File

@@ -79,7 +79,7 @@ public final class EdgeNGramTokenFilter extends TokenFilter {
throw new IllegalArgumentException("minGram must not be greater than maxGram");
}
-this.charUtils = version.onOrAfter(Version.LUCENE_44)
+this.charUtils = version.onOrAfter(Version.LUCENE_4_4)
? CharacterUtils.getInstance(version)
: CharacterUtils.getJava4Instance();
this.minGram = minGram;

View File

@@ -41,7 +41,7 @@ import org.apache.lucene.util.Version;
* increasing length (meaning that "abc" will give "a", "ab", "abc", "b", "bc",
* "c").</li></ul>
* <p>You can make this filter use the old behavior by providing a version &lt;
-* {@link Version#LUCENE_44} in the constructor but this is not recommended as
+* {@link Version#LUCENE_4_4} in the constructor but this is not recommended as
* it will lead to broken {@link TokenStream}s that will cause highlighting
* bugs.
* <p>If you were using this {@link TokenFilter} to perform partial highlighting,
@@ -83,7 +83,7 @@ public final class NGramTokenFilter extends TokenFilter {
public NGramTokenFilter(Version version, TokenStream input, int minGram, int maxGram) {
super(new CodepointCountFilter(version, input, minGram, Integer.MAX_VALUE));
this.version = version;
-this.charUtils = version.onOrAfter(Version.LUCENE_44)
+this.charUtils = version.onOrAfter(Version.LUCENE_4_4)
? CharacterUtils.getInstance(version)
: CharacterUtils.getJava4Instance();
if (minGram < 1) {
@@ -94,7 +94,7 @@ public final class NGramTokenFilter extends TokenFilter {
}
this.minGram = minGram;
this.maxGram = maxGram;
-if (version.onOrAfter(Version.LUCENE_44)) {
+if (version.onOrAfter(Version.LUCENE_4_4)) {
posIncAtt = addAttribute(PositionIncrementAttribute.class);
posLenAtt = addAttribute(PositionLengthAttribute.class);
} else {
@@ -149,7 +149,7 @@ public final class NGramTokenFilter extends TokenFilter {
hasIllegalOffsets = (tokStart + curTermLength) != tokEnd;
}
}
-if (version.onOrAfter(Version.LUCENE_44)) {
+if (version.onOrAfter(Version.LUCENE_4_4)) {
if (curGramSize > maxGram || (curPos + curGramSize) > curCodePointCount) {
++curPos;
curGramSize = minGram;

View File

@@ -116,10 +116,10 @@ public class NGramTokenizer extends Tokenizer {
}
private void init(Version version, int minGram, int maxGram, boolean edgesOnly) {
-if (!edgesOnly && !version.onOrAfter(Version.LUCENE_44)) {
+if (!edgesOnly && !version.onOrAfter(Version.LUCENE_4_4)) {
throw new IllegalArgumentException("This class only works with Lucene 4.4+. To emulate the old (broken) behavior of NGramTokenizer, use Lucene43NGramTokenizer");
}
-charUtils = version.onOrAfter(Version.LUCENE_44)
+charUtils = version.onOrAfter(Version.LUCENE_4_4)
? CharacterUtils.getInstance(version)
: CharacterUtils.getJava4Instance();
if (minGram < 1) {

View File

@@ -52,7 +52,7 @@ public class NGramTokenizerFactory extends TokenizerFactory {
/** Creates the {@link TokenStream} of n-grams from the given {@link Reader} and {@link AttributeFactory}. */
@Override
public Tokenizer create(AttributeFactory factory) {
-if (luceneMatchVersion.onOrAfter(Version.LUCENE_44)) {
+if (luceneMatchVersion.onOrAfter(Version.LUCENE_4_4)) {
return new NGramTokenizer(luceneMatchVersion, factory, minGramSize, maxGramSize);
} else {
return new Lucene43NGramTokenizer(factory, minGramSize, maxGramSize);

View File

@@ -102,7 +102,7 @@ public final class ThaiAnalyzer extends StopwordAnalyzerBase {
*/
@Override
protected TokenStreamComponents createComponents(String fieldName) {
-if (matchVersion.onOrAfter(Version.LUCENE_48)) {
+if (matchVersion.onOrAfter(Version.LUCENE_4_8)) {
final Tokenizer source = new ThaiTokenizer();
TokenStream result = new LowerCaseFilter(matchVersion, source);
result = new StopFilter(matchVersion, result, stopwords);

View File

@@ -122,7 +122,7 @@ public final class TurkishAnalyzer extends StopwordAnalyzerBase {
protected TokenStreamComponents createComponents(String fieldName) {
final Tokenizer source = new StandardTokenizer(matchVersion);
TokenStream result = new StandardFilter(matchVersion, source);
-if(matchVersion.onOrAfter(Version.LUCENE_48))
+if(matchVersion.onOrAfter(Version.LUCENE_4_8))
result = new ApostropheFilter(result);
result = new TurkishLowerCaseFilter(result);
result = new StopFilter(matchVersion, result, stopwords);

View File

@@ -73,7 +73,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
StringReader reader = new StringReader(sb.toString());
final MockTokenizer in = new MockTokenizer(MockTokenizer.WHITESPACE, false);
in.setReader(reader);
-StopFilter stpf = new StopFilter(Version.LUCENE_40, in, stopSet);
+StopFilter stpf = new StopFilter(Version.LUCENE_4_0, in, stopSet);
doTestStopPositons(stpf);
// with increments, concatenating two stop filters
ArrayList<String> a0 = new ArrayList<>();

View File

@@ -173,7 +173,7 @@ public class NGramTokenFilterTest extends BaseTokenStreamTestCase {
}
public void testLucene43() throws IOException {
-NGramTokenFilter filter = new NGramTokenFilter(Version.LUCENE_43, input, 2, 3);
+NGramTokenFilter filter = new NGramTokenFilter(Version.LUCENE_4_3, input, 2, 3);
assertTokenStreamContents(filter,
new String[]{"ab","bc","cd","de","abc","bcd","cde"},
new int[]{0,1,2,3,0,1,2},

View File

@@ -115,9 +115,9 @@ algorithm.
<h3>Farsi Range Queries</h3>
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("ar"));
-ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_50, collator);
+ICUCollationKeyAnalyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_5_0, collator);
RAMDirectory ramDir = new RAMDirectory();
-IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_50, analyzer));
+IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
Document doc = new Document();
doc.add(new Field("content", "\u0633\u0627\u0628",
Field.Store.YES, Field.Index.ANALYZED));
@@ -125,7 +125,7 @@ algorithm.
writer.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
-QueryParser aqp = new QueryParser(Version.LUCENE_50, "content", analyzer);
+QueryParser aqp = new QueryParser(Version.LUCENE_5_0, "content", analyzer);
aqp.setAnalyzeRangeTerms(true);
// Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
@@ -141,9 +141,9 @@ algorithm.
<h3>Danish Sorting</h3>
<pre class="prettyprint">
Analyzer analyzer
-= new ICUCollationKeyAnalyzer(Version.LUCENE_50, Collator.getInstance(new ULocale("da", "dk")));
+= new ICUCollationKeyAnalyzer(Version.LUCENE_5_0, Collator.getInstance(new ULocale("da", "dk")));
RAMDirectory indexStore = new RAMDirectory();
-IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(Version.LUCENE_50, analyzer));
+IndexWriter writer = new IndexWriter(indexStore, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
String[] tracer = new String[] { "A", "B", "C", "D", "E" };
String[] data = new String[] { "HAT", "HUT", "H\u00C5T", "H\u00D8T", "HOT" };
String[] sortedTracerOrder = new String[] { "A", "E", "B", "D", "C" };
@@ -169,15 +169,15 @@
<pre class="prettyprint">
Collator collator = Collator.getInstance(new ULocale("tr", "TR"));
collator.setStrength(Collator.PRIMARY);
-Analyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_50, collator);
+Analyzer analyzer = new ICUCollationKeyAnalyzer(Version.LUCENE_5_0, collator);
RAMDirectory ramDir = new RAMDirectory();
-IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_50, analyzer));
+IndexWriter writer = new IndexWriter(ramDir, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
Document doc = new Document();
doc.add(new Field("contents", "DIGY", Field.Store.NO, Field.Index.ANALYZED));
writer.addDocument(doc);
writer.close();
IndexSearcher is = new IndexSearcher(ramDir, true);
-QueryParser parser = new QueryParser(Version.LUCENE_50, "contents", analyzer);
+QueryParser parser = new QueryParser(Version.LUCENE_5_0, "contents", analyzer);
Query query = parser.parse("d\u0131gy"); // U+0131: dotless i
ScoreDoc[] result = is.search(query, null, 1000).scoreDocs;
assertEquals("The index Term should be included.", 1, result.length);

View File

@@ -137,7 +137,7 @@ public final class SmartChineseAnalyzer extends Analyzer {
public TokenStreamComponents createComponents(String fieldName) {
final Tokenizer tokenizer;
TokenStream result;
-if (matchVersion.onOrAfter(Version.LUCENE_48)) {
+if (matchVersion.onOrAfter(Version.LUCENE_4_8)) {
tokenizer = new HMMChineseTokenizer();
result = tokenizer;
} else {

View File

@@ -15,7 +15,7 @@
# * limitations under the License.
# */
-writer.version=LUCENE_40
+writer.version=4.0
analyzer=org.apache.lucene.analysis.standard.StandardAnalyzer
directory=FSDirectory

View File

@@ -17,7 +17,7 @@
# -------------------------------------------------------------------------------------
# multi val params are iterated by NewRound's, added to reports, start with column name.
-writer.version=LUCENE_40
+writer.version=4.0
#merge.factor=mrg:10:100:10:100:10:100:10:100
#max.buffered=buf:10:10:100:100:10:10:100:100
ram.flush.mb=flush:32:40:48:56:32:40:48:56

View File

@@ -17,7 +17,7 @@
# -------------------------------------------------------------------------------------
# multi val params are iterated by NewRound's, added to reports, start with column name.
-writer.version=LUCENE_40
+writer.version=4.0
#merge.factor=mrg:10:100:10:100:10:100:10:100
#max.buffered=buf:10:10:100:100:10:10:100:100
ram.flush.mb=flush:32:40:48:56:32:40:48:56

View File

@@ -17,7 +17,7 @@
# -------------------------------------------------------------------------------------
# multi val params are iterated by NewRound's, added to reports, start with column name.
-writer.version=LUCENE_40
+writer.version=4.0
merge.factor=mrg:10:100:10:100:10:100:10:100
max.buffered=buf:10:10:100:100:10:10:100:100
#ram.flush.mb=flush:32:40:48:56:32:40:48:56

View File

@@ -17,7 +17,7 @@
# -------------------------------------------------------------------------------------
# multi val params are iterated by NewRound's, added to reports, start with column name.
-writer.version=LUCENE_40
+writer.version=4.0
merge.factor=mrg:10:100:10:100:10:100:10:100
max.buffered=buf:10:10:100:100:10:10:100:100
#ram.flush.mb=flush:32:40:48:56:32:40:48:56

View File

@@ -17,7 +17,7 @@
# -------------------------------------------------------------------------------------
# multi val params are iterated by NewRound's, added to reports, start with column name.
-writer.version=LUCENE_40
+writer.version=4.0
merge.factor=mrg:10:100:10:100:10:100:10:100
max.buffered=buf:10:10:100:100:10:10:100:100
compound=cmpnd:true:true:true:true:false:false:false:false

View File

@@ -68,7 +68,7 @@ import java.util.regex.Pattern;
* positionIncrementGap:100,
* HTMLStripCharFilter,
* MappingCharFilter(mapping:'mapping-FoldToASCII.txt'),
-* WhitespaceTokenizer(luceneMatchVersion:LUCENE_42),
+* WhitespaceTokenizer(luceneMatchVersion:LUCENE_5_0),
* TokenLimitFilter(maxTokenCount:10000, consumeAllTokens:false))
* [...]
* -NewAnalyzer('strip html, fold to ascii, whitespace tokenize, max 10k tokens')

View File

@@ -96,8 +96,8 @@ public class CreateIndexTask extends PerfTask {
}
public static IndexWriterConfig createWriterConfig(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit) {
-// :Post-Release-Update-Version.LUCENE_XY:
-Version version = Version.valueOf(config.get("writer.version", Version.LUCENE_50.toString()));
+@SuppressWarnings("deprecation")
+Version version = Version.parseLeniently(config.get("writer.version", Version.LUCENE_CURRENT.toString()));
IndexWriterConfig iwConf = new IndexWriterConfig(version, runData.getAnalyzer());
iwConf.setOpenMode(mode);
IndexDeletionPolicy indexDeletionPolicy = getIndexDeletionPolicy(config);
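Because the default is now parsed leniently from LUCENE_CURRENT rather than a hard-coded LUCENE_50, the .alg files earlier in this diff can use the plain "4.0" spelling. A hypothetical sketch of that resolution (the wrapper class is invented for illustration):

    import org.apache.lucene.util.Version;

    // Hypothetical sketch: how a benchmark "writer.version" value such as
    // "writer.version=4.0" is resolved by createWriterConfig above.
    public class WriterVersionDemo {
      public static void main(String[] args) {
        String fromAlg = "4.0"; // value read from an .alg file
        Version version = Version.parseLeniently(fromAlg);
        System.out.println(version); // LUCENE_4_0
      }
    }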

View File

@@ -38,7 +38,7 @@ public class CreateIndexTaskTest extends BenchmarkTestCase {
private PerfRunData createPerfRunData(String infoStreamValue) throws Exception {
Properties props = new Properties();
// :Post-Release-Update-Version.LUCENE_XY:
-props.setProperty("writer.version", Version.LUCENE_50.toString());
+props.setProperty("writer.version", Version.LUCENE_5_0.toString());
props.setProperty("print.props", "false"); // don't print anything
props.setProperty("directory", "RAMDirectory");
if (infoStreamValue != null) {

View File

@@ -70,9 +70,9 @@ public class DatasetSplitter {
// create IWs for train / test / cv IDXs
// :Post-Release-Update-Version.LUCENE_XY:
-IndexWriter testWriter = new IndexWriter(testIndex, new IndexWriterConfig(Version.LUCENE_50, analyzer));
-IndexWriter cvWriter = new IndexWriter(crossValidationIndex, new IndexWriterConfig(Version.LUCENE_50, analyzer));
-IndexWriter trainingWriter = new IndexWriter(trainingIndex, new IndexWriterConfig(Version.LUCENE_50, analyzer));
+IndexWriter testWriter = new IndexWriter(testIndex, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
+IndexWriter cvWriter = new IndexWriter(crossValidationIndex, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
+IndexWriter trainingWriter = new IndexWriter(trainingIndex, new IndexWriterConfig(Version.LUCENE_5_0, analyzer));
try {
int size = originalIndex.maxDoc();

View File

@@ -908,7 +908,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
* If there are running merges or uncommitted
* changes:
* <ul>
-* <li> If config.matchVersion >= LUCENE_50 then the
+* <li> If config.matchVersion >= LUCENE_5_0 then the
* changes are silently discarded.
* <li> Otherwise, a RuntimeException is thrown to
* indicate what was lost, but the IndexWriter is
@@ -921,7 +921,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
* @throws IOException if there is a low-level IO error
* (the IndexWriter will still be closed)
* @throws RuntimeException if config.matchVersion <
-* LUCENE_50 and there were pending changes that were
+* LUCENE_5_0 and there were pending changes that were
* lost (the IndexWriter will still be closed)
*/
@Override
@@ -934,7 +934,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit{
boolean lostChanges = false;
// Only check for lost changes if the version earlier than 5.0:
-if (config.getMatchVersion().onOrAfter(Version.LUCENE_50) == false) {
+if (config.getMatchVersion().onOrAfter(Version.LUCENE_5_0) == false) {
lostChanges = hasUncommittedChanges();
if (lostChanges == false) {
synchronized(this) {
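A minimal, hypothetical sketch of the close semantics described in the javadoc above (the directory, analyzer, and document setup are assumptions, not part of this diff):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;
    import org.apache.lucene.util.Version;

    public class CloseSemanticsDemo {
      public static void main(String[] args) throws Exception {
        // With a matchVersion before LUCENE_5_0, close() throws a
        // RuntimeException if uncommitted changes would be lost; from
        // LUCENE_5_0 on, such changes are silently discarded.
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_4_8,
            new StandardAnalyzer(Version.LUCENE_4_8));
        IndexWriter w = new IndexWriter(new RAMDirectory(), conf);
        // ... addDocument(...) without commit() ...
        try {
          w.close();
        } catch (RuntimeException e) {
          // Per the javadoc, the writer is closed anyway; the exception
          // only reports that the pending changes were lost.
        }
      }
    }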

View File

@@ -30,6 +30,7 @@ import org.apache.lucene.util.IOUtils;
// - let subclass dictate policy...?
// - rename to MergeCacheingDir? NRTCachingDir
+// :Post-Release-Update-Version.LUCENE_X_Y: (in <pre> block in javadoc below)
/**
* Wraps a {@link RAMDirectory}
* around any provided delegate directory, to
@@ -50,7 +51,7 @@ import org.apache.lucene.util.IOUtils;
* <pre class="prettyprint">
* Directory fsDir = FSDirectory.open(new File("/path/to/index"));
* NRTCachingDirectory cachedFSDir = new NRTCachingDirectory(fsDir, 5.0, 60.0);
-* IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_50, analyzer);
+* IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_5_0, analyzer);
* IndexWriter writer = new IndexWriter(cachedFSDir, conf);
* </pre>
*

View File

@@ -38,77 +38,77 @@ public enum Version {
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_40,
+LUCENE_4_0,
/**
* Match settings and bugs in Lucene's 4.1 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_41,
+LUCENE_4_1,
/**
* Match settings and bugs in Lucene's 4.2 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_42,
+LUCENE_4_2,
/**
* Match settings and bugs in Lucene's 4.3 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_43,
+LUCENE_4_3,
/**
* Match settings and bugs in Lucene's 4.4 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_44,
+LUCENE_4_4,
/**
* Match settings and bugs in Lucene's 4.5 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_45,
+LUCENE_4_5,
/**
* Match settings and bugs in Lucene's 4.6 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_46,
+LUCENE_4_6,
/**
* Match settings and bugs in Lucene's 4.7 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_47,
+LUCENE_4_7,
/**
* Match settings and bugs in Lucene's 4.8 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_48,
+LUCENE_4_8,
/**
* Match settings and bugs in Lucene's 4.9 release.
* @deprecated (5.0) Use latest
*/
@Deprecated
-LUCENE_49,
+LUCENE_4_9,
/** Match settings and bugs in Lucene's 5.0 release.
* <p>
* Use this to get the latest &amp; greatest settings, bug
* fixes, etc, for Lucene.
*/
-LUCENE_50,
+LUCENE_5_0,
/* Add new constants for later versions **here** to respect order! */
@@ -136,7 +136,10 @@ public enum Version {
}
public static Version parseLeniently(String version) {
-String parsedMatchVersion = version.toUpperCase(Locale.ROOT);
-return Version.valueOf(parsedMatchVersion.replaceFirst("^(\\d)\\.(\\d)$", "LUCENE_$1$2"));
+final String parsedMatchVersion = version
+  .toUpperCase(Locale.ROOT)
+  .replaceFirst("^(\\d+)\\.(\\d+)$", "LUCENE_$1_$2")
+  .replaceFirst("^LUCENE_(\\d)(\\d)$", "LUCENE_$1_$2");
+return Version.valueOf(parsedMatchVersion);
}
}
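A small sketch of the two-step normalization the new parseLeniently performs (it mirrors the regexes above; the wrapper class is invented for illustration):

    import java.util.Locale;

    public class ParseLenientlyDemo {
      // Mirrors the normalization above: "X.Y" -> "LUCENE_X_Y", then any
      // remaining old-style "LUCENE_XY" -> "LUCENE_X_Y".
      static String normalize(String version) {
        return version
            .toUpperCase(Locale.ROOT)
            .replaceFirst("^(\\d+)\\.(\\d+)$", "LUCENE_$1_$2")
            .replaceFirst("^LUCENE_(\\d)(\\d)$", "LUCENE_$1_$2");
      }

      public static void main(String[] args) {
        System.out.println(normalize("4.9"));             // LUCENE_4_9
        System.out.println(normalize("LUCENE_49"));       // LUCENE_4_9
        System.out.println(normalize("LUCENE_4_9"));      // LUCENE_4_9 (unchanged)
        System.out.println(normalize("LUCENE_CURRENT"));  // LUCENE_CURRENT (unchanged)
      }
    }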

View File

@@ -2431,7 +2431,7 @@ public class TestIndexWriter extends LuceneTestCase {
Directory dir = newDirectory();
// If version is < 50 IW.close should throw an exception
// on uncommitted changes:
-IndexWriterConfig iwc = newIndexWriterConfig(Version.LUCENE_48, new MockAnalyzer(random()));
+IndexWriterConfig iwc = newIndexWriterConfig(Version.LUCENE_4_8, new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new SortedDocValuesField("dv", new BytesRef("foo!")));
@@ -2455,7 +2455,7 @@
// If version is < 50 IW.close should throw an exception
// on still-running merges:
-IndexWriterConfig iwc = newIndexWriterConfig(Version.LUCENE_48, new MockAnalyzer(random()));
+IndexWriterConfig iwc = newIndexWriterConfig(Version.LUCENE_4_8, new MockAnalyzer(random()));
LogDocMergePolicy mp = new LogDocMergePolicy();
mp.setMergeFactor(2);
iwc.setMergePolicy(mp);

View File

@@ -475,7 +475,7 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc
final SnapshotDeletionPolicy sdp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
final Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir("nrt")), 5, 128);
-IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46,
+IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_6,
new MockAnalyzer(random()));
config.setIndexDeletionPolicy(sdp);
config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

View File

@@ -23,13 +23,14 @@ public class TestVersion extends LuceneTestCase {
for (Version v : Version.values()) {
assertTrue("LUCENE_CURRENT must be always onOrAfter("+v+")", Version.LUCENE_CURRENT.onOrAfter(v));
}
-assertTrue(Version.LUCENE_50.onOrAfter(Version.LUCENE_40));
-assertFalse(Version.LUCENE_40.onOrAfter(Version.LUCENE_50));
+assertTrue(Version.LUCENE_5_0.onOrAfter(Version.LUCENE_4_0));
+assertFalse(Version.LUCENE_4_0.onOrAfter(Version.LUCENE_5_0));
}
public void testParseLeniently() {
-assertEquals(Version.LUCENE_40, Version.parseLeniently("4.0"));
-assertEquals(Version.LUCENE_40, Version.parseLeniently("LUCENE_40"));
+assertEquals(Version.LUCENE_4_0, Version.parseLeniently("4.0"));
+assertEquals(Version.LUCENE_4_0, Version.parseLeniently("LUCENE_40"));
+assertEquals(Version.LUCENE_4_0, Version.parseLeniently("LUCENE_4_0"));
assertEquals(Version.LUCENE_CURRENT, Version.parseLeniently("LUCENE_CURRENT"));
}

View File

@@ -88,8 +88,8 @@ public class IndexFiles {
Directory dir = FSDirectory.open(new File(indexPath));
// :Post-Release-Update-Version.LUCENE_XY:
-Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_50);
-IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_50, analyzer);
+Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_5_0);
+IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_5_0, analyzer);
if (create) {
// Create a new index in the directory, removing any

View File

@@ -92,7 +92,7 @@ public class SearchFiles {
IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
IndexSearcher searcher = new IndexSearcher(reader);
// :Post-Release-Update-Version.LUCENE_XY:
-Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_50);
+Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_5_0);
BufferedReader in = null;
if (queries != null) {
@@ -101,7 +101,7 @@
in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
}
// :Post-Release-Update-Version.LUCENE_XY:
-QueryParser parser = new QueryParser(Version.LUCENE_50, field, analyzer);
+QueryParser parser = new QueryParser(Version.LUCENE_5_0, field, analyzer);
while (true) {
if (queries == null && queryString == null) { // prompt the user
System.out.println("Enter query: ");

View File

@@ -28,6 +28,6 @@ public interface FacetExamples {
// :Post-Release-Update-Version.LUCENE_XY:
/** The Lucene {@link Version} used by the example code. */
-public static final Version EXAMPLES_VER = Version.LUCENE_50;
+public static final Version EXAMPLES_VER = Version.LUCENE_5_0;
}

View File

@@ -286,7 +286,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
// :Post-Release-Update-Version.LUCENE_XY:
// Make sure we use a MergePolicy which always merges adjacent segments and thus
// keeps the doc IDs ordered as well (this is crucial for the taxonomy index).
-return new IndexWriterConfig(Version.LUCENE_50, null).setOpenMode(openMode).setMergePolicy(
+return new IndexWriterConfig(Version.LUCENE_5_0, null).setOpenMode(openMode).setMergePolicy(
new LogByteSizeMergePolicy());
}

View File

@@ -113,7 +113,7 @@ public class TestMultiAnalyzer extends BaseTokenStreamTestCase {
}
public void testPosIncrementAnalyzer() throws ParseException {
-QueryParser qp = new QueryParser(Version.LUCENE_40, "", new PosIncrementAnalyzer());
+QueryParser qp = new QueryParser(Version.LUCENE_4_0, "", new PosIncrementAnalyzer());
assertEquals("quick brown", qp.parse("the quick brown").toString());
assertEquals("quick brown fox", qp.parse("the quick brown fox").toString());
}

View File

@@ -374,7 +374,7 @@ public abstract class LuceneTestCase extends Assert {
* Use this constant when creating Analyzers and any other version-dependent stuff.
* <p><b>NOTE:</b> Change this when development starts for new Lucene version:
*/
-public static final Version TEST_VERSION_CURRENT = Version.LUCENE_50;
+public static final Version TEST_VERSION_CURRENT = Version.LUCENE_5_0;
/**
* True if and only if tests are run in verbose mode. If this flag is false

View File

@@ -35,7 +35,7 @@
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
-<luceneMatchVersion>LUCENE_43</luceneMatchVersion>
+<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<!-- lib directives can be used to instruct Solr to load an Jars
identified and use them to resolve any "plugins" specified in
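These solrconfig.xml files now take the match version from the tests.luceneMatchVersion system property, falling back to LUCENE_CURRENT when it is unset. Tests later in this diff (TestNonDefinedSimilarityFactory) drive it exactly this way; a minimal sketch (the wrapper class is invented):

    import org.apache.lucene.util.Version;

    public class MatchVersionPropertyDemo {
      public static void main(String[] args) {
        // Substituted into ${tests.luceneMatchVersion:LUCENE_CURRENT} by
        // Solr's property expansion; when unset, LUCENE_CURRENT is used.
        System.setProperty("tests.luceneMatchVersion", Version.LUCENE_4_7.toString());
      }
    }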

View File

@@ -35,7 +35,7 @@
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
-<luceneMatchVersion>LUCENE_43</luceneMatchVersion>
+<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<!-- <lib/> directives can be used to instruct Solr to load an Jars
identified and use them to resolve any "plugins" specified in

View File

@@ -35,7 +35,7 @@
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
-<luceneMatchVersion>LUCENE_43</luceneMatchVersion>
+<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<!-- <lib/> directives can be used to instruct Solr to load an Jars
identified and use them to resolve any "plugins" specified in

View File

@@ -35,7 +35,7 @@
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
-<luceneMatchVersion>LUCENE_43</luceneMatchVersion>
+<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<!-- lib directives can be used to instruct Solr to load an Jars
identified and use them to resolve any "plugins" specified in

View File

@@ -35,7 +35,7 @@
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
-<luceneMatchVersion>LUCENE_43</luceneMatchVersion>
+<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
<!-- <lib/> directives can be used to instruct Solr to load an Jars
identified and use them to resolve any "plugins" specified in

View File

@@ -163,19 +163,19 @@ public class SolrConfig extends Config {
luceneMatchVersion = getLuceneVersion("luceneMatchVersion");
String indexConfigPrefix;
-// Old indexDefaults and mainIndex sections are deprecated and fails fast for luceneMatchVersion=>LUCENE_40.
+// Old indexDefaults and mainIndex sections are deprecated and fails fast for luceneMatchVersion=>LUCENE_4_0.
// For older solrconfig.xml's we allow the old sections, but never mixed with the new <indexConfig>
boolean hasDeprecatedIndexConfig = (getNode("indexDefaults", false) != null) || (getNode("mainIndex", false) != null);
boolean hasNewIndexConfig = getNode("indexConfig", false) != null;
if(hasDeprecatedIndexConfig){
-if(luceneMatchVersion.onOrAfter(Version.LUCENE_40)) {
+if(luceneMatchVersion.onOrAfter(Version.LUCENE_4_0)) {
throw new SolrException(ErrorCode.FORBIDDEN, "<indexDefaults> and <mainIndex> configuration sections are discontinued. Use <indexConfig> instead.");
} else {
// Still allow the old sections for older LuceneMatchVersion's
if(hasNewIndexConfig) {
throw new SolrException(ErrorCode.FORBIDDEN, "Cannot specify both <indexDefaults>, <mainIndex> and <indexConfig> at the same time. Please use <indexConfig> only.");
}
-log.warn("<indexDefaults> and <mainIndex> configuration sections are deprecated and will fail for luceneMatchVersion=LUCENE_40 and later. Please use <indexConfig> instead.");
+log.warn("<indexDefaults> and <mainIndex> configuration sections are deprecated and will fail for luceneMatchVersion=LUCENE_4_0 and later. Please use <indexConfig> instead.");
defaultIndexConfig = new SolrIndexConfig(this, "indexDefaults", null);
mainIndexConfig = new SolrIndexConfig(this, "mainIndex", defaultIndexConfig);
indexConfigPrefix = "mainIndex";

View File

@@ -407,7 +407,7 @@ public final class FieldTypePluginLoader
Version version = (configuredVersion != null) ?
Config.parseLuceneVersionString(configuredVersion) : schema.getDefaultLuceneMatchVersion();
-if (!version.onOrAfter(Version.LUCENE_40)) {
+if (!version.onOrAfter(Version.LUCENE_4_0)) {
log.warn(pluginClassName + " is using deprecated " + version +
" emulation. You should at some point declare and reindex to at least 4.0, because " +
"3.x emulation is deprecated and will be removed in 5.0");

View File

@@ -481,7 +481,7 @@ public class IndexSchema {
similarityFactory = new DefaultSimilarityFactory();
final NamedList similarityParams = new NamedList();
Version luceneVersion = getDefaultLuceneMatchVersion();
-if (!luceneVersion.onOrAfter(Version.LUCENE_47)) {
+if (!luceneVersion.onOrAfter(Version.LUCENE_4_7)) {
similarityParams.add(DefaultSimilarityFactory.DISCOUNT_OVERLAPS, false);
}
similarityFactory.init(SolrParams.toSolrParams(similarityParams));

View File

@@ -125,7 +125,7 @@ public class SolrIndexConfig {
luceneVersion = solrConfig.luceneMatchVersion;
// Assert that end-of-life parameters or syntax is not in our config.
-// Warn for luceneMatchVersion's before LUCENE_36, fail fast above
+// Warn for luceneMatchVersion's before LUCENE_3_6, fail fast above
assertWarnOrFail("The <mergeScheduler>myclass</mergeScheduler> syntax is no longer supported in solrconfig.xml. Please use syntax <mergeScheduler class=\"myclass\"/> instead.",
!((solrConfig.getNode(prefix+"/mergeScheduler",false) != null) && (solrConfig.get(prefix+"/mergeScheduler/@class",null) == null)),
true);

View File

@@ -38,7 +38,7 @@
</analyzer>
</fieldtype>
<fieldtype name="textStandardAnalyzer40" class="solr.TextField">
-<analyzer class="org.apache.lucene.analysis.standard.StandardAnalyzer" luceneMatchVersion="LUCENE_40"/>
+<analyzer class="org.apache.lucene.analysis.standard.StandardAnalyzer" luceneMatchVersion="LUCENE_4_0"/>
</fieldtype>
<fieldtype name="textStandardAnalyzerDefault" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.standard.StandardAnalyzer"/>

View File

@@ -20,8 +20,8 @@
<fieldtype name="explicitLuceneMatchVersions" class="org.apache.solr.schema.TextField">
<analyzer>
<charFilter class="org.apache.solr.analysis.MockCharFilterFactory" luceneMatchVersion="LUCENE_40" remainder="0"/>
-<tokenizer class="org.apache.solr.analysis.MockTokenizerFactory" luceneMatchVersion="LUCENE_40" />
-<filter class="org.apache.solr.analysis.MockTokenFilterFactory" luceneMatchVersion="LUCENE_40" stopset="empty"/>
+<tokenizer class="org.apache.solr.analysis.MockTokenizerFactory" luceneMatchVersion="LUCENE_4_0" />
+<filter class="org.apache.solr.analysis.MockTokenFilterFactory" luceneMatchVersion="4.0" stopset="empty"/>
</analyzer>
</fieldtype>
<fieldtype name="noLuceneMatchVersions" class="org.apache.solr.schema.TextField">

View File

@@ -21,7 +21,7 @@
discovery-based core configuration. Trying a minimal configuration to cut down the setup time.
use in conjunction with schema-minimal.xml perhaps? -->
<config>
-<luceneMatchVersion>LUCENE_41</luceneMatchVersion>
+<luceneMatchVersion>LUCENE_4_1</luceneMatchVersion>
<dataDir>${solr.data.dir:}</dataDir>

View File

@@ -53,8 +53,8 @@ public class TestLuceneMatchVersion extends SolrTestCaseJ4 {
type = schema.getFieldType("text40");
ana = (TokenizerChain) type.getAnalyzer();
-assertEquals(Version.LUCENE_40, (ana.getTokenizerFactory()).getLuceneMatchVersion());
-assertEquals(Version.LUCENE_50, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
+assertEquals(Version.LUCENE_4_0, (ana.getTokenizerFactory()).getLuceneMatchVersion());
+assertEquals(Version.LUCENE_5_0, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
// this is a hack to get the private matchVersion field in StandardAnalyzer's superclass, may break in later lucene versions - we have no getter :(
final Field matchVersionField = StandardAnalyzer.class.getSuperclass().getDeclaredField("matchVersion");
@@ -68,6 +68,6 @@
type = schema.getFieldType("textStandardAnalyzer40");
ana1 = type.getAnalyzer();
assertTrue(ana1 instanceof StandardAnalyzer);
-assertEquals(Version.LUCENE_40, matchVersionField.get(ana1));
+assertEquals(Version.LUCENE_4_0, matchVersionField.get(ana1));
}
}

View File

@@ -45,13 +45,13 @@ public class TestSerializedLuceneMatchVersion extends RestTestBase {
"count(/response/lst[@name='fieldType']) = 1",
"//lst[str[@name='class'][.='org.apache.solr.analysis.MockCharFilterFactory']]"
-+" [str[@name='luceneMatchVersion'][.='LUCENE_40']]",
++" [str[@name='luceneMatchVersion'][.='LUCENE_4_0']]",
"//lst[str[@name='class'][.='org.apache.solr.analysis.MockTokenizerFactory']]"
-+" [str[@name='luceneMatchVersion'][.='LUCENE_40']]",
++" [str[@name='luceneMatchVersion'][.='LUCENE_4_0']]",
"//lst[str[@name='class'][.='org.apache.solr.analysis.MockTokenFilterFactory']]"
-+" [str[@name='luceneMatchVersion'][.='LUCENE_40']]");
++" [str[@name='luceneMatchVersion'][.='LUCENE_4_0']]");
}
@Test

View File

@@ -43,14 +43,14 @@ public class TestNonDefinedSimilarityFactory extends BaseSimilarityTestCase {
}
public void test47() throws Exception {
-System.setProperty("tests.luceneMatchVersion", Version.LUCENE_47.toString());
+System.setProperty("tests.luceneMatchVersion", Version.LUCENE_4_7.toString());
initCore("solrconfig-basic.xml","schema-tiny.xml");
DefaultSimilarity sim = getSimilarity("text", DefaultSimilarity.class);
assertEquals(true, sim.getDiscountOverlaps());
}
public void test46() throws Exception {
-System.setProperty("tests.luceneMatchVersion", Version.LUCENE_46.toString());
+System.setProperty("tests.luceneMatchVersion", Version.LUCENE_4_6.toString());
initCore("solrconfig-basic.xml","schema-tiny.xml");
DefaultSimilarity sim = getSimilarity("text", DefaultSimilarity.class);
assertEquals(false, sim.getDiscountOverlaps());