Merge pull request #15850 from nik9000/more_deprecation
Handle some deprecation warnings
Commit dfe77879b9
NGramTokenizerFactory.java

@@ -20,9 +20,7 @@
 package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;
-import org.apache.lucene.util.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;

@@ -43,7 +41,6 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
     private final int minGram;
     private final int maxGram;
     private final CharMatcher matcher;
-    private org.elasticsearch.Version esVersion;
 
     static final Map<String, CharMatcher> MATCHERS;
 

@@ -92,30 +89,19 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
         this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
         this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
         this.matcher = parseTokenChars(settings.getAsArray("token_chars"));
-        this.esVersion = indexSettings.getIndexVersionCreated();
     }
 
-    @SuppressWarnings("deprecation")
     @Override
     public Tokenizer create() {
-        if (version.onOrAfter(Version.LUCENE_4_3) && esVersion.onOrAfter(org.elasticsearch.Version.V_0_90_2)) {
-            /*
-             * We added this in 0.90.2 but 0.90.1 used LUCENE_43 already so we can not rely on the lucene version.
-             * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version.
-             */
-            final Version version = this.version == Version.LUCENE_4_3 ? Version.LUCENE_4_4 : this.version; // always use 4.4 or higher
-            if (matcher == null) {
-                return new NGramTokenizer(minGram, maxGram);
-            } else {
-                return new NGramTokenizer(minGram, maxGram) {
-                    @Override
-                    protected boolean isTokenChar(int chr) {
-                        return matcher.isTokenChar(chr);
-                    }
-                };
-            }
+        if (matcher == null) {
+            return new NGramTokenizer(minGram, maxGram);
         } else {
-            return new Lucene43NGramTokenizer(minGram, maxGram);
+            return new NGramTokenizer(minGram, maxGram) {
+                @Override
+                protected boolean isTokenChar(int chr) {
+                    return matcher.isTokenChar(chr);
+                }
+            };
         }
     }
 
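For readers following the change: the new create() keeps the custom token_chars behaviour by subclassing NGramTokenizer and overriding isTokenChar, instead of falling back to the removed Lucene43NGramTokenizer. A minimal standalone sketch of that pattern, assuming a Lucene 5.x classpath; the Character.isLetterOrDigit check is only a stand-in for the factory's CharMatcher:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.ngram.NGramTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class NGramIsTokenCharSketch {
        public static void main(String[] args) throws IOException {
            // Restrict which characters may appear in a gram by overriding isTokenChar.
            Tokenizer tokenizer = new NGramTokenizer(2, 3) {
                @Override
                protected boolean isTokenChar(int chr) {
                    return Character.isLetterOrDigit(chr); // stand-in for the factory's CharMatcher
                }
            };
            tokenizer.setReader(new StringReader("foo 42"));
            CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                System.out.println(term); // only letter/digit grams are emitted
            }
            tokenizer.end();
            tokenizer.close();
        }
    }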
QueryBuilders.java

@@ -604,14 +604,14 @@ public abstract class QueryBuilders {
      * Facilitates creating template query requests using an inline script
      */
     public static TemplateQueryBuilder templateQuery(String template, Map<String, Object> vars) {
-        return new TemplateQueryBuilder(template, vars);
+        return new TemplateQueryBuilder(new Template(template, ScriptService.ScriptType.INLINE, null, null, vars));
     }
 
     /**
      * Facilitates creating template query requests
      */
     public static TemplateQueryBuilder templateQuery(String template, ScriptService.ScriptType templateType, Map<String, Object> vars) {
-        return new TemplateQueryBuilder(template, templateType, vars);
+        return new TemplateQueryBuilder(new Template(template, templateType, null, null, vars));
     }
 
     /**
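The two helpers above now build a Template object up front rather than handing raw strings to deprecated TemplateQueryBuilder constructors; callers are unaffected. A hedged usage sketch, with made-up field and parameter names:

    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.index.query.TemplateQueryBuilder;
    import org.elasticsearch.script.ScriptService;

    public class TemplateQuerySketch {
        public static void main(String[] args) {
            Map<String, Object> params = new HashMap<>();
            params.put("my_field", "title");          // hypothetical parameter names
            params.put("my_value", "elasticsearch");

            // Inline template; the helper wraps it in a Template(...) internally.
            TemplateQueryBuilder inline = QueryBuilders.templateQuery(
                    "{\"match\": {\"{{my_field}}\": \"{{my_value}}\"}}", params);

            // Same query with the script type spelled out explicitly.
            TemplateQueryBuilder typed = QueryBuilders.templateQuery(
                    "{\"match\": {\"{{my_field}}\": \"{{my_value}}\"}}",
                    ScriptService.ScriptType.INLINE, params);

            System.out.println(inline + " / " + typed);
        }
    }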
Store.java

@@ -118,6 +118,7 @@ import static java.util.Collections.unmodifiableMap;
  * </pre>
  */
 public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
+    private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;
 
     static final String CODEC = "store";
     static final int VERSION_WRITE_THROWABLE= 2; // we write throwable since 2.0

@@ -466,7 +467,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
                 output = new LegacyVerification.LengthVerifyingIndexOutput(output, metadata.length());
             } else {
                 assert metadata.writtenBy() != null;
-                assert metadata.writtenBy().onOrAfter(Version.LUCENE_4_8);
+                assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
                 output = new LuceneVerifyingIndexOutput(metadata, output);
             }
             success = true;

@@ -490,7 +491,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
             return directory().openInput(filename, context);
         }
         assert metadata.writtenBy() != null;
-        assert metadata.writtenBy().onOrAfter(Version.LUCENE_4_8_0);
+        assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
         return new VerifyingIndexInput(directory().openInput(filename, context));
     }
 

@@ -518,7 +519,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
             if (input.length() != md.length()) { // first check the length no matter how old this file is
                 throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input);
             }
-            if (md.writtenBy() != null && md.writtenBy().onOrAfter(Version.LUCENE_4_8_0)) {
+            if (md.writtenBy() != null && md.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
                 // throw exception if the file is corrupt
                 String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
                 // throw exception if metadata is inconsistent

@@ -766,7 +767,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
     */
    public final static class MetadataSnapshot implements Iterable<StoreFileMetaData>, Writeable<MetadataSnapshot> {
        private static final ESLogger logger = Loggers.getLogger(MetadataSnapshot.class);
-       private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8;
 
        private final Map<String, StoreFileMetaData> metadata;
 

@@ -843,6 +843,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
            final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
            numDocs = Lucene.getNumDocs(segmentCommitInfos);
            commitUserDataBuilder.putAll(segmentCommitInfos.getUserData());
+           @SuppressWarnings("deprecation")
            Version maxVersion = Version.LUCENE_4_0; // we don't know which version was used to write so we take the max version.
            for (SegmentCommitInfo info : segmentCommitInfos) {
                final Version version = info.info.getVersion();

@@ -907,6 +908,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
         * @param directory the directory to read checksums from
         * @return a map of file checksums and the checksum file version
         */
+        @SuppressWarnings("deprecation") // Legacy checksum needs legacy methods
        static Tuple<Map<String, String>, Long> readLegacyChecksums(Directory directory) throws IOException {
            synchronized (directory) {
                long lastFound = -1;

@@ -922,10 +924,10 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
                if (lastFound > -1) {
                    try (IndexInput indexInput = directory.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE)) {
                        indexInput.readInt(); // version
-                       return new Tuple(indexInput.readStringStringMap(), lastFound);
+                       return new Tuple<>(indexInput.readStringStringMap(), lastFound);
                    }
                }
-               return new Tuple(new HashMap<>(), -1l);
+               return new Tuple<>(new HashMap<>(), -1l);
            }
        }
 

@@ -1243,6 +1245,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
            }
        }
 
+       @SuppressWarnings("deprecation") // Legacy checksum uses legacy methods
       synchronized void writeChecksums(Directory directory, Map<String, String> checksums, long lastVersion) throws IOException {
           // Make sure if clock goes backwards we still move version forwards:
           long nextVersion = Math.max(lastVersion+1, System.currentTimeMillis());
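Two small patterns recur in the Store changes: hoisting the Lucene 4.8 checksum cut-off into a single FIRST_LUCENE_CHECKSUM_VERSION constant, and adding the missing type arguments to Tuple so the raw-type warnings disappear. A small sketch of the latter using Elasticsearch's Tuple helper; the file name and checksum value are invented:

    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.common.collect.Tuple;

    public class TupleDiamondSketch {
        public static void main(String[] args) {
            Map<String, String> checksums = new HashMap<>();
            checksums.put("segments_1", "abc123"); // hypothetical file -> checksum entry

            // `new Tuple(checksums, -1L)` compiles but raises a raw-type warning;
            // the diamond keeps both type arguments intact.
            Tuple<Map<String, String>, Long> legacy = new Tuple<>(checksums, -1L);
            System.out.println(legacy.v1() + " at checksum version " + legacy.v2());
        }
    }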
PreBuiltAnalyzers.java

@@ -73,15 +73,10 @@ import java.util.Locale;
  */
 public enum PreBuiltAnalyzers {
 
-    STANDARD(CachingStrategy.ELASTICSEARCH) { // we don't do stopwords anymore from 1.0Beta on
+    STANDARD(CachingStrategy.ELASTICSEARCH) {
         @Override
         protected Analyzer create(Version version) {
-            final Analyzer a;
-            if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
-                a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
-            } else {
-                a = new StandardAnalyzer();
-            }
+            final Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
             a.setVersion(version.luceneVersion);
             return a;
         }

@@ -151,22 +146,14 @@ public enum PreBuiltAnalyzers {
     PATTERN(CachingStrategy.ELASTICSEARCH) {
         @Override
         protected Analyzer create(Version version) {
-            if (version.onOrAfter(Version.V_1_0_0_RC1)) {
-                return new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET);
-            }
-            return new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
+            return new PatternAnalyzer(Regex.compile("\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/, null), true, CharArraySet.EMPTY_SET);
         }
     },
 
     STANDARD_HTML_STRIP(CachingStrategy.ELASTICSEARCH) {
         @Override
         protected Analyzer create(Version version) {
-            final Analyzer analyzer;
-            if (version.onOrAfter(Version.V_1_0_0_RC1)) {
-                analyzer = new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET);
-            } else {
-                analyzer = new StandardHtmlStripAnalyzer();
-            }
+            final Analyzer analyzer = new StandardHtmlStripAnalyzer(CharArraySet.EMPTY_SET);
             analyzer.setVersion(version.luceneVersion);
             return analyzer;
         }
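With the pre-1.0 version branches gone, each analyzer is built one way and then stamped with the index's Lucene version via setVersion. A minimal sketch of that construction, assuming Lucene 5.x; Version.LATEST is just a convenient stand-in for version.luceneVersion:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.analysis.util.CharArraySet;
    import org.apache.lucene.util.Version;

    public class StandardAnalyzerSketch {
        public static void main(String[] args) {
            // Single construction path: standard analyzer with no stop words,
            // then apply the target Lucene version for match-version behaviour.
            Analyzer a = new StandardAnalyzer(CharArraySet.EMPTY_SET);
            a.setVersion(Version.LATEST);
            System.out.println(a.getVersion());
        }
    }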
PreBuiltTokenFilters.java

@@ -28,7 +28,6 @@ import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
 import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
 import org.apache.lucene.analysis.core.DecimalDigitFilter;
 import org.apache.lucene.analysis.core.LowerCaseFilter;
-import org.apache.lucene.analysis.core.Lucene43StopFilter;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.core.StopFilter;
 import org.apache.lucene.analysis.core.UpperCaseFilter;

@@ -45,9 +44,6 @@ import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter;
 import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
 import org.apache.lucene.analysis.miscellaneous.LengthFilter;
 import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilter;
-import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter;
-import org.apache.lucene.analysis.miscellaneous.Lucene43TrimFilter;
-import org.apache.lucene.analysis.miscellaneous.Lucene47WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.ScandinavianFoldingFilter;
 import org.apache.lucene.analysis.miscellaneous.ScandinavianNormalizationFilter;
 import org.apache.lucene.analysis.miscellaneous.TrimFilter;

@@ -55,8 +51,6 @@ import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter;
 import org.apache.lucene.analysis.miscellaneous.UniqueTokenFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
-import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
-import org.apache.lucene.analysis.ngram.Lucene43NGramTokenFilter;
 import org.apache.lucene.analysis.ngram.NGramTokenFilter;
 import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
 import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;

@@ -86,49 +80,26 @@ public enum PreBuiltTokenFilters {
     WORD_DELIMITER(CachingStrategy.ONE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_8)) {
-                return new WordDelimiterFilter(tokenStream,
-                        WordDelimiterFilter.GENERATE_WORD_PARTS |
-                        WordDelimiterFilter.GENERATE_NUMBER_PARTS |
-                        WordDelimiterFilter.SPLIT_ON_CASE_CHANGE |
-                        WordDelimiterFilter.SPLIT_ON_NUMERICS |
-                        WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
-            } else {
-                return new Lucene47WordDelimiterFilter(tokenStream,
-                        WordDelimiterFilter.GENERATE_WORD_PARTS |
-                        WordDelimiterFilter.GENERATE_NUMBER_PARTS |
-                        WordDelimiterFilter.SPLIT_ON_CASE_CHANGE |
-                        WordDelimiterFilter.SPLIT_ON_NUMERICS |
-                        WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
-            }
+            return new WordDelimiterFilter(tokenStream,
+                    WordDelimiterFilter.GENERATE_WORD_PARTS |
+                    WordDelimiterFilter.GENERATE_NUMBER_PARTS |
+                    WordDelimiterFilter.SPLIT_ON_CASE_CHANGE |
+                    WordDelimiterFilter.SPLIT_ON_NUMERICS |
+                    WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null);
         }
-
-
     },
 
     STOP(CachingStrategy.LUCENE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_4_0)) {
-                return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
-            } else {
-                @SuppressWarnings("deprecation")
-                final TokenStream filter = new Lucene43StopFilter(true, tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
-                return filter;
-            }
+            return new StopFilter(tokenStream, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
         }
     },
 
     TRIM(CachingStrategy.LUCENE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_4_0)) {
-                return new TrimFilter(tokenStream);
-            } else {
-                @SuppressWarnings("deprecation")
-                final TokenStream filter = new Lucene43TrimFilter(tokenStream, true);
-                return filter;
-            }
+            return new TrimFilter(tokenStream);
         }
     },
 

@@ -149,13 +120,7 @@ public enum PreBuiltTokenFilters {
     LENGTH(CachingStrategy.LUCENE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_4_0)) {
-                return new LengthFilter(tokenStream, 0, Integer.MAX_VALUE);
-            } else {
-                @SuppressWarnings("deprecation")
-                final TokenStream filter = new Lucene43LengthFilter(true, tokenStream, 0, Integer.MAX_VALUE);
-                return filter;
-            }
+            return new LengthFilter(tokenStream, 0, Integer.MAX_VALUE);
         }
     },
 

@@ -211,26 +176,14 @@ public enum PreBuiltTokenFilters {
     NGRAM(CachingStrategy.LUCENE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_4_0)) {
-                return new NGramTokenFilter(tokenStream);
-            } else {
-                @SuppressWarnings("deprecation")
-                final TokenStream filter = new Lucene43NGramTokenFilter(tokenStream);
-                return filter;
-            }
+            return new NGramTokenFilter(tokenStream);
         }
     },
 
     EDGE_NGRAM(CachingStrategy.LUCENE) {
         @Override
         public TokenStream create(TokenStream tokenStream, Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_4_0)) {
-                return new EdgeNGramTokenFilter(tokenStream, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
-            } else {
-                @SuppressWarnings("deprecation")
-                final TokenStream filter = new Lucene43EdgeNGramTokenFilter(tokenStream, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
-                return filter;
-            }
+            return new EdgeNGramTokenFilter(tokenStream, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE);
         }
     },
 
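After the cleanup each pre-built filter has a single construction path, so the local @SuppressWarnings("deprecation") variables are no longer needed. A hedged sketch of what the simplified STOP path amounts to when wired to a plain whitespace tokenizer; the sample text is invented:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.StopAnalyzer;
    import org.apache.lucene.analysis.core.StopFilter;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class StopFilterSketch {
        public static void main(String[] args) throws IOException {
            WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
            tokenizer.setReader(new StringReader("the quick brown fox"));

            // The one remaining branch: plain StopFilter, no Lucene43StopFilter fallback.
            TokenStream stream = new StopFilter(tokenizer, StopAnalyzer.ENGLISH_STOP_WORDS_SET);
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term); // prints "quick", "brown", "fox"
            }
            stream.end();
            stream.close();
        }
    }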
PreBuiltTokenizers.java

@@ -24,16 +24,12 @@ import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.core.LowerCaseTokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
-import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
-import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;
 import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
 import org.apache.lucene.analysis.pattern.PatternTokenizer;
 import org.apache.lucene.analysis.standard.ClassicTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
-import org.apache.lucene.analysis.standard.std40.StandardTokenizer40;
-import org.apache.lucene.analysis.standard.std40.UAX29URLEmailTokenizer40;
 import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.regex.Regex;

@@ -50,11 +46,7 @@ public enum PreBuiltTokenizers {
     STANDARD(CachingStrategy.LUCENE) {
         @Override
         protected Tokenizer create(Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_7_0)) {
-                return new StandardTokenizer();
-            } else {
-                return new StandardTokenizer40();
-            }
+            return new StandardTokenizer();
         }
     },
 

@@ -68,11 +60,7 @@ public enum PreBuiltTokenizers {
     UAX_URL_EMAIL(CachingStrategy.LUCENE) {
         @Override
         protected Tokenizer create(Version version) {
-            if (version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_7_0)) {
-                return new UAX29URLEmailTokenizer();
-            } else {
-                return new UAX29URLEmailTokenizer40();
-            }
+            return new UAX29URLEmailTokenizer();
         }
     },
 

@@ -114,28 +102,14 @@ public enum PreBuiltTokenizers {
     NGRAM(CachingStrategy.LUCENE) {
         @Override
         protected Tokenizer create(Version version) {
-            // see NGramTokenizerFactory for an explanation of this logic:
-            // 4.4 patch was used before 4.4 was released
-            if (version.onOrAfter(org.elasticsearch.Version.V_0_90_2) &&
-                    version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_3)) {
-                return new NGramTokenizer();
-            } else {
-                return new Lucene43NGramTokenizer();
-            }
+            return new NGramTokenizer();
         }
     },
 
     EDGE_NGRAM(CachingStrategy.LUCENE) {
         @Override
         protected Tokenizer create(Version version) {
-            // see EdgeNGramTokenizerFactory for an explanation of this logic:
-            // 4.4 patch was used before 4.4 was released
-            if (version.onOrAfter(org.elasticsearch.Version.V_0_90_2) &&
-                    version.luceneVersion.onOrAfter(org.apache.lucene.util.Version.LUCENE_4_3)) {
-                return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
-            } else {
-                return new Lucene43EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
-            }
+            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
         }
     },
 
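The tokenizers get the same treatment: the version checks and the std40/Lucene43 back-compat classes are gone, leaving only the current Lucene constructors. For instance, a small sketch of the UAX29URLEmailTokenizer path; the input string is made up:

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class UrlEmailTokenizerSketch {
        public static void main(String[] args) throws IOException {
            // The only construction path left once the UAX29URLEmailTokenizer40 branch is removed.
            UAX29URLEmailTokenizer tokenizer = new UAX29URLEmailTokenizer();
            tokenizer.setReader(new StringReader("mail foo@example.com or visit https://example.com/docs"));
            CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                System.out.println(term); // URLs and e-mail addresses come out as single tokens
            }
            tokenizer.end();
            tokenizer.close();
        }
    }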
NGramTokenizerFactoryTests.java

@@ -23,11 +23,7 @@ import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
-import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
 import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
-import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
-import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
-import org.apache.lucene.analysis.ngram.NGramTokenizer;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -118,79 +114,6 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase {
             new String[] {" a", " a!"});
     }
 
-    public void testBackwardsCompatibilityEdgeNgramTokenizer() throws Exception {
-        int iters = scaledRandomIntBetween(20, 100);
-        final Index index = new Index("test");
-        final String name = "ngr";
-        for (int i = 0; i < iters; i++) {
-            Version v = randomVersion(random());
-            if (v.onOrAfter(Version.V_0_90_2)) {
-                Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
-                boolean compatVersion = false;
-                if ((compatVersion = random().nextBoolean())) {
-                    builder.put("version", "4." + random().nextInt(3));
-                    builder.put("side", "back");
-                }
-                Settings settings = builder.build();
-                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
-                Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
-                edgeNGramTokenizer.setReader(new StringReader("foo bar"));
-                if (compatVersion) {
-                    assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
-                } else {
-                    assertThat(edgeNGramTokenizer, instanceOf(EdgeNGramTokenizer.class));
-                }
-
-            } else {
-                Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
-                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
-                Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
-                edgeNGramTokenizer.setReader(new StringReader("foo bar"));
-                assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
-            }
-        }
-        Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
-        Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
-        try {
-            new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
-            fail("should fail side:back is not supported anymore");
-        } catch (IllegalArgumentException ex) {
-        }
-
-    }
-
-    public void testBackwardsCompatibilityNgramTokenizer() throws Exception {
-        int iters = scaledRandomIntBetween(20, 100);
-        for (int i = 0; i < iters; i++) {
-            final Index index = new Index("test");
-            final String name = "ngr";
-            Version v = randomVersion(random());
-            if (v.onOrAfter(Version.V_0_90_2)) {
-                Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
-                boolean compatVersion = false;
-                if ((compatVersion = random().nextBoolean())) {
-                    builder.put("version", "4." + random().nextInt(3));
-                }
-                Settings settings = builder.build();
-                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
-                Tokenizer nGramTokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
-                nGramTokenizer.setReader(new StringReader("foo bar"));
-                if (compatVersion) {
-                    assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
-                } else {
-                    assertThat(nGramTokenizer, instanceOf(NGramTokenizer.class));
-                }
-
-            } else {
-                Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).build();
-                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
-                Tokenizer nGramTokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
-                nGramTokenizer.setReader(new StringReader("foo bar"));
-                assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
-            }
-        }
-    }
-
     public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception {
         int iters = scaledRandomIntBetween(20, 100);
         for (int i = 0; i < iters; i++) {
PreBuiltAnalyzerTests.java

@@ -19,8 +19,6 @@
 package org.elasticsearch.index.analysis;
 
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.compress.CompressedXContent;

@@ -32,15 +30,11 @@ import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Locale;
 
 import static org.elasticsearch.test.VersionUtils.randomVersion;
-import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.not;
 
 /**
  *

@@ -54,76 +48,6 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase {
         assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
     }
 
-    public void testThatDefaultAndStandardAnalyzerChangedIn10Beta1() throws IOException {
-        Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_1_0_0_Beta1);
-        Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
-
-        // special case, these two are the same instance
-        assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
-        PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
-        final int n = scaledRandomIntBetween(10, 100);
-        Version version = Version.CURRENT;
-        for(int i = 0; i < n; i++) {
-            if (version.equals(Version.V_1_0_0_Beta1)) {
-                assertThat(currentDefaultAnalyzer, is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version)));
-            } else {
-                assertThat(currentDefaultAnalyzer, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
-            }
-            Analyzer analyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(version);
-            TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
-            ts.reset();
-            CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
-            List<String> list = new ArrayList<>();
-            while(ts.incrementToken()) {
-                list.add(charTermAttribute.toString());
-            }
-            if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
-                assertThat(list.size(), is(4));
-                assertThat(list, contains("this", "is", "it", "dude"));
-
-            } else {
-                assertThat(list.size(), is(1));
-                assertThat(list, contains("dude"));
-            }
-            ts.close();
-            version = randomVersion(random());
-        }
-    }
-
-    public void testAnalyzerChangedIn10RC1() throws IOException {
-        Analyzer pattern = PreBuiltAnalyzers.PATTERN.getAnalyzer(Version.V_1_0_0_RC1);
-        Analyzer standardHtml = PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(Version.V_1_0_0_RC1);
-        final int n = scaledRandomIntBetween(10, 100);
-        Version version = Version.CURRENT;
-        for(int i = 0; i < n; i++) {
-            if (version.equals(Version.V_1_0_0_RC1)) {
-                assertThat(pattern, is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version)));
-                assertThat(standardHtml, is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version)));
-            } else {
-                assertThat(pattern, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
-                assertThat(standardHtml, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
-            }
-            Analyzer analyzer = randomBoolean() ? PreBuiltAnalyzers.PATTERN.getAnalyzer(version) : PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version);
-            TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
-            ts.reset();
-            CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
-            List<String> list = new ArrayList<>();
-            while(ts.incrementToken()) {
-                list.add(charTermAttribute.toString());
-            }
-            if (version.onOrAfter(Version.V_1_0_0_RC1)) {
-                assertThat(list.toString(), list.size(), is(4));
-                assertThat(list, contains("this", "is", "it", "dude"));
-
-            } else {
-                assertThat(list.size(), is(1));
-                assertThat(list, contains("dude"));
-            }
-            ts.close();
-            version = randomVersion(random());
-        }
-    }
-
     public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
         assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT),
                 is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_0_18_0)));