Remove tests for backwards compatibility we no longer support
parent d8526f4d00
commit 4772b1fe9d
@@ -23,11 +23,7 @@ import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenFilter;
import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -118,79 +114,6 @@ public class NGramTokenizerFactoryTests extends ESTokenStreamTestCase {
            new String[] {" a", " a!"});
    }

    public void testBackwardsCompatibilityEdgeNgramTokenizer() throws Exception {
        int iters = scaledRandomIntBetween(20, 100);
        final Index index = new Index("test");
        final String name = "ngr";
        for (int i = 0; i < iters; i++) {
            Version v = randomVersion(random());
            if (v.onOrAfter(Version.V_0_90_2)) {
                Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
                boolean compatVersion = false;
                if ((compatVersion = random().nextBoolean())) {
                    builder.put("version", "4." + random().nextInt(3));
                    builder.put("side", "back");
                }
                Settings settings = builder.build();
                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
                Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
                edgeNGramTokenizer.setReader(new StringReader("foo bar"));
                if (compatVersion) {
                    assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
                } else {
                    assertThat(edgeNGramTokenizer, instanceOf(EdgeNGramTokenizer.class));
                }

            } else {
                Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
                Tokenizer edgeNGramTokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
                edgeNGramTokenizer.setReader(new StringReader("foo bar"));
                assertThat(edgeNGramTokenizer, instanceOf(Lucene43EdgeNGramTokenizer.class));
            }
        }
        Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
        Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
        try {
            new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
            fail("should fail, side:back is not supported anymore");
        } catch (IllegalArgumentException ex) {
        }

    }

    public void testBackwardsCompatibilityNgramTokenizer() throws Exception {
        int iters = scaledRandomIntBetween(20, 100);
        for (int i = 0; i < iters; i++) {
            final Index index = new Index("test");
            final String name = "ngr";
            Version v = randomVersion(random());
            if (v.onOrAfter(Version.V_0_90_2)) {
                Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit");
                boolean compatVersion = false;
                if ((compatVersion = random().nextBoolean())) {
                    builder.put("version", "4." + random().nextInt(3));
                }
                Settings settings = builder.build();
                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
                Tokenizer nGramTokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
                nGramTokenizer.setReader(new StringReader("foo bar"));
                if (compatVersion) {
                    assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
                } else {
                    assertThat(nGramTokenizer, instanceOf(NGramTokenizer.class));
                }

            } else {
                Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).build();
                Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, v.id).build();
                Tokenizer nGramTokenizer = new NGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
                nGramTokenizer.setReader(new StringReader("foo bar"));
                assertThat(nGramTokenizer, instanceOf(Lucene43NGramTokenizer.class));
            }
        }
    }

    public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception {
        int iters = scaledRandomIntBetween(20, 100);
        for (int i = 0; i < iters; i++) {
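The one check from these deleted tests that still describes current behavior is that side=back is rejected outright. A minimal sketch of how that could be pinned on its own, reusing the helpers visible in the hunk above (newAnalysisSettingsBuilder, IndexSettingsModule.newIndexSettings); the test name is illustrative, not part of the commit:

    // Sketch, not part of the commit: an edge_ngram tokenizer configured
    // with side=back must fail on indices created with the current version.
    public void testEdgeNGramSideBackRejected() throws Exception {
        Settings settings = newAnalysisSettingsBuilder()
            .put("min_gram", 2).put("max_gram", 3).put("side", "back").build();
        Settings indexSettings = newAnalysisSettingsBuilder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
        try {
            new EdgeNGramTokenizerFactory(
                IndexSettingsModule.newIndexSettings(new Index("test"), indexSettings), null, "ngr", settings).create();
            fail("side=back is no longer supported and should be rejected");
        } catch (IllegalArgumentException expected) {
            // expected: side=back support was removed
        }
    }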
@@ -19,8 +19,6 @@
package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
@@ -32,15 +30,11 @@ import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;

/**
 *
@@ -54,76 +48,6 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase {
        assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
    }

    public void testThatDefaultAndStandardAnalyzerChangedIn10Beta1() throws IOException {
        Analyzer currentStandardAnalyzer = PreBuiltAnalyzers.STANDARD.getAnalyzer(Version.V_1_0_0_Beta1);
        Analyzer currentDefaultAnalyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);

        // special case, these two are the same instance
        assertThat(currentDefaultAnalyzer, is(currentStandardAnalyzer));
        PreBuiltAnalyzers.DEFAULT.getAnalyzer(Version.V_1_0_0_Beta1);
        final int n = scaledRandomIntBetween(10, 100);
        Version version = Version.CURRENT;
        for(int i = 0; i < n; i++) {
            if (version.equals(Version.V_1_0_0_Beta1)) {
                assertThat(currentDefaultAnalyzer, is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version)));
            } else {
                assertThat(currentDefaultAnalyzer, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
            }
            Analyzer analyzer = PreBuiltAnalyzers.DEFAULT.getAnalyzer(version);
            TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
            ts.reset();
            CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
            List<String> list = new ArrayList<>();
            while(ts.incrementToken()) {
                list.add(charTermAttribute.toString());
            }
            if (version.onOrAfter(Version.V_1_0_0_Beta1)) {
                assertThat(list.size(), is(4));
                assertThat(list, contains("this", "is", "it", "dude"));

            } else {
                assertThat(list.size(), is(1));
                assertThat(list, contains("dude"));
            }
            ts.close();
            version = randomVersion(random());
        }
    }

    public void testAnalyzerChangedIn10RC1() throws IOException {
        Analyzer pattern = PreBuiltAnalyzers.PATTERN.getAnalyzer(Version.V_1_0_0_RC1);
        Analyzer standardHtml = PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(Version.V_1_0_0_RC1);
        final int n = scaledRandomIntBetween(10, 100);
        Version version = Version.CURRENT;
        for(int i = 0; i < n; i++) {
            if (version.equals(Version.V_1_0_0_RC1)) {
                assertThat(pattern, is(PreBuiltAnalyzers.PATTERN.getAnalyzer(version)));
                assertThat(standardHtml, is(PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version)));
            } else {
                assertThat(pattern, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
                assertThat(standardHtml, not(is(PreBuiltAnalyzers.DEFAULT.getAnalyzer(version))));
            }
            Analyzer analyzer = randomBoolean() ? PreBuiltAnalyzers.PATTERN.getAnalyzer(version) : PreBuiltAnalyzers.STANDARD_HTML_STRIP.getAnalyzer(version);
            TokenStream ts = analyzer.tokenStream("foo", "This is it Dude");
            ts.reset();
            CharTermAttribute charTermAttribute = ts.addAttribute(CharTermAttribute.class);
            List<String> list = new ArrayList<>();
            while(ts.incrementToken()) {
                list.add(charTermAttribute.toString());
            }
            if (version.onOrAfter(Version.V_1_0_0_RC1)) {
                assertThat(list.toString(), list.size(), is(4));
                assertThat(list, contains("this", "is", "it", "dude"));

            } else {
                assertThat(list.size(), is(1));
                assertThat(list, contains("dude"));
            }
            ts.close();
            version = randomVersion(random());
        }
    }

    public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() {
        assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT),
            is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_0_18_0)));
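Both deleted analyzer tests consume a TokenStream with Lucene's standard protocol: reset(), incrementToken() in a loop, then close(). For reference, that loop extracted into a self-contained helper; the class and method names here are illustrative only, not from the commit:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    final class TokenStreamProbe {
        // Collects the terms an analyzer emits for the given text, using the
        // same reset()/incrementToken()/close() sequence as the removed tests,
        // plus end(), which the tests skipped but the TokenStream contract
        // requires before close().
        static List<String> terms(Analyzer analyzer, String text) throws IOException {
            List<String> terms = new ArrayList<>();
            try (TokenStream ts = analyzer.tokenStream("foo", text)) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    terms.add(term.toString());
                }
                ts.end();
            }
            return terms;
        }
    }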