Improve phrase suggest test speed (#18633)

There is no reason to read the entire marvel hero file to test the features;
doing so can take several seconds, which is unnecessary.
This commit also splits SuggestSearchTests into core and modules/mustache parts,
and adds @Nightly to the forbidden APIs to make sure we don't use it, since nightly tests won't run in CI these days.
Simon Willnauer 2016-05-30 17:22:03 +02:00
parent 04cae88ff4
commit f74a78940c
6 changed files with 1223 additions and 1083 deletions
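
At a glance, the speed-up is the pattern visible in the NoisyChannelSpellCheckerTests hunks below: index a short hard-coded list of hero names instead of streaming the whole /config/names.txt resource. A minimal, self-contained sketch of that pattern (hypothetical class and constant names, plain JDK printing instead of a Lucene IndexWriter, not the actual test code):

import java.util.Arrays;
import java.util.List;

// Sketch only: the real tests hand each line to a Lucene IndexWriter; printing
// stands in for indexing here. A handful of representative names exercises the
// suggester features just as well as the full hero file, without the multi-second
// cost of reading and indexing the whole resource.
public class SuggestFixtureSketch {

    // Small in-memory fixture replacing the resource-file read.
    static final List<String> HEROES = Arrays.asList(
            "Captain America",
            "American Ace",
            "Xorr the God-Jewel",
            "Quasar II");

    public static void main(String[] args) {
        for (String hero : HEROES) {
            System.out.println("indexing: " + hero); // stand-in for writer.addDocument(...)
        }
    }
}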


@@ -1197,7 +1197,6 @@
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustachePlugin.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]RenderSearchTemplateTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SuggestSearchTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TemplateQueryParserTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TemplateQueryTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />


@@ -21,5 +21,7 @@ com.carrotsearch.randomizedtesting.annotations.Repeat @ Don't commit hardcoded r
 org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead
 org.apache.lucene.util.LuceneTestCase$Slow @ Don't write slow tests
 org.junit.Ignore @ Use AwaitsFix instead
+org.apache.lucene.util.LuceneTestCase$Nightly @ We don't run nightly tests at this point!
+com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly tests at this point!
 org.junit.Test @defaultMessage Just name your test method testFooBar
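
For illustration only (hypothetical class, not part of this commit): with the two Nightly signatures above in the test signatures file, forbidden-apis fails the build as soon as a test reaches for a nightly-only annotation, e.g.:

import com.carrotsearch.randomizedtesting.annotations.Nightly; // now a forbidden signature
import org.apache.lucene.util.LuceneTestCase;

public class MyNightlyOnlyTests extends LuceneTestCase {

    // forbidden-apis: "We don't run nightly tests at this point!"
    @Nightly
    public void testSomethingExpensive() {
        // long-running scenario that would only ever execute in a nightly build,
        // which is exactly what the new signatures are meant to prevent
    }
}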


@@ -43,15 +43,6 @@ import static org.hamcrest.Matchers.is;
 @ESIntegTestCase.ClusterScope(minNumDataNodes = 2)
 public class SearchWhileRelocatingIT extends ESIntegTestCase {
-    @Nightly
-    public void testSearchAndRelocateConcurrently0Replicas() throws Exception {
-        testSearchAndRelocateConcurrently(0);
-    }
-    @Nightly
-    public void testSearchAndRelocateConcurrently1Replicas() throws Exception {
-        testSearchAndRelocateConcurrently(1);
-    }
     public void testSearchAndRelocateConcurrentlyRanodmReplicas() throws Exception {
         testSearchAndRelocateConcurrently(randomIntBetween(0, 1));

File diff suppressed because it is too large.


@@ -59,7 +59,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
     private final BytesRef preTag = new BytesRef("<em>");
     private final BytesRef postTag = new BytesRef("</em>");
-    public void testMarvelHeros() throws IOException {
+    public void testNgram() throws IOException {
         RAMDirectory dir = new RAMDirectory();
         Map<String, Analyzer> mapping = new HashMap<>();
         mapping.put("body_ngram", new Analyzer() {
@@ -87,9 +87,23 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         IndexWriterConfig conf = new IndexWriterConfig(wrapper);
         IndexWriter writer = new IndexWriter(dir, conf);
-        BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), StandardCharsets.UTF_8));
-        String line = null;
-        while ((line = reader.readLine()) != null) {
+        String[] strings = new String[]{
+            "Xorr the God-Jewel",
+            "Grog the God-Crusher",
+            "Xorn",
+            "Walter Newell",
+            "Wanda Maximoff",
+            "Captain America",
+            "American Ace",
+            "USA Hero",
+            "Wundarr the Aquarian",
+            "Will o' the Wisp",
+            "Xemnu the Titan",
+            "Fantastic Four",
+            "Quasar",
+            "Quasar II"
+        };
+        for (String line : strings) {
             Document doc = new Document();
             doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
             doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
@@ -161,7 +175,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
                 TokenFilter filter = new LowerCaseFilter(t);
                 try {
                     SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer());
-                    parser.parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+                    parser.parse(new StringReader("usa => usa, america, american"));
                     filter = new SynonymFilter(filter, parser.build(), true);
                 } catch (Exception e) {
                     throw new RuntimeException(e);
@@ -191,7 +205,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[0].join(space, preTag, postTag).utf8ToString(), equalTo("captain <em>america</em>"));
     }
-    public void testMarvelHerosMultiGenerator() throws IOException {
+    public void testMultiGenerator() throws IOException {
         RAMDirectory dir = new RAMDirectory();
         Map<String, Analyzer> mapping = new HashMap<>();
         mapping.put("body_ngram", new Analyzer() {
@@ -228,9 +242,22 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         IndexWriterConfig conf = new IndexWriterConfig(wrapper);
         IndexWriter writer = new IndexWriter(dir, conf);
-        BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), StandardCharsets.UTF_8));
-        String line = null;
-        while ((line = reader.readLine()) != null) {
+        String[] strings = new String[]{
+            "Xorr the God-Jewel",
+            "Grog the God-Crusher",
+            "Xorn",
+            "Walter Newell",
+            "Wanda Maximoff",
+            "Captain America",
+            "American Ace",
+            "Wundarr the Aquarian",
+            "Will o' the Wisp",
+            "Xemnu the Titan",
+            "Fantastic Four",
+            "Quasar",
+            "Quasar II"
+        };
+        for (String line : strings) {
             Document doc = new Document();
             doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
             doc.add(new Field("body_reverse", line, TextField.TYPE_NOT_STORED));
@@ -284,7 +311,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("quasar ii"));
     }
-    public void testMarvelHerosTrigram() throws IOException {
+    public void testTrigram() throws IOException {
         RAMDirectory dir = new RAMDirectory();
         Map<String, Analyzer> mapping = new HashMap<>();
         mapping.put("body_ngram", new Analyzer() {
@@ -312,9 +339,23 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         IndexWriterConfig conf = new IndexWriterConfig(wrapper);
         IndexWriter writer = new IndexWriter(dir, conf);
-        BufferedReader reader = new BufferedReader(new InputStreamReader(NoisyChannelSpellCheckerTests.class.getResourceAsStream("/config/names.txt"), StandardCharsets.UTF_8));
-        String line = null;
-        while ((line = reader.readLine()) != null) {
+        String[] strings = new String[]{
+            "Xorr the God-Jewel",
+            "Grog the God-Crusher",
+            "Xorn",
+            "Walter Newell",
+            "Wanda Maximoff",
+            "Captain America",
+            "American Ace",
+            "USA Hero",
+            "Wundarr the Aquarian",
+            "Will o' the Wisp",
+            "Xemnu the Titan",
+            "Fantastic Four",
+            "Quasar",
+            "Quasar II"
+        };
+        for (String line : strings) {
             Document doc = new Document();
             doc.add(new Field("body", line, TextField.TYPE_NOT_STORED));
             doc.add(new Field("body_ngram", line, TextField.TYPE_NOT_STORED));
@@ -370,7 +411,7 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
                 TokenFilter filter = new LowerCaseFilter(t);
                 try {
                     SolrSynonymParser parser = new SolrSynonymParser(true, false, new WhitespaceAnalyzer());
-                    parser.parse(new StringReader("usa => usa, america, american\nursa => usa, america, american"));
+                    parser.parse(new StringReader("usa => usa, america, american"));
                     filter = new SynonymFilter(filter, parser.build(), true);
                 } catch (Exception e) {
                     throw new RuntimeException(e);