SOLR-1674: speed up analysis tests (thanks Robert Muir!)

git-svn-id: https://svn.apache.org/repos/asf/lucene/solr/trunk@921427 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Mark Robert Miller 2010-03-10 16:19:37 +00:00
parent ca50eef4ca
commit ebc4ce0b14
14 changed files with 50 additions and 112 deletions

View File

@@ -1,42 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.analysis;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.util.AbstractSolrTestCase;
import org.apache.solr.util.TestHarness;
import junit.framework.TestCase;
/**
*
*/
abstract public class AnalysisTestCase extends AbstractSolrTestCase {
protected SolrConfig solrConfig;
/** Creates a new instance of AnalysisTestCase */
public AnalysisTestCase() {
}
public String getSolrConfigFile() { return "solrconfig.xml"; }
public String getSchemaFile() { return "schema.xml"; }
public void setUp() throws Exception {
// if you override setUp or tearDown, you better call
// the super classes version
super.setUp();
solrConfig = TestHarness.createConfig(getSolrConfigFile());
}
}

View File

@@ -20,6 +20,8 @@ package org.apache.solr.analysis;
import java.io.IOException;
import java.io.StringReader;
import junit.framework.TestCase;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
@@ -30,7 +32,7 @@ import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
/**
* General token testing helper functions
*/
public abstract class BaseTokenTestCase extends AnalysisTestCase
public abstract class BaseTokenTestCase extends TestCase
{
// some helpers to test Analyzers and TokenStreams:
// these are taken from Lucene's BaseTokenStreamTestCase

View File

@@ -20,8 +20,8 @@ package org.apache.solr.analysis;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.solr.util.AbstractSolrTestCase;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
import java.io.StringReader;
import java.util.Set;
@@ -34,16 +34,9 @@ import java.util.HashMap;
* so this won't break if stop filter test files change
**/
public class CommonGramsFilterFactoryTest extends BaseTokenTestCase {
public String getSchemaFile() {
return "schema-stop-keep.xml";
}
public String getSolrConfigFile() {
return "solrconfig.xml";
}
public void testInform() throws Exception {
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
assertTrue("loader is null and it shouldn't be", loader != null);
CommonGramsFilterFactory factory = new CommonGramsFilterFactory();
Map<String, String> args = new HashMap<String, String>();
@@ -75,7 +68,7 @@ public class CommonGramsFilterFactoryTest extends BaseTokenTestCase {
* If no words are provided, then a set of english default stopwords is used.
*/
public void testDefaults() throws Exception {
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
assertTrue("loader is null and it shouldn't be", loader != null);
CommonGramsFilterFactory factory = new CommonGramsFilterFactory();
Map<String, String> args = new HashMap<String, String>();

View File

@@ -20,6 +20,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
import java.io.StringReader;
import java.util.Set;
@@ -32,16 +33,9 @@ import java.util.HashMap;
* so this won't break if stop filter test files change
**/
public class CommonGramsQueryFilterFactoryTest extends BaseTokenTestCase {
public String getSchemaFile() {
return "schema-stop-keep.xml";
}
public String getSolrConfigFile() {
return "solrconfig.xml";
}
public void testInform() throws Exception {
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
assertTrue("loader is null and it shouldn't be", loader != null);
CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory();
Map<String, String> args = new HashMap<String, String>();
@@ -73,7 +67,7 @@ public class CommonGramsQueryFilterFactoryTest extends BaseTokenTestCase {
* If no words are provided, then a set of english default stopwords is used.
*/
public void testDefaults() throws Exception {
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
assertTrue("loader is null and it shouldn't be", loader != null);
CommonGramsQueryFilterFactory factory = new CommonGramsQueryFilterFactory();
Map<String, String> args = new HashMap<String, String>();

View File

@@ -21,6 +21,7 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.SolrResourceLoader;
import org.tartarus.snowball.ext.EnglishStemmer;
import java.io.IOException;
@@ -114,7 +115,7 @@ public class SnowballPorterFilterFactoryTest extends BaseTokenTestCase {
*/
public void testProtected() throws Exception {
SnowballPorterFilterFactory factory = new SnowballPorterFilterFactory();
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
Map<String,String> args = new HashMap<String,String>();
args.put("protected", "protwords.txt");
args.put("language", "English");

View File

@@ -23,36 +23,23 @@ import java.util.Map;
import junit.framework.TestCase;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.Attribute;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.util.AbstractSolrTestCase;
import org.apache.solr.core.SolrResourceLoader;
public class TestDelimitedPayloadTokenFilterFactory extends AbstractSolrTestCase{
public String getSchemaFile() {
return "schema.xml";
}
public String getSolrConfigFile() {
return "solrconfig.xml";
}
public class TestDelimitedPayloadTokenFilterFactory extends TestCase {
public void testEncoder() throws Exception {
Map<String,String> args = new HashMap<String, String>();
args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, "float");
DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory();
factory.init(args);
ResourceLoader loader = h.getCore().getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
factory.inform(loader);
TokenStream input = new WhitespaceTokenizer(new StringReader("the|0.1 quick|0.1 red|0.1"));
@@ -74,7 +61,7 @@ public class TestDelimitedPayloadTokenFilterFactory extends AbstractSolrTestCase
args.put(DelimitedPayloadTokenFilterFactory.DELIMITER_ATTR, "*");
DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory();
factory.init(args);
ResourceLoader loader = h.getCore().getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
factory.inform(loader);
TokenStream input = new WhitespaceTokenizer(new StringReader("the*0.1 quick*0.1 red*0.1"));

View File

@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
/**
* Simple tests to ensure the Dictionary compound filter factory is working.
@@ -38,7 +39,7 @@ public class TestDictionaryCompoundWordTokenFilterFactory extends BaseTokenTestC
Reader reader = new StringReader("I like to play softball");
Tokenizer tokenizer = new WhitespaceTokenizer(reader);
DictionaryCompoundWordTokenFilterFactory factory = new DictionaryCompoundWordTokenFilterFactory();
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
Map<String,String> args = new HashMap<String,String>();
args.put("dictionary", "compoundDictionary.txt");
factory.init(args);

View File

@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
/**
* Simple tests to ensure the French elision filter factory is working.
@@ -38,7 +39,7 @@ public class TestElisionFilterFactory extends BaseTokenTestCase {
Reader reader = new StringReader("l'avion");
Tokenizer tokenizer = new WhitespaceTokenizer(reader);
ElisionFilterFactory factory = new ElisionFilterFactory();
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
Map<String,String> args = new HashMap<String,String>();
args.put("articles", "frenchArticles.txt");
factory.init(args);

View File

@@ -16,29 +16,24 @@ package org.apache.solr.analysis;
* limitations under the License.
*/
import org.apache.solr.util.AbstractSolrTestCase;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
import java.util.Set;
import java.util.Map;
import java.util.HashMap;
import junit.framework.TestCase;
/**
*
*
**/
public class TestKeepFilterFactory extends AbstractSolrTestCase{
public String getSchemaFile() {
return "schema-stop-keep.xml";
}
public String getSolrConfigFile() {
return "solrconfig.xml";
}
public class TestKeepFilterFactory extends TestCase{
public void testInform() throws Exception {
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
assertTrue("loader is null and it shouldn't be", loader != null);
KeepWordFilterFactory factory = new KeepWordFilterFactory();
Map<String, String> args = new HashMap<String, String>();

View File

@@ -25,6 +25,8 @@ import java.util.Set;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
/**
@@ -40,13 +42,13 @@ public class TestKeepWordFilter extends BaseTokenTestCase {
String input = "aaa BBB ccc ddd EEE";
Map<String,String> args = new HashMap<String, String>();
ResourceLoader loader = new SolrResourceLoader(null, null);
// Test Stopwords
KeepWordFilterFactory factory = new KeepWordFilterFactory();
args.put( "ignoreCase", "true" );
factory.init( args );
factory.inform( solrConfig.getResourceLoader() );
factory.inform( loader );
factory.setWords( words );
assertTrue(factory.isIgnoreCase());
TokenStream stream = factory.create(new WhitespaceTokenizer(new StringReader(input)));
@@ -56,7 +58,7 @@ public class TestKeepWordFilter extends BaseTokenTestCase {
factory = new KeepWordFilterFactory();
args = new HashMap<String, String>();
factory.init( args );
factory.inform( solrConfig.getResourceLoader() );
factory.inform( loader );
factory.setIgnoreCase(true);
factory.setWords( words );
assertTrue(factory.isIgnoreCase());
@@ -67,7 +69,7 @@ public class TestKeepWordFilter extends BaseTokenTestCase {
args = new HashMap<String, String>();
args.put( "ignoreCase", "false" );
factory.init( args );
factory.inform( solrConfig.getResourceLoader() );
factory.inform( loader );
assertFalse(factory.isIgnoreCase());
stream = factory.create(new WhitespaceTokenizer(new StringReader(input)));
assertTokenStreamContents(stream, new String[] { "aaa" });

View File

@@ -30,8 +30,11 @@ import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.search.Query;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.search.SolrQueryParser;
import org.apache.solr.util.AbstractSolrTestCase;
public class TestReversedWildcardFilterFactory extends BaseTokenTestCase {
import static org.apache.solr.analysis.BaseTokenTestCase.*;
public class TestReversedWildcardFilterFactory extends AbstractSolrTestCase {
Map<String,String> args = new HashMap<String, String>();
ReversedWildcardFilterFactory factory = new ReversedWildcardFilterFactory();
IndexSchema schema;

View File

@@ -17,29 +17,24 @@ package org.apache.solr.analysis;
*/
import org.apache.solr.util.AbstractSolrTestCase;
import org.apache.solr.common.ResourceLoader;
import org.apache.solr.core.SolrResourceLoader;
import java.util.Set;
import java.util.Map;
import java.util.HashMap;
import junit.framework.TestCase;
/**
*
*
**/
public class TestStopFilterFactory extends AbstractSolrTestCase{
public String getSchemaFile() {
return "schema-stop-keep.xml";
}
public String getSolrConfigFile() {
return "solrconfig.xml";
}
public class TestStopFilterFactory extends TestCase {
public void testInform() throws Exception {
ResourceLoader loader = solrConfig.getResourceLoader();
ResourceLoader loader = new SolrResourceLoader(null, null);
assertTrue("loader is null and it shouldn't be", loader != null);
StopFilterFactory factory = new StopFilterFactory();
Map<String, String> args = new HashMap<String, String>();

View File

@@ -22,9 +22,11 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.lucene.analysis.Token;
public class TestSynonymMap extends AnalysisTestCase {
public class TestSynonymMap extends TestCase {
public void testInvalidMappingRules() throws Exception {
SynonymMap synMap = new SynonymMap( true );

View File

@@ -27,21 +27,25 @@ import org.apache.lucene.analysis.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.SingleTokenTokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;
import org.apache.solr.util.AbstractSolrTestCase;
import static org.apache.solr.analysis.BaseTokenTestCase.*;
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
/**
* New WordDelimiterFilter tests... most of the tests are in ConvertedLegacyTest
*/
public class TestWordDelimiterFilter extends BaseTokenTestCase {
public class TestWordDelimiterFilter extends AbstractSolrTestCase {
public String getSchemaFile() { return "solr/conf/schema.xml"; }
public String getSolrConfigFile() { return "solr/conf/solrconfig.xml"; }
public void posTst(String v1, String v2, String s1, String s2) {
assertU(adoc("id", "42",
"subword", v1,