LUCENE-4044: fix more compile errors

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene2510@1364879 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2012-07-24 04:05:36 +00:00
parent 73d4b55f7e
commit ca658fd75d
7 changed files with 14 additions and 13 deletions

View File

@@ -29,7 +29,6 @@ import org.apache.lucene.analysis.CharFilter;
 import org.apache.lucene.analysis.charfilter.MappingCharFilter;
 import org.apache.lucene.analysis.charfilter.NormalizeCharMap;
 import org.apache.lucene.analysis.util.*;
-import org.apache.solr.common.util.StrUtils;

 /**
  * Factory for {@link MappingCharFilter}.
@@ -62,7 +61,7 @@ public class MappingCharFilterFactory extends CharFilterFactory implements
       wlist = loader.getLines( mapping );
     }
     else{
-      List<String> files = StrUtils.splitFileNames( mapping );
+      List<String> files = splitFileNames( mapping );
       wlist = new ArrayList<String>();
       for( String file : files ){
         List<String> lines = loader.getLines( file.trim() );
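
The first four factory files make the same change: they stop calling Solr's StrUtils.splitFileNames and instead use the splitFileNames helper inherited from the analysis factory base class, removing a dependency on solr-core. Below is a minimal sketch of the splitting behavior these factories rely on, mirroring what StrUtils.splitFileNames did (comma-separated file names, with backslash-escaped commas kept inside a single name); the standalone class is illustrative only, not the actual Lucene implementation.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class SplitFileNamesSketch {
  // Illustration only: the assumed behavior of the inherited splitFileNames(String) helper.
  static List<String> splitFileNames(String fileNames) {
    if (fileNames == null) {
      return Collections.<String>emptyList();
    }
    List<String> result = new ArrayList<String>();
    // Split on commas that are not preceded by a backslash, then drop the escape characters.
    for (String file : fileNames.split("(?<!\\\\),")) {
      result.add(file.replaceAll("\\\\(?=,)", ""));
    }
    return result;
  }

  public static void main(String[] args) {
    // Prints [stopwords.txt, extra,words.txt]
    System.out.println(splitFileNames("stopwords.txt,extra\\,words.txt"));
  }
}

With that behavior, a comma-separated attribute such as a "mapping" or "dictionary" value still resolves to the same list of resource names as it did through StrUtils.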

View File

@@ -21,7 +21,6 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.core.TypeTokenFilter;
 import org.apache.lucene.analysis.util.InitializationException;
 import org.apache.lucene.analysis.util.ResourceLoader;
-import org.apache.solr.common.util.StrUtils;
 import org.apache.lucene.analysis.util.ResourceLoaderAware;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
@@ -50,7 +49,7 @@ public class TypeTokenFilterFactory extends TokenFilterFactory implements Resour
     useWhitelist = getBoolean("useWhitelist", false);
     if (stopTypesFiles != null) {
       try {
-        List<String> files = StrUtils.splitFileNames(stopTypesFiles);
+        List<String> files = splitFileNames(stopTypesFiles);
         if (files.size() > 0) {
           stopTypes = new HashSet<String>();
           for (String file : files) {

View File

@@ -23,7 +23,6 @@ import java.util.List;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
 import org.apache.lucene.analysis.util.*;
-import org.apache.solr.common.util.StrUtils;

 /**
  * Factory for {@link StemmerOverrideFilter}.
@@ -45,7 +44,7 @@ public class StemmerOverrideFilterFactory extends TokenFilterFactory implements
     ignoreCase = getBoolean("ignoreCase", false);
     if (dictionaryFiles != null) {
       assureMatchVersion();
-      List<String> files = StrUtils.splitFileNames(dictionaryFiles);
+      List<String> files = splitFileNames(dictionaryFiles);
       try {
         if (files.size() > 0) {
           dictionary = new CharArrayMap<String>(luceneMatchVersion,

View File

@@ -22,8 +22,6 @@ import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterIterator;
 import org.apache.lucene.analysis.util.*;
-import org.apache.solr.common.util.StrUtils;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -67,7 +65,7 @@ public class WordDelimiterFilterFactory extends TokenFilterFactory implements Re
     String types = args.get(TYPES);
     if (types != null) {
       try {
-        List<String> files = StrUtils.splitFileNames( types );
+        List<String> files = splitFileNames( types );
         List<String> wlist = new ArrayList<String>();
         for( String file : files ){
           List<String> lines = loader.getLines( file.trim() );

View File

@@ -23,8 +23,8 @@ import java.util.Set;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.util.CharArraySet;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
-import org.apache.solr.analysis.CommonGramsFilterFactory;
-import org.apache.solr.analysis.StopFilterFactory;
+import org.apache.lucene.analysis.commongrams.CommonGramsFilterFactory;
+import org.apache.lucene.analysis.core.StopFilterFactory;
 import org.apache.solr.analysis.TokenizerChain;
 import org.apache.solr.schema.IndexSchema;
 import org.carrot2.core.LanguageCode;

View File

@@ -20,8 +20,8 @@ package org.apache.solr.core;
 import junit.framework.Assert;
 import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.analysis.KeywordTokenizerFactory;
-import org.apache.solr.analysis.NGramFilterFactory;
+import org.apache.lucene.analysis.core.KeywordTokenizerFactory;
+import org.apache.lucene.analysis.ngram.NGramFilterFactory;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.handler.admin.LukeRequestHandler;
 import org.apache.solr.handler.component.FacetComponent;

View File

@@ -18,6 +18,12 @@ package org.apache.solr.schema;
  */

 import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.charfilter.MappingCharFilterFactory;
+import org.apache.lucene.analysis.core.KeywordTokenizerFactory;
+import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
+import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
+import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory;
+import org.apache.lucene.analysis.miscellaneous.TrimFilterFactory;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.analysis.*;
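
The last three files are test-side import fixes only: the factories these tests reference (CommonGramsFilterFactory, StopFilterFactory, KeywordTokenizerFactory, NGramFilterFactory, and the char filter and token filter factories in the schema test) now resolve from their new Lucene analysis packages rather than org.apache.solr.analysis. A small sketch of what that means for calling code, assuming the 4.x-era pattern of a no-arg constructor followed by init(Map); the class names come from this diff, but the scaffolding below is illustrative and not taken from the commit.

import java.io.StringReader;
import java.util.Collections;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizerFactory;

public class RelocatedFactorySketch {
  public static void main(String[] args) throws Exception {
    // Same factory class as before the move, now imported from the Lucene package.
    KeywordTokenizerFactory factory = new KeywordTokenizerFactory();
    factory.init(Collections.<String, String>emptyMap()); // assumed 4.x-era init(Map) API
    Tokenizer tokenizer = factory.create(new StringReader("LUCENE-4044"));
    System.out.println(tokenizer.getClass().getSimpleName()); // KeywordTokenizer
  }
}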