SOLR-6022: Rename getAnalyzer() to getIndexAnalyzer()

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1592076 13f79535-47bb-0310-9956-ffa450edef68
Ryan Ernst 2014-05-02 22:34:25 +00:00
parent 4bd9f8b0eb
commit ad37014a3a
32 changed files with 96 additions and 109 deletions


@@ -48,6 +48,9 @@ Upgrading from Solr 4.x
corresponding Trie-based field type and then re-index. See SOLR-5936 for more
information.
* getAnalyzer() in IndexSchema and FieldType that was deprecated in Solr 4.9 has
been removed. Use getIndexAnalyzer() instead. See SOLR-6022 for more information.
Detailed Change List
----------------------
@@ -149,6 +152,10 @@ Other Changes
* SOLR-6013: Fix method visibility of Evaluator, refactor DateFormatEvaluator for
extensibility. (Aaron LaBella via shalin)
* SOLR-6022: Deprecate getAnalyzer() in IndexSchema and FieldType, and add getIndexAnalyzer().
(Ryan Ernst)
Build
---------------------
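
The upgrade note above boils down to a one-line change at each call site: fetch the index-time analyzer through getIndexAnalyzer() instead of the removed getAnalyzer(). A minimal migration sketch (hypothetical helper, not part of this commit; assumes a loaded IndexSchema):

// Sketch only, not part of this commit: the rename as seen from calling code.
import org.apache.lucene.analysis.Analyzer;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;

public class GetIndexAnalyzerMigration {
  // Solr 4.x:          schema.getAnalyzer()      / fieldType.getAnalyzer()
  // trunk (SOLR-6022): schema.getIndexAnalyzer() / fieldType.getIndexAnalyzer()
  static Analyzer indexAnalyzerFor(IndexSchema schema, String fieldName) {
    FieldType ft = schema.getFieldType(fieldName); // throws SolrException for unknown fields
    return ft.getIndexAnalyzer();                  // the query side is unchanged: ft.getQueryAnalyzer()
  }
}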


@@ -226,7 +226,7 @@ public class ICUCollationField extends FieldType {
}
@Override
public Analyzer getAnalyzer() {
public Analyzer getIndexAnalyzer() {
return analyzer;
}


@@ -88,7 +88,7 @@ public class SolrStopwordsCarrot2LexicalDataFactory implements ILexicalDataFacto
// of this class are not used by multiple threads at a time.
if (!solrStopWords.containsKey(fieldName)) {
final Analyzer fieldAnalyzer = core.getLatestSchema().getFieldType(fieldName)
.getAnalyzer();
.getIndexAnalyzer();
if (fieldAnalyzer instanceof TokenizerChain) {
final TokenFilterFactory[] filterFactories = ((TokenizerChain) fieldAnalyzer)
.getTokenFilterFactories();


@@ -79,7 +79,7 @@ public final class TokenizeTextBuilder implements CommandBuilder {
if (fieldType == null) {
throw new MorphlineCompilationException("Missing Solr field type in schema.xml for name: " + solrFieldType, config);
}
this.analyzer = fieldType.getAnalyzer();
this.analyzer = fieldType.getIndexAnalyzer();
Preconditions.checkNotNull(analyzer);
try { // register CharTermAttribute for later (implicit) reuse
this.token = analyzer.tokenStream("content", reader).addAttribute(CharTermAttribute.class);


@@ -264,7 +264,7 @@ public class SolrConfig extends Config {
// TODO: WTF is up with queryConverter???
// it aparently *only* works as a singleton? - SOLR-4304
// and even then -- only if there is a single SpellCheckComponent
// because of queryConverter.setAnalyzer
// because of queryConverter.setIndexAnalyzer
loadPluginInfo(QueryConverter.class,"queryConverter",
REQUIRE_NAME, REQUIRE_CLASS);


@@ -237,7 +237,7 @@ public class DocumentAnalysisRequestHandler extends AnalysisRequestHandlerBase {
}
}
Analyzer analyzer = fieldType.getAnalyzer();
Analyzer analyzer = fieldType.getIndexAnalyzer();
AnalysisContext analysisContext = new AnalysisContext(fieldType, analyzer, termsToMatch);
Collection<Object> fieldValues = document.getFieldValues(name);
NamedList<NamedList<? extends Object>> indexTokens


@@ -217,7 +217,7 @@ public class FieldAnalysisRequestHandler extends AnalysisRequestHandlerBase {
NamedList<NamedList> analyzeResults = new SimpleOrderedMap<>();
if (analysisRequest.getFieldValue() != null) {
AnalysisContext context = new AnalysisContext(fieldName, fieldType, fieldType.getAnalyzer(), termsToMatch);
AnalysisContext context = new AnalysisContext(fieldName, fieldType, fieldType.getIndexAnalyzer(), termsToMatch);
NamedList analyzedTokens = analyzeValue(analysisRequest.getFieldValue(), context);
analyzeResults.add("index", analyzedTokens);
}


@@ -296,7 +296,7 @@ public class MoreLikeThisHandler extends RequestHandlerBase
this.mlt = new MoreLikeThis( reader ); // TODO -- after LUCENE-896, we can use , searcher.getSimilarity() );
mlt.setFieldNames(fields);
mlt.setAnalyzer( searcher.getSchema().getAnalyzer() );
mlt.setAnalyzer( searcher.getSchema().getIndexAnalyzer() );
// configurable params


@@ -26,14 +26,12 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.*;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.PriorityQueue;
@@ -426,7 +424,7 @@ public class LukeRequestHandler extends RequestHandlerBase
field.add("fields", typeusemap.get( ft.getTypeName() ) );
field.add("tokenized", ft.isTokenized() );
field.add("className", ft.getClass().getName());
field.add("indexAnalyzer", getAnalyzerInfo(ft.getAnalyzer()));
field.add("indexAnalyzer", getAnalyzerInfo(ft.getIndexAnalyzer()));
field.add("queryAnalyzer", getAnalyzerInfo(ft.getQueryAnalyzer()));
field.add("similarity", getSimilarityInfo(ft.getSimilarity()));
types.add( ft.getTypeName(), field );
@@ -521,8 +519,8 @@ public class LukeRequestHandler extends RequestHandlerBase
if (f == uniqueField){
field.add("uniqueKey", true);
}
if (ft.getAnalyzer().getPositionIncrementGap(f.getName()) != 0) {
field.add("positionIncrementGap", ft.getAnalyzer().getPositionIncrementGap(f.getName()));
if (ft.getIndexAnalyzer().getPositionIncrementGap(f.getName()) != 0) {
field.add("positionIncrementGap", ft.getIndexAnalyzer().getPositionIncrementGap(f.getName()));
}
field.add("copyDests", toListOfStringDests(schema.getCopyFieldsList(f.getName())));
field.add("copySources", schema.getCopySources(f.getName()));


@@ -20,8 +20,6 @@ import org.apache.lucene.analysis.CachingTokenFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.StorableField;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.search.Query;
@@ -639,7 +637,7 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf
private TokenStream createAnalyzerTStream(IndexSchema schema, String fieldName, String docText) throws IOException {
TokenStream tstream;
TokenStream ts = schema.getAnalyzer().tokenStream(fieldName, docText);
TokenStream ts = schema.getIndexAnalyzer().tokenStream(fieldName, docText);
ts.reset();
tstream = new TokenOrderingFilter(ts, 10);
return tstream;
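
createAnalyzerTStream() above re-analyzes the stored text with the schema's index-time chain. For reference, a minimal sketch (hypothetical helper, not from this commit) of the standard way to consume such a TokenStream, since reset(), end() and close() are required by the TokenStream contract:

// Sketch only, not part of this commit.
import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.solr.schema.IndexSchema;

public class IndexAnalyzerTokens {
  static void printTokens(IndexSchema schema, String fieldName, String text) throws IOException {
    try (TokenStream ts = schema.getIndexAnalyzer().tokenStream(fieldName, text)) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();                       // must be called before the first incrementToken()
      while (ts.incrementToken()) {
        System.out.println(term.toString());
      }
      ts.end();                         // records end-of-stream offset state
    }                                   // try-with-resources closes the stream
  }
}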


@@ -187,7 +187,7 @@ public class PostingsSolrHighlighter extends SolrHighlighter implements PluginIn
@Override
protected Analyzer getIndexAnalyzer(String field) {
if (params.getFieldBool(field, HighlightParams.HIGHLIGHT_MULTI_TERM, false)) {
return schema.getAnalyzer();
return schema.getIndexAnalyzer();
} else {
return null;
}


@@ -677,7 +677,7 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
return fac;
}
Analyzer a = fieldType.getAnalyzer();
Analyzer a = fieldType.getIndexAnalyzer();
if (a instanceof TokenizerChain) {
// examine the indexing analysis chain if it supports leading wildcards
TokenizerChain tc = (TokenizerChain)a;


@@ -98,7 +98,7 @@ public class BoolField extends PrimitiveFieldType {
@Override
public Analyzer getAnalyzer() {
public Analyzer getIndexAnalyzer() {
return boolAnalyzer;
}


@@ -201,7 +201,7 @@ public class CollationField extends FieldType {
}
@Override
public Analyzer getAnalyzer() {
public Analyzer getIndexAnalyzer() {
return analyzer;
}


@@ -58,7 +58,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@@ -167,7 +166,7 @@ public abstract class FieldType extends FieldProperties {
String positionInc = initArgs.get(POSITION_INCREMENT_GAP);
if (positionInc != null) {
Analyzer analyzer = getAnalyzer();
Analyzer analyzer = getIndexAnalyzer();
if (analyzer instanceof SolrAnalyzer) {
((SolrAnalyzer)analyzer).setPositionIncrementGap(Integer.parseInt(positionInc));
} else {
@@ -214,7 +213,7 @@ public abstract class FieldType extends FieldProperties {
public String toString() {
return typeName + "{class=" + this.getClass().getName()
// + propertiesToString(properties)
+ (analyzer != null ? ",analyzer=" + analyzer.getClass().getName() : "")
+ (indexAnalyzer != null ? ",analyzer=" + indexAnalyzer.getClass().getName() : "")
+ ",args=" + args
+"}";
}
@@ -481,23 +480,9 @@ public abstract class FieldType extends FieldProperties {
}
}
/**
* Analyzer set by schema for text types to use when indexing fields
* of this type, subclasses can set analyzer themselves or override
* getAnalyzer()
* @see #getAnalyzer
* @see #setAnalyzer
*/
protected Analyzer analyzer=new DefaultAnalyzer(256);
private Analyzer indexAnalyzer = new DefaultAnalyzer(256);
/**
* Analyzer set by schema for text types to use when searching fields
* of this type, subclasses can set analyzer themselves or override
* getAnalyzer()
* @see #getQueryAnalyzer
* @see #setQueryAnalyzer
*/
protected Analyzer queryAnalyzer=analyzer;
private Analyzer queryAnalyzer = indexAnalyzer;
/**
* Returns the Analyzer to be used when indexing fields of this type.
@@ -506,8 +491,8 @@ public abstract class FieldType extends FieldProperties {
* </p>
* @see #getQueryAnalyzer
*/
public Analyzer getAnalyzer() {
return analyzer;
public Analyzer getIndexAnalyzer() {
return indexAnalyzer;
}
/**
@@ -515,52 +500,65 @@ public abstract class FieldType extends FieldProperties {
* <p>
* This method may be called many times, at any time.
* </p>
* @see #getAnalyzer
* @see #getIndexAnalyzer
*/
public Analyzer getQueryAnalyzer() {
return queryAnalyzer;
}
/**
* Returns true if this type supports index and query analyzers, false otherwise.
*/
protected boolean supportsAnalyzers() {
return false;
}
/**
* Sets the Analyzer to be used when indexing fields of this type.
*
* <p>
* The default implementation throws a SolrException.
* Subclasses that override this method need to ensure the behavior
* of the analyzer is consistent with the implementation of toInternal.
* Subclasses should override {@link #supportsAnalyzers()} to
* enable this function.
* </p>
*
* @see #toInternal
* @see #supportsAnalyzers()
* @see #setQueryAnalyzer
* @see #getAnalyzer
* @see #getIndexAnalyzer
*/
public void setAnalyzer(Analyzer analyzer) {
public final void setIndexAnalyzer(Analyzer analyzer) {
if (supportsAnalyzers()) {
indexAnalyzer = analyzer;
} else {
throw new SolrException
(ErrorCode.SERVER_ERROR,
"FieldType: " + this.getClass().getSimpleName() +
" (" + typeName + ") does not support specifying an analyzer");
}
}
/**
* Sets the Analyzer to be used when querying fields of this type.
*
* <p>
* The default implementation throws a SolrException.
* Subclasses that override this method need to ensure the behavior
* of the analyzer is consistent with the implementation of toInternal.
* Subclasses should override {@link #supportsAnalyzers()} to
* enable this function.
* </p>
*
* @see #toInternal
* @see #setAnalyzer
* @see #supportsAnalyzers()
* @see #setIndexAnalyzer
* @see #getQueryAnalyzer
*/
public void setQueryAnalyzer(Analyzer analyzer) {
public final void setQueryAnalyzer(Analyzer analyzer) {
if (supportsAnalyzers()) {
queryAnalyzer = analyzer;
} else {
throw new SolrException
(ErrorCode.SERVER_ERROR,
"FieldType: " + this.getClass().getSimpleName() +
" (" + typeName + ") does not support specifying an analyzer");
}
}
/** @lucene.internal */
protected SimilarityFactory similarityFactory;
@@ -845,7 +843,7 @@ public abstract class FieldType extends FieldProperties {
if (isExplicitAnalyzer()) {
String analyzerProperty = isExplicitQueryAnalyzer() ? INDEX_ANALYZER : ANALYZER;
namedPropertyValues.add(analyzerProperty, getAnalyzerProperties(getAnalyzer()));
namedPropertyValues.add(analyzerProperty, getAnalyzerProperties(getIndexAnalyzer()));
}
if (isExplicitQueryAnalyzer()) {
String analyzerProperty = isExplicitAnalyzer() ? QUERY_ANALYZER : ANALYZER;
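
The FieldType refactor above replaces the overridable setAnalyzer()/setQueryAnalyzer() methods with final setters gated by a single supportsAnalyzers() hook: types that want schema-supplied analyzers opt in by overriding it (as TextField does later in this commit), all others throw. A minimal sketch of the resulting behavior (standalone construction of the field types outside schema loading is hypothetical; KeywordAnalyzer is only a stand-in):

// Sketch only, not part of this commit.
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.solr.common.SolrException;
import org.apache.solr.schema.BoolField;
import org.apache.solr.schema.TextField;

public class SupportsAnalyzersSketch {
  public static void main(String[] args) {
    TextField text = new TextField();            // overrides supportsAnalyzers() -> true
    text.setIndexAnalyzer(new KeywordAnalyzer());
    text.setQueryAnalyzer(new KeywordAnalyzer());

    BoolField bool = new BoolField();            // keeps the default: supportsAnalyzers() -> false
    try {
      bool.setIndexAnalyzer(new KeywordAnalyzer());
    } catch (SolrException expected) {
      // "... does not support specifying an analyzer"
      System.out.println(expected.getMessage());
    }
  }
}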


@@ -116,7 +116,7 @@ public final class FieldTypePluginLoader
}
if (null != analyzer) {
ft.setAnalyzer(analyzer);
ft.setIndexAnalyzer(analyzer);
ft.setQueryAnalyzer(queryAnalyzer);
if (ft instanceof TextField) {
if (null == multiAnalyzer) {


@@ -51,7 +51,6 @@ import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
@@ -128,7 +127,7 @@ public class IndexSchema {
protected volatile DynamicField[] dynamicFields;
public DynamicField[] getDynamicFields() { return dynamicFields; }
private Analyzer analyzer;
private Analyzer indexAnalyzer;
private Analyzer queryAnalyzer;
protected List<SchemaAware> schemaAware = new ArrayList<>();
@@ -276,7 +275,7 @@ public class IndexSchema {
* a field specific Analyzer based on the field type.
* </p>
*/
public Analyzer getAnalyzer() { return analyzer; }
public Analyzer getIndexAnalyzer() { return indexAnalyzer; }
/**
* Returns the Analyzer used when searching this index
@@ -355,7 +354,7 @@ public class IndexSchema {
* @since solr 1.3
*/
public void refreshAnalyzers() {
analyzer = new SolrIndexAnalyzer();
indexAnalyzer = new SolrIndexAnalyzer();
queryAnalyzer = new SolrQueryAnalyzer();
}
@@ -388,7 +387,7 @@ public class IndexSchema {
protected HashMap<String, Analyzer> analyzerCache() {
HashMap<String, Analyzer> cache = new HashMap<>();
for (SchemaField f : getFields().values()) {
Analyzer analyzer = f.getType().getAnalyzer();
Analyzer analyzer = f.getType().getIndexAnalyzer();
cache.put(f.getName(), analyzer);
}
return cache;
@@ -397,7 +396,7 @@ public class IndexSchema {
@Override
protected Analyzer getWrappedAnalyzer(String fieldName) {
Analyzer analyzer = analyzers.get(fieldName);
return analyzer != null ? analyzer : getDynamicFieldType(fieldName).getAnalyzer();
return analyzer != null ? analyzer : getDynamicFieldType(fieldName).getIndexAnalyzer();
}
}
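
SolrIndexAnalyzer above keeps its previous shape: one delegating analyzer whose analyzerCache() maps each field name to its type's index analyzer, with getDynamicFieldType() as the fallback. The idea is the same as Lucene's PerFieldAnalyzerWrapper; a minimal standalone sketch (field names are made up, and the no-arg StandardAnalyzer constructor assumes a Lucene 5.x-style API):

// Sketch only, not part of this commit.
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

public class PerFieldDelegationSketch {
  public static void main(String[] args) {
    Map<String, Analyzer> perField = new HashMap<>();
    perField.put("id", new KeywordAnalyzer());   // analogous to the analyzerCache() entries

    // Unknown fields fall back to the default, like getDynamicFieldType(fieldName).getIndexAnalyzer().
    Analyzer indexAnalyzer = new PerFieldAnalyzerWrapper(new StandardAnalyzer(), perField);
    System.out.println(indexAnalyzer.getClass().getSimpleName());
  }
}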


@@ -83,7 +83,7 @@ public class PreAnalyzedField extends FieldType {
}
@Override
public Analyzer getAnalyzer() {
public Analyzer getIndexAnalyzer() {
return new SolrAnalyzer() {
@Override
@@ -96,7 +96,7 @@ public class PreAnalyzedField extends FieldType {
@Override
public Analyzer getQueryAnalyzer() {
return getAnalyzer();
return getIndexAnalyzer();
}
@Override


@@ -41,7 +41,7 @@ public class TextField extends FieldType {
/**
* Analyzer set by schema for text types to use when searching fields
* of this type, subclasses can set analyzer themselves or override
* getAnalyzer()
* getIndexAnalyzer()
* This analyzer is used to process wildcard, prefix, regex and other multiterm queries. It
* assembles a list of tokenizer +filters that "make sense" for this, primarily accent folding and
* lowercasing filters, and charfilters.
@@ -76,7 +76,7 @@ public class TextField extends FieldType {
* <p>
* This method may be called many times, at any time.
* </p>
* @see #getAnalyzer
* @see #getIndexAnalyzer
*/
public Analyzer getMultiTermAnalyzer() {
return multiTermAnalyzer;
@@ -112,16 +112,10 @@
}
@Override
public void setAnalyzer(Analyzer analyzer) {
this.analyzer = analyzer;
protected boolean supportsAnalyzers() {
return true;
}
@Override
public void setQueryAnalyzer(Analyzer analyzer) {
this.queryAnalyzer = analyzer;
}
@Override
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
Analyzer multiAnalyzer = getMultiTermAnalyzer();
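
TextField keeps a separate multi-term chain for wildcard, prefix and regex terms, as its javadoc above describes, while other callers in this commit (for example SolrQueryParserBase) inspect the index-time chain directly. A minimal sketch of one way to pick between the two (hypothetical helper, not Solr's actual selection logic):

// Sketch only, not part of this commit.
import org.apache.lucene.analysis.Analyzer;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.TextField;

public class MultiTermAnalyzerChoice {
  // Wildcard/prefix/regex terms: use TextField's dedicated multi-term chain when available,
  // otherwise fall back to the type's index-time analyzer.
  static Analyzer forMultiTermQuery(FieldType ft) {
    return (ft instanceof TextField)
        ? ((TextField) ft).getMultiTermAnalyzer()
        : ft.getIndexAnalyzer();
  }
}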


@@ -347,8 +347,6 @@ public class TrieDateField extends PrimitiveFieldType implements DateValueFieldT
@Override
protected void init(IndexSchema schema, Map<String, String> args) {
wrappedField.init(schema, args);
analyzer = wrappedField.analyzer;
queryAnalyzer = wrappedField.queryAnalyzer;
}
@Override


@@ -1230,7 +1230,7 @@ public class ExtendedDismaxQParser extends QParser {
}
TokenizerChain tcq = (TokenizerChain) qa;
Analyzer ia = ft.getAnalyzer();
Analyzer ia = ft.getIndexAnalyzer();
if (ia == qa || !(ia instanceof TokenizerChain)) {
return qa;
}


@@ -96,7 +96,7 @@ public class FileBasedSpellChecker extends AbstractLuceneSpellChecker {
IndexWriter writer = new IndexWriter(
ramDir,
new IndexWriterConfig(core.getSolrConfig().luceneMatchVersion, fieldType.getAnalyzer()).
new IndexWriterConfig(core.getSolrConfig().luceneMatchVersion, fieldType.getIndexAnalyzer()).
setMaxBufferedDocs(150).
setMergePolicy(mp).
setOpenMode(IndexWriterConfig.OpenMode.CREATE)


@@ -23,7 +23,6 @@ import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.search.suggest.Lookup;
import org.apache.lucene.search.suggest.analyzing.AnalyzingInfixSuggester;
import org.apache.lucene.search.suggest.analyzing.AnalyzingSuggester;
import org.apache.lucene.store.FSDirectory;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
@@ -76,7 +75,7 @@ public class AnalyzingInfixLookupFactory extends LookupFactory {
if (ft == null) {
throw new IllegalArgumentException("Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
}
Analyzer indexAnalyzer = ft.getAnalyzer();
Analyzer indexAnalyzer = ft.getIndexAnalyzer();
Analyzer queryAnalyzer = ft.getQueryAnalyzer();
// optional parameters


@@ -87,7 +87,7 @@ public class AnalyzingLookupFactory extends LookupFactory {
throw new IllegalArgumentException("Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
}
Analyzer indexAnalyzer = ft.getAnalyzer();
Analyzer indexAnalyzer = ft.getIndexAnalyzer();
Analyzer queryAnalyzer = ft.getQueryAnalyzer();
// optional parameters


@@ -74,7 +74,7 @@ public class BlendedInfixLookupFactory extends AnalyzingInfixLookupFactory {
if (ft == null) {
throw new IllegalArgumentException("Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
}
Analyzer indexAnalyzer = ft.getAnalyzer();
Analyzer indexAnalyzer = ft.getIndexAnalyzer();
Analyzer queryAnalyzer = ft.getQueryAnalyzer();
// optional parameters


@@ -64,7 +64,7 @@ public class FreeTextLookupFactory extends LookupFactory {
throw new IllegalArgumentException("Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
}
Analyzer indexAnalyzer = ft.getAnalyzer();
Analyzer indexAnalyzer = ft.getIndexAnalyzer();
Analyzer queryAnalyzer = ft.getQueryAnalyzer();
int grams = (params.get(NGRAMS) != null)


@@ -81,7 +81,7 @@ public class FuzzyLookupFactory extends LookupFactory {
if (ft == null) {
throw new IllegalArgumentException("Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
}
Analyzer indexAnalyzer = ft.getAnalyzer();
Analyzer indexAnalyzer = ft.getIndexAnalyzer();
Analyzer queryAnalyzer = ft.getQueryAnalyzer();
// optional parameters


@@ -229,11 +229,11 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
}
if (cmd.isBlock()) {
writer.updateDocuments(updateTerm, cmd, schema.getAnalyzer());
writer.updateDocuments(updateTerm, cmd, schema.getIndexAnalyzer());
} else {
Document luceneDocument = cmd.getLuceneDocument();
// SolrCore.verbose("updateDocument",updateTerm,luceneDocument,writer);
writer.updateDocument(updateTerm, luceneDocument, schema.getAnalyzer());
writer.updateDocument(updateTerm, luceneDocument, schema.getIndexAnalyzer());
}
// SolrCore.verbose("updateDocument",updateTerm,"DONE");
@@ -258,9 +258,9 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
} else {
// allow duplicates
if (cmd.isBlock()) {
writer.addDocuments(cmd, schema.getAnalyzer());
writer.addDocuments(cmd, schema.getIndexAnalyzer());
} else {
writer.addDocument(cmd.getLuceneDocument(), schema.getAnalyzer());
writer.addDocument(cmd.getLuceneDocument(), schema.getIndexAnalyzer());
}
if (ulog != null) ulog.add(cmd);
@@ -437,7 +437,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
try {
IndexWriter writer = iw.get();
writer.updateDocument(idTerm, luceneDocument, cmd.getReq().getSchema()
.getAnalyzer());
.getIndexAnalyzer());
for (Query q : dbqList) {
writer.deleteDocuments(q);


@@ -47,12 +47,12 @@ public class TestLuceneMatchVersion extends SolrTestCaseJ4 {
final IndexSchema schema = h.getCore().getLatestSchema();
FieldType type = schema.getFieldType("textDefault");
TokenizerChain ana = (TokenizerChain) type.getAnalyzer();
TokenizerChain ana = (TokenizerChain) type.getIndexAnalyzer();
assertEquals(DEFAULT_VERSION, (ana.getTokenizerFactory()).getLuceneMatchVersion());
assertEquals(DEFAULT_VERSION, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
type = schema.getFieldType("text40");
ana = (TokenizerChain) type.getAnalyzer();
ana = (TokenizerChain) type.getIndexAnalyzer();
assertEquals(Version.LUCENE_4_0, (ana.getTokenizerFactory()).getLuceneMatchVersion());
assertEquals(Version.LUCENE_5_0, (ana.getTokenFilterFactories()[2]).getLuceneMatchVersion());
@@ -61,12 +61,12 @@ public class TestLuceneMatchVersion extends SolrTestCaseJ4 {
matchVersionField.setAccessible(true);
type = schema.getFieldType("textStandardAnalyzerDefault");
Analyzer ana1 = type.getAnalyzer();
Analyzer ana1 = type.getIndexAnalyzer();
assertTrue(ana1 instanceof StandardAnalyzer);
assertEquals(DEFAULT_VERSION, matchVersionField.get(ana1));
type = schema.getFieldType("textStandardAnalyzer40");
ana1 = type.getAnalyzer();
ana1 = type.getIndexAnalyzer();
assertTrue(ana1 instanceof StandardAnalyzer);
assertEquals(Version.LUCENE_4_0, matchVersionField.get(ana1));
}


@@ -18,14 +18,12 @@ package org.apache.solr.analysis;
import java.io.IOException;
import java.io.StringReader;
import java.lang.reflect.Field;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.search.AutomatonQuery;
import org.apache.lucene.search.Query;
@@ -82,7 +80,7 @@ public class TestReversedWildcardFilterFactory extends SolrTestCaseJ4 {
@Test
public void testIndexingAnalysis() throws Exception {
Analyzer a = schema.getAnalyzer();
Analyzer a = schema.getIndexAnalyzer();
String text = "one two three si\uD834\uDD1Ex";
// field one


@@ -51,7 +51,7 @@ public class MultiTermTest extends SolrTestCaseJ4 {
assertTrue((factory instanceof ASCIIFoldingFilterFactory) || (factory instanceof LowerCaseFilterFactory));
}
analyzer = field.getType().getAnalyzer();
analyzer = field.getType().getIndexAnalyzer();
assertTrue(analyzer instanceof TokenizerChain);
assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof WhitespaceTokenizerFactory);
tc = (TokenizerChain) analyzer;


@@ -26,7 +26,6 @@ import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieDateField;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.CursorPagingTest;
@@ -35,7 +34,6 @@ import static org.apache.solr.common.params.CursorMarkParams.CURSOR_MARK_START;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Collection;
import java.util.Collections;
@@ -265,7 +263,7 @@ public class CursorMarkTest extends SolrTestCaseJ4 {
private static Object getRandomCollation(SchemaField sf) throws IOException {
Object val;
Analyzer analyzer = sf.getType().getAnalyzer();
Analyzer analyzer = sf.getType().getIndexAnalyzer();
String term = TestUtil.randomRealisticUnicodeString(random());
try (TokenStream ts = analyzer.tokenStream("fake", term)) {
TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class);