SOLR-11592: Add OpenNLP language detection to the langid contrib

This commit is contained in:
Steve Rowe 2018-01-17 11:29:17 -05:00
parent 5e2ef5eb73
commit 03095ce4d2
15 changed files with 476 additions and 73 deletions

View File

@ -31,5 +31,6 @@
<orderEntry type="module" module-name="lucene-core" />
<orderEntry type="module" module-name="solr-core" />
<orderEntry type="module" module-name="solrj" />
<orderEntry type="module" module-name="analysis-common" />
</component>
</module>

View File

@ -94,6 +94,8 @@ New Features
* SOLR-11810: Upgrade Jetty to 9.4.8.v20171121 (Varun Thacker, Erick Erickson)
* SOLR-11592: Add OpenNLP language detection to the langid contrib. (Koji, Steve Rowe)
Bug Fixes
----------------------

View File

@ -19,3 +19,4 @@ Dependencies
------------
The Tika detector depends on Tika Core (which is part of extraction contrib)
The Langdetect detector depends on LangDetect library
The OpenNLP detector depends on OpenNLP tools and requires a previously trained user-supplied model

View File

@ -25,6 +25,17 @@
<import file="../contrib-build.xml"/>
<property name="test.model.dir" location="${tests.userdir}/langid/solr/collection1/conf"/>
<property name="test.leipzig.folder.link" value="http://pcai056.informatik.uni-leipzig.de/downloads/corpora"/>
<property name="test.build.models.dir" location="${build.dir}/build-test-models"/>
<property name="test.build.models.data.dir" location="${test.build.models.dir}/data"/>
<property name="test.build.models.sentences.dir" location="${test.build.models.dir}/train"/>
<property name="test.opennlp.model" value="opennlp-langdetect.eng-swe-spa-rus-deu.bin"/>
<path id="opennlp.jars">
<fileset dir="lib" includes="opennlp*.jar"/>
</path>
<path id="classpath">
<fileset dir="../extraction/lib" excludes="${common.classpath.excludes}"/>
<fileset dir="lib" excludes="${common.classpath.excludes}"/>
@ -39,4 +50,53 @@
</target>
<target name="compile-core" depends="resolve-extraction-libs,solr-contrib-build.compile-core"/>
<!--
Create test models using data for five languages from the Leipzig corpora.
See http://opennlp.apache.org/docs/1.8.3/manual/opennlp.html#tools.langdetect.training.leipzig
-->
<target name="train-test-models" description="Train small test models for unit tests" depends="resolve">
<download-leipzig language.code="eng"/>
<download-leipzig language.code="swe"/>
<download-leipzig language.code="spa"/>
<download-leipzig language.code="rus"/>
<download-leipzig language.code="deu"/>
<echo message="Train OpenNLP test model over data from the Leipzig corpora"/>
<java classname="opennlp.tools.cmdline.CLI" classpathref="opennlp.jars" fork="true" failonerror="true">
<arg value="LanguageDetectorTrainer.leipzig"/>
<arg value="-model"/>
<arg value="${test.model.dir}/${test.opennlp.model}"/>
<arg value="-params"/>
<arg value="${tests.userdir}/opennlp.langdetect.trainer.params.txt"/>
<arg value="-sentencesDir"/>
<arg value="${test.build.models.sentences.dir}"/>
<arg value="-sentencesPerSample"/>
<arg value="3"/>
<arg value="-samplesPerLanguage"/>
<arg value="10000"/>
</java>
</target>
<macrodef name="download-leipzig">
<attribute name="language.code"/>
<attribute name="leipzig.tarball" default="@{language.code}_news_2007_30K.tar.gz"/>
<sequential>
<mkdir dir="${test.build.models.data.dir}"/>
<get src="${test.leipzig.folder.link}/@{leipzig.tarball}" dest="${test.build.models.data.dir}"/>
<untar compression="gzip" src="${test.build.models.data.dir}/@{leipzig.tarball}"
dest="${test.build.models.sentences.dir}">
<patternset>
<include name="*-sentences.txt"/>
</patternset>
</untar>
</sequential>
</macrodef>
<target name="regenerate" depends="train-test-models"/>
</project>

View File

@ -25,6 +25,7 @@
<dependencies>
<dependency org="com.cybozu.labs" name="langdetect" rev="${/com.cybozu.labs/langdetect}" conf="compile"/>
<dependency org="net.arnx" name="jsonic" rev="${/net.arnx/jsonic}" conf="compile"/>
<dependency org="org.apache.opennlp" name="opennlp-tools" rev="${/org.apache.opennlp/opennlp-tools}" conf="compile"/>
<exclude org="*" ext="*" matcher="regexp" type="${ivy.exclude.types}"/>
</dependencies>

View File

@ -33,6 +33,7 @@ import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -399,4 +400,67 @@ public abstract class LanguageIdentifierUpdateProcessor extends UpdateRequestPro
this.enabled = enabled;
}
/**
 * Concatenates the values of all configured {@code inputFields} from the given document
 * into a single space-separated string for language detection.
 * <p>
 * Each individual value is truncated to {@code maxFieldValueChars}, and the overall
 * result is capped at {@code maxTotalChars}. Non-String field values are skipped with
 * a warning.
 *
 * @param doc the document whose fields are concatenated
 * @return the concatenated detection text, possibly empty, never null
 */
protected String concatFields(SolrInputDocument doc) {
  StringBuilder sb = new StringBuilder(getExpectedSize(doc, inputFields));
  for (String fieldName : inputFields) {
    log.debug("Appending field {}", fieldName);
    if (doc.containsKey(fieldName)) {
      Collection<Object> fieldValues = doc.getFieldValues(fieldName);
      if (fieldValues != null) {
        for (Object content : fieldValues) {
          if (content instanceof String) {
            String stringContent = (String) content;
            if (stringContent.length() > maxFieldValueChars) {
              // Append a bounded view instead of allocating a substring copy.
              sb.append(stringContent, 0, maxFieldValueChars);
            } else {
              sb.append(stringContent);
            }
            sb.append(' ');
            if (sb.length() >= maxTotalChars) {
              // Budget exhausted: truncate and stop scanning the remaining
              // fields/values entirely. (The previous break only exited the
              // inner loop, so later fields were appended and re-truncated
              // to the same prefix — identical result, wasted work.)
              sb.setLength(maxTotalChars);
              return sb.toString();
            }
          } else {
            log.warn("Field {} not a String value, not including in detection", fieldName);
          }
        }
      }
    }
  }
  return sb.toString();
}
/**
 * Estimates the length of the string {@link #concatFields} will build, so the
 * StringBuilder can be pre-sized.
 *
 * @param doc solr input document
 * @param fields fields to select
 * @return expected size of string value, capped at {@code maxTotalChars}
 */
private int getExpectedSize(SolrInputDocument doc, String[] fields) {
  int total = 0;
  for (String field : fields) {
    if (!doc.containsKey(field)) {
      continue;
    }
    Collection<Object> values = doc.getFieldValues(field);
    if (values == null) {
      continue;
    }
    // Each String value contributes at most maxFieldValueChars.
    for (Object value : values) {
      if (value instanceof String) {
        total += Math.min(((String) value).length(), maxFieldValueChars);
      }
    }
    // The concatenation is capped overall, so stop counting once we pass the cap.
    if (total > maxTotalChars) {
      return maxTotalChars;
    }
  }
  return total;
}
}

View File

@ -0,0 +1,80 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.update.processor;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import opennlp.tools.langdetect.Language;
import opennlp.tools.langdetect.LanguageDetectorME;
import opennlp.tools.langdetect.LanguageDetectorModel;
/**
 * Identifies the language of a set of input fields using <a href="https://opennlp.apache.org/">Apache OpenNLP</a>.
 * <p>
 * Detected ISO 639-3 codes are mapped to their ISO 639-1 equivalents where one exists.
 * <p>
 * See "Language Detector" section of
 * <a href="https://opennlp.apache.org/docs/1.8.3/manual/opennlp.html">https://opennlp.apache.org/docs/1.8.3/manual/opennlp.html</a>
 */
public class OpenNLPLangDetectUpdateProcessor extends LanguageIdentifierUpdateProcessor {

  private final LanguageDetectorModel model;

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  /** Maps ISO 639-3 (3-letter language code) to ISO 639-1 (2-letter language code) */
  private static final Map<String,String> ISO639_MAP = makeIso639Map();

  public OpenNLPLangDetectUpdateProcessor(SolrQueryRequest req, SolrQueryResponse rsp,
      UpdateRequestProcessor next, LanguageDetectorModel model) {
    super(req, rsp, next);
    this.model = model;
  }

  /**
   * Runs the OpenNLP detector over the concatenated input fields.
   *
   * @return one {@link DetectedLanguage} per language the model predicts, with its
   *         confidence; empty when there is no input text
   */
  @Override
  protected List<DetectedLanguage> detectLanguage(SolrInputDocument doc) {
    List<DetectedLanguage> languages = new ArrayList<>();
    String content = concatFields(doc);
    if (content.length() == 0) {
      log.debug("No input text to detect language from, returning empty list");
      return languages;
    }
    LanguageDetectorME detector = new LanguageDetectorME(model);
    for (Language language : detector.predictLanguages(content)) {
      // Fall back to the raw ISO 639-3 code when no ISO 639-1 equivalent exists,
      // instead of emitting a null language code for languages outside the JDK's map.
      String code = ISO639_MAP.getOrDefault(language.getLang(), language.getLang());
      languages.add(new DetectedLanguage(code, language.getConfidence()));
    }
    return languages;
  }

  /** Builds the ISO 639-3 → ISO 639-1 mapping from the JDK's known locales. */
  private static Map<String,String> makeIso639Map() {
    Map<String,String> map = new HashMap<>();
    for (String lang : Locale.getISOLanguages()) {
      Locale locale = new Locale(lang);
      map.put(locale.getISO3Language(), locale.getLanguage());
    }
    return map;
  }
}

View File

@ -0,0 +1,130 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.update.processor;
import java.io.IOException;
import java.io.InputStream;
import org.apache.commons.io.IOUtils;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.util.SolrPluginUtils;
import org.apache.solr.util.plugin.SolrCoreAware;
import opennlp.tools.langdetect.LanguageDetectorModel;
/**
 * Identifies the language of a set of input fields using <a href="https://opennlp.apache.org/">Apache OpenNLP</a>.
 * <p>
 * The UpdateProcessorChain config entry can take a number of parameters
 * which may also be passed as HTTP parameters on the update request
 * and override the defaults. Here is the simplest processor config possible:
 *
 * <pre class="prettyprint" >
 * &lt;processor class=&quot;org.apache.solr.update.processor.OpenNLPLangDetectUpdateProcessorFactory&quot;&gt;
 *   &lt;str name=&quot;langid.fl&quot;&gt;title,text&lt;/str&gt;
 *   &lt;str name=&quot;langid.langField&quot;&gt;language_s&lt;/str&gt;
 *   &lt;str name="langid.model"&gt;langdetect-183.bin&lt;/str&gt;
 * &lt;/processor&gt;
 * </pre>
 * See <a href="http://wiki.apache.org/solr/LanguageDetection">http://wiki.apache.org/solr/LanguageDetection</a>
 */
public class OpenNLPLangDetectUpdateProcessorFactory extends UpdateRequestProcessorFactory
  implements SolrCoreAware {

  private static final String MODEL_PARAM = "langid.model";

  private String modelFile;
  private LanguageDetectorModel model;
  protected SolrParams defaults;
  protected SolrParams appends;
  protected SolrParams invariants;
  private SolrResourceLoader solrResourceLoader;

  /**
   * Reads defaults/appends/invariants and resolves the required {@code langid.model}
   * filename: invariants win, then a direct {@code langid.model} arg, then defaults.
   *
   * @throws RuntimeException when no model filename is configured anywhere
   */
  @Override
  public void init( NamedList args )
  {
    if (args != null) {
      Object o;
      o = args.get("defaults");
      // instanceof is already null-safe; no separate null check needed.
      if (o instanceof NamedList) {
        defaults = SolrParams.toSolrParams((NamedList) o);
      } else {
        defaults = SolrParams.toSolrParams(args);
      }
      o = args.get("appends");
      if (o instanceof NamedList) {
        appends = SolrParams.toSolrParams((NamedList) o);
      }
      o = args.get("invariants");
      if (o instanceof NamedList) {
        invariants = SolrParams.toSolrParams((NamedList) o);
      }

      // Look for model filename in invariants, then in args, then defaults
      if (invariants != null) {
        modelFile = invariants.get(MODEL_PARAM);
      }
      if (modelFile == null) {
        o = args.get(MODEL_PARAM);
        if (o instanceof String) {
          modelFile = (String)o;
        } else {
          modelFile = defaults.get(MODEL_PARAM);
          if (modelFile == null) {
            throw new RuntimeException("Couldn't load language model, will return empty languages always!");
          }
        }
      }
    }
  }

  @Override
  public UpdateRequestProcessor getInstance(SolrQueryRequest req, SolrQueryResponse rsp, UpdateRequestProcessor next) {
    // Process defaults, appends and invariants if we got a request
    if (req != null) {
      SolrPluginUtils.setDefaults(req, defaults, appends, invariants);
    }
    return new OpenNLPLangDetectUpdateProcessor(req, rsp, next, model);
  }

  /**
   * Loads the configured OpenNLP language detection model from the core's
   * resource loader. No-op when no model filename was configured.
   *
   * @throws IOException if the model resource cannot be opened or parsed
   */
  private void loadModel() throws IOException {
    if (modelFile == null) {
      return;
    }
    // try-with-resources guarantees the stream is closed even when the
    // model fails to parse (replaces manual null-tracking + closeQuietly).
    try (InputStream is = solrResourceLoader.openResource(modelFile)) {
      model = new LanguageDetectorModel(is);
    }
  }

  @Override
  public void inform(SolrCore core){
    solrResourceLoader = core.getResourceLoader();
    try {
      loadModel();
    } catch (IOException e) {
      // Model loading failure is fatal for this processor; surface it at core init.
      throw new RuntimeException(e);
    }
  }
}

View File

@ -28,8 +28,6 @@ import org.apache.solr.common.SolrInputDocument;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
/**
* Identifies the language of a set of input fields using Tika's
* LanguageIdentifier.
@ -67,67 +65,4 @@ public class TikaLanguageIdentifierUpdateProcessor extends LanguageIdentifierUpd
}
return languages;
}
/**
 * Concatenates content from multiple fields
 */
// NOTE(review): this commit removes this method from the Tika subclass — an
// identical implementation is hoisted into LanguageIdentifierUpdateProcessor
// so it can be shared by all detector implementations.
protected String concatFields(SolrInputDocument doc) {
  StringBuilder sb = new StringBuilder(getExpectedSize(doc, inputFields));
  for (String fieldName : inputFields) {
    log.debug("Appending field " + fieldName);
    if (doc.containsKey(fieldName)) {
      Collection<Object> fieldValues = doc.getFieldValues(fieldName);
      if (fieldValues != null) {
        for (Object content : fieldValues) {
          if (content instanceof String) {
            String stringContent = (String) content;
            // Cap each individual value at maxFieldValueChars.
            if (stringContent.length() > maxFieldValueChars) {
              sb.append(stringContent.substring(0, maxFieldValueChars));
            } else {
              sb.append(stringContent);
            }
            sb.append(" ");
            // Cap the overall concatenation at maxTotalChars.
            if (sb.length() > maxTotalChars) {
              sb.setLength(maxTotalChars);
              break;
            }
          } else {
            log.warn("Field " + fieldName + " not a String value, not including in detection");
          }
        }
      }
    }
  }
  return sb.toString();
}
/**
 * Calculate expected string size.
 *
 * @param doc solr input document
 * @param fields fields to select
 * @return expected size of string value
 */
// NOTE(review): removed from the Tika subclass by this commit — an identical
// implementation now lives in the LanguageIdentifierUpdateProcessor base class.
private int getExpectedSize(SolrInputDocument doc, String[] fields) {
  int docSize = 0;
  for (String field : fields) {
    if (doc.containsKey(field)) {
      Collection<Object> contents = doc.getFieldValues(field);
      if (contents != null) {
        // Each String value contributes at most maxFieldValueChars.
        for (Object content : contents) {
          if (content instanceof String) {
            docSize += Math.min(((String) content).length(), maxFieldValueChars);
          }
        }
        // Overall estimate is capped at maxTotalChars.
        if (docSize > maxTotalChars) {
          docSize = maxTotalChars;
          break;
        }
      }
    }
  }
  return docSize;
}
}

View File

@ -57,11 +57,11 @@
<requestHandler name="/update" class="solr.UpdateRequestHandler" >
<lst name="defaults">
<str name="update.chain">lang_id</str>
<str name="update.chain">lang_id_tika</str>
</lst>
</requestHandler>
<updateRequestProcessorChain name="lang_id">
<updateRequestProcessorChain name="lang_id_tika">
<processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
@ -78,7 +78,7 @@
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="lang_id_alt">
<updateRequestProcessorChain name="lang_id_lang_detect">
<processor class="org.apache.solr.update.processor.LangDetectLanguageIdentifierUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
@ -95,4 +95,21 @@
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="lang_id_opennlp">
<processor class="org.apache.solr.update.processor.OpenNLPLangDetectUpdateProcessorFactory">
<!-- Can take defaults, invariants and appends just like req handlers-->
<lst name="defaults">
<bool name="langid">true</bool>
<str name="langid.fl">name,subject</str>
<bool name="langid.map">true</bool>
<str name="langid.langField">language_s</str>
<str name="langid.langsField">language_sm</str>
<str name="langid.map.lcmap">th:thai</str>
<float name="threshold">0.3</float>
<str name="langid.model">opennlp-langdetect.eng-swe-spa-rus-deu.bin</str>
</lst>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
</config>

View File

@ -0,0 +1,17 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Algorithm=PERCEPTRON
Cutoff=0

View File

@ -38,7 +38,11 @@ public abstract class LanguageIdentifierUpdateProcessorFactoryTestCase extends S
public static void beforeClass() throws Exception {
initCore("solrconfig-languageidentifier.xml", "schema.xml", getFile("langid/solr").getAbsolutePath());
SolrCore core = h.getCore();
UpdateRequestProcessorChain chained = core.getUpdateProcessingChain("lang_id");
UpdateRequestProcessorChain chained = core.getUpdateProcessingChain("lang_id_tika");
assertNotNull(chained);
chained = core.getUpdateProcessingChain("lang_id_lang_detect");
assertNotNull(chained);
chained = core.getUpdateProcessingChain("lang_id_opennlp");
assertNotNull(chained);
}

View File

@ -0,0 +1,66 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.update.processor;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.junit.Test;
/**
 * Tests {@code OpenNLPLangDetectUpdateProcessorFactory} / {@code OpenNLPLangDetectUpdateProcessor}
 * against a small model trained over five languages (eng/swe/spa/rus/deu); see the
 * langid contrib's build.xml "train-test-models" target for how the model is produced.
 * Inherits the shared detector test suite from the superclass.
 */
public class OpenNLPLangDetectUpdateProcessorFactoryTest extends LanguageIdentifierUpdateProcessorFactoryTestCase {
  // Must match the model filename referenced in solrconfig-languageidentifier.xml.
  private static final String TEST_MODEL = "opennlp-langdetect.eng-swe-spa-rus-deu.bin";

  @Override
  protected OpenNLPLangDetectUpdateProcessor createLangIdProcessor(ModifiableSolrParams parameters) throws Exception {
    if (parameters.get("langid.model") == null) { // handle superclass tests that don't provide the model filename
      parameters.set("langid.model", TEST_MODEL);
    }
    if (parameters.get("langid.threshold") == null) { // handle superclass tests that don't provide confidence threshold
      parameters.set("langid.threshold", "0.3");
    }
    SolrQueryRequest req = _parser.buildRequestFrom(h.getCore(), new ModifiableSolrParams(), null);
    OpenNLPLangDetectUpdateProcessorFactory factory = new OpenNLPLangDetectUpdateProcessorFactory();
    factory.init(parameters.toNamedList());
    factory.inform(h.getCore());
    return (OpenNLPLangDetectUpdateProcessor)factory.getInstance(req, resp, null);
  }

  // Unlike the other detectors, OpenNLP copes with very short input, so the
  // "too short" fixture is simply an empty text field.
  @Override
  protected SolrInputDocument tooShortDoc() {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("text", "");
    return doc;
  }

  // Overridden to pass the model and threshold that the OpenNLP detector requires.
  @Test @Override
  public void testLangIdGlobal() throws Exception {
    ModifiableSolrParams parameters = new ModifiableSolrParams();
    parameters.add("langid.fl", "name,subject");
    parameters.add("langid.langField", "language_s");
    parameters.add("langid.model", TEST_MODEL);
    parameters.add("langid.threshold", "0.3");
    liProcessor = createLangIdProcessor(parameters);

    assertLang("en", "id", "1en", "name", "Lucene", "subject", "Apache Lucene is a free/open source information retrieval software library, originally created in Java by Doug Cutting. It is supported by the Apache Software Foundation and is released under the Apache Software License.");
    assertLang("sv", "id", "2sv", "name", "Maven", "subject", "Apache Maven är ett verktyg utvecklat av Apache Software Foundation och används inom systemutveckling av datorprogram i programspråket Java. Maven används för att automatiskt paketera (bygga) programfilerna till en distribuerbar enhet. Maven används inom samma område som Apache Ant men dess byggfiler är deklarativa till skillnad ifrån Ants skriptbaserade.");
    assertLang("es", "id", "3es", "name", "Lucene", "subject", "Lucene es un API de código abierto para recuperación de información, originalmente implementada en Java por Doug Cutting. Está apoyado por el Apache Software Foundation y se distribuye bajo la Apache Software License. Lucene tiene versiones para otros lenguajes incluyendo Delphi, Perl, C#, C++, Python, Ruby y PHP.");
    assertLang("ru", "id", "4ru", "name", "Lucene", "subject", "The Apache Lucene — это свободная библиотека для высокоскоростного полнотекстового поиска, написанная на Java. Может быть использована для поиска в интернете и других областях компьютерной лингвистики (аналитическая философия).");
    assertLang("de", "id", "5de", "name", "Lucene", "subject", "Lucene ist ein Freie-Software-Projekt der Apache Software Foundation, das eine Suchsoftware erstellt. Durch die hohe Leistungsfähigkeit und Skalierbarkeit können die Lucene-Werkzeuge für beliebige Projektgrößen und Anforderungen eingesetzt werden. So setzt beispielsweise Wikipedia Lucene für die Volltextsuche ein. Zudem verwenden die beiden Desktop-Suchprogramme Beagle und Strigi eine C#- bzw. C++- Portierung von Lucene als Indexer.");
  }
}

View File

@ -18,12 +18,13 @@
Solr can identify languages and map text to language-specific fields during indexing using the `langid` UpdateRequestProcessor.
Solr supports two implementations of this feature:
Solr supports three implementations of this feature:
* Tika's language detection feature: http://tika.apache.org/0.10/detection.html
* LangDetect language detection: https://github.com/shuyo/language-detection
* OpenNLP language detection: http://opennlp.apache.org/docs/1.8.4/manual/opennlp.html#tools.langdetect
You can see a comparison between the two implementations here: http://blog.mikemccandless.com/2011/10/accuracy-and-performance-of-googles.html. In general, the LangDetect implementation supports more languages with higher performance.
You can see a comparison between the Tika and LangDetect implementations here: http://blog.mikemccandless.com/2011/10/accuracy-and-performance-of-googles.html. In general, the LangDetect implementation supports more languages with higher performance.
For specific information on each of these language identification implementations, including a list of supported languages for each, see the relevant project websites.
@ -61,6 +62,30 @@ Here is an example of a minimal LangDetect `langid` configuration in `solrconfig
</processor>
----
=== Configuring OpenNLP Language Detection
Here is an example of a minimal OpenNLP `langid` configuration in `solrconfig.xml`:
[source,xml]
----
<processor class="org.apache.solr.update.processor.OpenNLPLangDetectUpdateProcessorFactory">
<lst name="defaults">
<str name="langid.fl">title,subject,text,keywords</str>
<str name="langid.langField">language_s</str>
<str name="langid.model">langdetect-183.bin</str>
</lst>
</processor>
----
==== OpenNLP-specific parameters
`langid.model`::
An OpenNLP language detection model. The OpenNLP project provides a pre-trained 103 language model on the http://opennlp.apache.org/models.html[OpenNLP site's model download page]. Model training instructions are provided on the http://opennlp.apache.org/docs/1.8.4/manual/opennlp.html#tools.langdetect[OpenNLP website]. This parameter is required.
==== OpenNLP language codes
`OpenNLPLangDetectUpdateProcessor` automatically converts the 3-letter ISO 639-3 codes detected by the OpenNLP model into 2-letter ISO 639-1 codes.
== langid Parameters
As previously mentioned, both implementations of the `langid` UpdateRequestProcessor take the same parameters.