LUCENE-4797: fix remaining html violations, engage linter in solr

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1658539 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Robert Muir 2015-02-09 19:02:32 +00:00
parent 87b7393360
commit 12b30db6ce
166 changed files with 309 additions and 363 deletions

View File

@ -30,10 +30,6 @@
<property name="javac.target" value="1.8"/>
<property name="javac.args" value=""/>
<!-- for now disable doclint: -->
<property name="javadoc.doclint.args" value="-Xdoclint:none"/>
<property name="javac.doclint.args" value=""/>
<property name="dest" location="${common-solr.dir}/build" />
<property name="build.dir" location="${dest}/${ant.project.name}"/>
<property name="dist" location="${common-solr.dir}/dist"/>

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.

View File

@ -23,10 +23,10 @@ import java.io.InputStream;
import java.io.IOException;
import java.util.Properties;
/**
* <p> A data source implementation which can be used to read binary stream from content streams. </p> <p/> <p> Refer to <a
* <p> A data source implementation which can be used to read binary stream from content streams. </p> <p> Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a> for more
* details. </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 3.1

View File

@ -28,13 +28,12 @@ import java.util.Properties;
* <p>
* A DataSource which reads from local files
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 3.1

View File

@ -26,10 +26,10 @@ import java.net.URL;
import java.net.URLConnection;
import java.util.Properties;
/**
* <p> A data source implementation which can be used to read binary streams using HTTP. </p> <p/> <p> Refer to <a
* <p> A data source implementation which can be used to read binary streams using HTTP. </p> <p> Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a> for more
* details. </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 3.1

View File

@ -27,10 +27,10 @@ import java.util.Map;
/**
* {@link Transformer} instance which converts a {@link Clob} to a {@link String}.
* <p/>
* <p>
* Refer to <a href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.4

View File

@ -25,10 +25,10 @@ import java.util.Properties;
/**
* A DataSource implementation which reads from the ContentStream of a POST request
* <p/>
* <p>
* Refer to <a href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.4

View File

@ -32,7 +32,7 @@ import java.util.Map;
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -35,7 +35,6 @@ public interface DIHCache extends Iterable<Map<String,Object>> {
* Opens the cache using the specified properties. The {@link Context}
* includes any parameters needed by the cache impl. This must be called
* before any read/write operations are permitted.
* <p>
*/
public void open(Context context);

View File

@ -36,7 +36,7 @@ public interface DIHWriter {
/**
* <p>
* Release resources used by this writer. After calling close, reads & updates will throw exceptions.
* Release resources used by this writer. After calling close, reads &amp; updates will throw exceptions.
* </p>
*/
public void close();

View File

@ -57,13 +57,12 @@ import org.xml.sax.InputSource;
* <p>
* It is configured in solrconfig.xml
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -19,9 +19,8 @@ package org.apache.solr.handler.dataimport;
/**
* <p> Exception class for all DataImportHandler exceptions </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
* <p/>
*
* @since solr 1.3
*/

View File

@ -62,7 +62,7 @@ import java.util.concurrent.locks.ReentrantLock;
/**
* <p> Stores all configuration information for pulling and indexing data. </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -23,17 +23,15 @@ import java.util.Properties;
* <p>
* Provides data from a source with a given query.
* </p>
* <p/>
* <p>
* Implementation of this abstract class must provide a default no-arg constructor
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3
@ -43,7 +41,7 @@ public abstract class DataSource<T> {
/**
* Initializes the DataSource with the <code>Context</code> and
* initialization properties.
* <p/>
* <p>
* This is invoked by the <code>DataImporter</code> after creating an
* instance of this class.
*/

View File

@ -47,7 +47,6 @@ import org.apache.solr.util.DateMathParser;
* <li>The {@link Locale} to parse.
* (optional. Defaults to the Root Locale) </li>
* </ul>
* </p>
*/
public class DateFormatEvaluator extends Evaluator {

View File

@ -28,13 +28,11 @@ import org.slf4j.LoggerFactory;
* <p>
* {@link Transformer} instance which creates {@link Date} instances out of {@link String}s.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -43,7 +43,7 @@ import java.util.concurrent.atomic.AtomicLong;
/**
* <p> {@link DocBuilder} is responsible for creating Solr documents out of the given configuration. It also maintains
* statistics information. It depends on the {@link EntityProcessor} implementations to fetch data. </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -23,17 +23,15 @@ import java.util.Map;
* An instance of entity processor serves an entity. It is reused throughout the
* import process.
* </p>
* <p/>
* <p>
* Implementations of this abstract class must provide a public no-args constructor.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -25,9 +25,9 @@ import org.slf4j.LoggerFactory;
import java.util.*;
/**
* <p> Base class for all implementations of {@link EntityProcessor} </p> <p/> <p> Most implementations of {@link EntityProcessor}
* <p> Base class for all implementations of {@link EntityProcessor} </p> <p> Most implementations of {@link EntityProcessor}
* extend this base class which provides common functionality. </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -31,17 +31,17 @@ import java.util.Properties;
/**
* This can be useful for users who have a DB field containing xml and wish to use a nested {@link XPathEntityProcessor}
* <p/>
* <p>
 * The datasource may be configured as follows
* <p/>
* <p>
* &lt;datasource name="f1" type="FieldReaderDataSource" /&gt;
* <p/>
* <p>
 * The entity which uses this datasource must keep the url value as the variable name url="field-name"
* <p/>
* <p>
* The fieldname must be resolvable from {@link VariableResolver}
* <p/>
* <p>
* This may be used with any {@link EntityProcessor} which uses a {@link DataSource}&lt;{@link Reader}&gt; eg: {@link XPathEntityProcessor}
* <p/>
* <p>
* Supports String, BLOB, CLOB data types and there is an extra field (in the entity) 'encoding' for BLOB types
*
* @since 1.4

View File

@ -31,17 +31,16 @@ import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVE
/**
* This can be useful for users who have a DB field containing BLOBs which may be Rich documents
* <p/>
* <p>
 * The datasource may be configured as follows
* <p/>
* <p>
* &lt;dataSource name="f1" type="FieldStreamDataSource" /&gt;
* <p/>
* <p>
 * The entity which uses this datasource must keep an attribute dataField
* <p/>
* <p>
* The fieldname must be resolvable from {@link VariableResolver}
* <p/>
* <p>
* This may be used with any {@link EntityProcessor} which uses a {@link DataSource}&lt;{@link InputStream}&gt; eg: TikaEntityProcessor
* <p/>
*
* @since 3.1
*/

View File

@ -34,13 +34,12 @@ import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVE
 * The file is read with the default platform encoding. It can be overridden by
* specifying the encoding in solrconfig.xml
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -31,7 +31,6 @@ import org.apache.solr.util.DateMathParser;
* An {@link EntityProcessor} instance which can stream file names found in a given base
* directory matching patterns and returning rows containing file information.
* </p>
* <p/>
* <p>
 * It supports querying a given base directory by matching:
* <ul>
@ -43,14 +42,12 @@ import org.apache.solr.util.DateMathParser;
* </ul>
* Its output can be used along with {@link FileDataSource} to read from files in file
* systems.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -33,10 +33,10 @@ import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
/**
* <p> A DataSource implementation which can fetch data using JDBC. </p> <p/> <p> Refer to <a
* <p> A DataSource implementation which can fetch data using JDBC. </p> <p> Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a> for more
* details. </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -28,7 +28,6 @@ import org.apache.commons.io.IOUtils;
* An {@link EntityProcessor} instance which can stream lines of text read from a
* datasource. Options allow lines to be explicitly skipped or included in the index.
* </p>
* <p/>
* <p>
* Attribute summary
* <ul>
@ -39,17 +38,16 @@ import org.apache.commons.io.IOUtils;
* <li>skipLineRegex is an optional attribute that is applied after any
* acceptLineRegex and discards any line which matches this regExp.</li>
* </ul>
* </p><p>
* <p>
* Although envisioned for reading lines from a file or url, LineEntityProcessor may also be useful
* for dealing with change lists, where each line contains filenames which can be used by subsequent entities
* to parse content from those files.
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.4

View File

@ -23,10 +23,10 @@ import java.util.Map;
/**
* A {@link Transformer} implementation which logs messages in a given template format.
* <p/>
* <p>
* Refer to <a href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.4

View File

@ -25,7 +25,7 @@ import java.util.Properties;
* <p>
* A mock DataSource implementation which can be used for testing.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -33,13 +33,12 @@ import java.util.regex.Pattern;
* Number, Integer, Currency and Percent styles as supported by
* {@link NumberFormat} with configurable locales.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -27,13 +27,12 @@ import java.util.regex.Pattern;
* A {@link Transformer} implementation which uses Regular Expressions to extract, split
* and replace data in fields.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -31,13 +31,12 @@ import javax.script.ScriptException;
* A {@link Transformer} instance capable of executing functions written in scripting
* languages as a {@link Transformer} instance.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -32,7 +32,7 @@ import java.nio.charset.StandardCharsets;
/**
* <p> Writes documents to SOLR. </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3

View File

@ -29,13 +29,12 @@ import java.util.regex.Pattern;
* databases. It is used in conjunction with {@link JdbcDataSource}. This is the default
* {@link EntityProcessor} if none is specified explicitly in data-config.xml
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
*

View File

@ -27,20 +27,18 @@ import org.slf4j.LoggerFactory;
* A {@link Transformer} which can put values into a column by resolving an expression
* containing other columns
* </p>
* <p/>
* <p>
* For example:<br />
* For example:<br>
* &lt;field column="name" template="${e.lastName}, ${e.firstName}
* ${e.middleName}" /&gt; will produce the name by combining values from
* lastName, firstName and middleName fields as given in the template attribute.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
*

View File

@ -22,17 +22,15 @@ import java.util.Map;
* <p>
* Use this API to implement a custom transformer for any given entity
* </p>
* <p/>
* <p>
* Implementations of this abstract class must provide a public no-args constructor.
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
*

View File

@ -30,10 +30,10 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* <p> A data source implementation which can be used to read character files using HTTP. </p> <p/> <p> Refer to <a
* <p> A data source implementation which can be used to read character files using HTTP. </p> <p> Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a> for more
* details. </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
*

View File

@ -43,10 +43,10 @@ import java.util.concurrent.atomic.AtomicReference;
/**
* <p> An implementation of {@link EntityProcessor} which uses a streaming xpath parser to extract values out of XML documents.
* It is typically used in conjunction with {@link URLDataSource} or {@link FileDataSource}. </p> <p/> <p> Refer to <a
* It is typically used in conjunction with {@link URLDataSource} or {@link FileDataSource}. </p> <p> Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a> for more
* details. </p>
* <p/>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
*

View File

@ -50,10 +50,8 @@ import org.slf4j.LoggerFactory;
* This class is thread-safe for parsing xml. But adding fields is not
* thread-safe. The recommended usage is to addField() in one thread and
* then share the instance across threads.
* </p>
* <p/>
* <b>This API is experimental and may change in the future.</b>
* <p>
* <b>This API is experimental and may change in the future.</b>
*
* @since solr 1.3
*/

View File

@ -36,13 +36,12 @@ import org.w3c.dom.Element;
* <p>
* Mapping for data-config.xml
* </p>
* <p/>
* <p>
* Refer to <a
* href="http://wiki.apache.org/solr/DataImportHandler">http://wiki.apache.org/solr/DataImportHandler</a>
* for more details.
* </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
* @since solr 1.3

View File

@ -44,7 +44,7 @@ import org.junit.Before;
* <p>
* Abstract base class for DataImportHandler tests
* </p>
* <p/>
* <p>
* <b>This API is experimental and subject to change</b>
*
*

View File

@ -35,7 +35,6 @@ import org.junit.Test;
* <p>
* Test for JdbcDataSource
* </p>
* <p/>
* <p>
* Note: The tests are ignored for the lack of DB support for testing
* </p>

View File

@ -32,10 +32,7 @@ import java.util.List;
import java.util.Map;
/**
* <p>
* Test for ScriptTransformer
* </p>
* <p/>
*
*
* @since solr 1.3

View File

@ -36,7 +36,7 @@ public interface ExtractingParams {
/**
* The param prefix for mapping Tika metadata to Solr fields.
* <p/>
* <p>
* To map a field, add a name like:
* <pre>fmap.title=solr.title</pre>
*
@ -48,7 +48,7 @@ public interface ExtractingParams {
/**
* The boost value for the name of the field. The boost can be specified by a name mapping.
* <p/>
* <p>
* For example
* <pre>
* map.title=solr.title
@ -73,9 +73,8 @@ public interface ExtractingParams {
* Restrict the extracted parts of a document to be indexed
* by passing in an XPath expression. All content that satisfies the XPath expr.
* will be passed to the {@link SolrContentHandler}.
* <p/>
* <p>
* See Tika's docs for what the extracted document looks like.
* <p/>
* @see #CAPTURE_ELEMENTS
*/
public static final String XPATH_EXPRESSION = "xpath";
@ -104,11 +103,11 @@ public interface ExtractingParams {
/**
* Capture the specified fields (and everything included below it that isn't capture by some other capture field) separately from the default. This is different
* then the case of passing in an XPath expression.
* <p/>
* <p>
* The Capture field is based on the localName returned to the {@link SolrContentHandler}
* by Tika, not to be confused by the mapped field. The field name can then
* be mapped into the index schema.
* <p/>
* <p>
* For instance, a Tika document may look like:
* <pre>
* &lt;html&gt;

View File

@ -43,7 +43,6 @@ import java.util.Map;
/**
* Handler for rich documents like PDF or Word or any other file format that Tika handles that need the text to be extracted
* first from the document.
* <p/>
*/
public class ExtractingRequestHandler extends ContentStreamHandlerBase implements SolrCoreAware {

View File

@ -38,8 +38,7 @@ import java.util.*;
/**
* The class responsible for handling Tika events and translating them into {@link org.apache.solr.common.SolrInputDocument}s.
* <B>This class is not thread-safe.</B>
* <p/>
* <p/>
* <p>
 * Users may wish to override this class to provide their own functionality.
*
* @see org.apache.solr.handler.extraction.SolrContentHandlerFactory
@ -313,7 +312,7 @@ public class SolrContentHandler extends DefaultHandler implements ExtractingPara
/**
* Can be used to transform input values based on their {@link org.apache.solr.schema.SchemaField}
* <p/>
* <p>
* This implementation only formats dates using the {@link org.apache.solr.common.util.DateUtil}.
*
* @param val The value to transform

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.

View File

@ -41,7 +41,7 @@ import com.cybozu.labs.langdetect.LangDetectException;
/**
* Identifies the language of a set of input fields using
* http://code.google.com/p/language-detection
* <p/>
* <p>
* The UpdateProcessorChain config entry can take a number of parameters
* which may also be passed as HTTP parameters on the update request
* and override the defaults. Here is the simplest processor config possible:

View File

@ -28,7 +28,7 @@ import org.apache.solr.util.plugin.SolrCoreAware;
/**
* Identifies the language of a set of input fields using Tika's
* LanguageIdentifier. The tika-core-x.y.jar must be on the classpath
* <p/>
* <p>
* The UpdateProcessorChain config entry can take a number of parameters
* which may also be passed as HTTP parameters on the update request
* and override the defaults. Here is the simplest processor config possible:

View File

@ -172,7 +172,7 @@ public class JettySolrRunner {
}
/**
* Constructor taking an ordered list of additional (servlet holder -> path spec) mappings
* Constructor taking an ordered list of additional (servlet holder -&gt; path spec) mappings
* to add to the servlet context
*/
public JettySolrRunner(String solrHome, String context, int port,
@ -190,7 +190,7 @@ public class JettySolrRunner {
}
/**
* Constructor taking an ordered list of additional (filter holder -> path spec) mappings.
* Constructor taking an ordered list of additional (filter holder -&gt; path spec) mappings.
* Filters are placed after the DebugFilter but before the SolrDispatchFilter.
*/
public JettySolrRunner(String solrHome, String context, int port,

View File

@ -89,7 +89,7 @@ public class CloudUtil {
/**
* Returns a displayable unified path to the given resource. For non-solrCloud that will be the
* same as getConfigDir, but for Cloud it will be getConfigSetZkPath ending in a /
* <p/>
* <p>
* <b>Note:</b> Do not use this to generate a valid file path, but for debug printing etc
* @param loader Resource loader instance
* @return a String of path to resource

View File

@ -56,7 +56,6 @@ public class ZkSolrResourceLoader extends SolrResourceLoader {
* will delegate to the context classloader when possible,
* otherwise it will attempt to resolve resources using any jar files found in
* the "lib/" directory in the specified instance directory.
* <p>
*/
public ZkSolrResourceLoader(String instanceDir, String configSet, ClassLoader parent,
Properties coreProperties, ZkController zooKeeperController) {

View File

@ -61,7 +61,7 @@ public class AbstractSolrEventListener implements SolrEventListener {
/**
* Add the {@link org.apache.solr.common.params.EventParams#EVENT} with either the {@link org.apache.solr.common.params.EventParams#NEW_SEARCHER}
* or {@link org.apache.solr.common.params.EventParams#FIRST_SEARCHER} values depending on the value of currentSearcher.
* <p/>
* <p>
* Makes a copy of NamedList and then adds the parameters.
*
*

View File

@ -19,10 +19,10 @@ package org.apache.solr.core;
/**
* Used to request notification when the core is closed.
* <p/>
* <p>
* Call {@link org.apache.solr.core.SolrCore#addCloseHook(org.apache.solr.core.CloseHook)} during the {@link org.apache.solr.util.plugin.SolrCoreAware#inform(SolrCore)} method to
* add a close hook to your object.
* <p/>
* <p>
* The close hook can be useful for releasing objects related to the request handler (for instance, if you have a JDBC DataSource or something like that)
*/
@ -31,7 +31,7 @@ public abstract class CloseHook {
/**
* Method called when the given SolrCore object is closing / shutting down but before the update handler and
* searcher(s) are actually closed
* <br />
* <br>
* <b>Important:</b> Keep the method implementation as short as possible. If it were to use any heavy i/o , network connections -
* it might be a better idea to launch in a separate Thread so as to not to block the process of
* shutting down a given SolrCore instance.
@ -42,9 +42,9 @@ public abstract class CloseHook {
/**
* Method called when the given SolrCore object has been shut down and update handlers and searchers are closed
* <br/>
* <br>
* Use this method for post-close clean up operations e.g. deleting the index from disk.
* <br/>
* <br>
* <b>The core's passed to the method is already closed and therefore, its update handler or searcher should *NOT* be used</b>
*
* <b>Important:</b> Keep the method implementation as short as possible. If it were to use any heavy i/o , network connections -

View File

@ -100,7 +100,7 @@ public class Config {
* will be created.
* </p>
* <p>
* Consider passing a non-null 'name' parameter in all use-cases since it is used for logging & exception reporting.
* Consider passing a non-null 'name' parameter in all use-cases since it is used for logging &amp; exception reporting.
* </p>
* @param loader the resource loader used to obtain an input stream if 'is' is null
* @param name the resource name used if the input stream 'is' is null

View File

@ -264,7 +264,7 @@ public class CoreDescriptor {
* Is this property a Solr-standard property, or is it an extra property
* defined per-core by the user?
* @param propName the Property name
* @return @{code true} if this property is user-defined
* @return {@code true} if this property is user-defined
*/
protected static boolean isUserDefinedProperty(String propName) {
return !standardPropNames.contains(propName);

View File

@ -32,11 +32,11 @@ import java.util.concurrent.atomic.AtomicInteger;
/**
* A wrapper for an IndexDeletionPolicy instance.
* <p/>
* <p>
* Provides features for looking up IndexCommit given a version. Allows reserving index
* commit points for certain amounts of time to support features such as index replication
* or snapshooting directly out of a live index directory.
* <p/>
* <p>
* <b>NOTE</b>: The {@link #clone()} method returns <tt>this</tt> in order to make
* this {@link IndexDeletionPolicy} instance trackable across {@link IndexWriter}
* instantiations. This is correct because each core has its own
@ -59,7 +59,7 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
/**
* Gets the most recent commit point
* <p/>
* <p>
* It is recommended to reserve a commit point for the duration of usage so that
* it is not deleted by the underlying deletion policy
*

View File

@ -40,7 +40,6 @@ import java.util.concurrent.ConcurrentHashMap;
* Responsible for finding (or creating) a MBeanServer from given configuration
* and registering all SolrInfoMBean objects with JMX.
* </p>
* <p/>
* <p>
* Please see http://wiki.apache.org/solr/SolrJmx for instructions on usage and configuration
* </p>

View File

@ -1065,14 +1065,13 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
* <li>all CloseHooks will be notified</li>
* <li>All MBeans will be unregistered from MBeanServer if JMX was enabled
* </li>
* </ul>
* <p>
* </ul>
* <p>
* The behavior of this method is determined by the result of decrementing
* the core's reference count (A core is created with a reference count of 1)...
* </p>
* <ul>
* <li>If reference count is > 0, the usage count is decreased by 1 and no
* <li>If reference count is &gt; 0, the usage count is decreased by 1 and no
* resources are released.
* </li>
* <li>If reference count is == 0, the resources are released.

View File

@ -37,7 +37,7 @@ public interface SolrEventListener extends NamedListInitializedPlugin{
/** The searchers passed here are only guaranteed to be valid for the duration
* of this method call, so care should be taken not to spawn threads or asynchronous
* tasks with references to these searchers.
* <p/>
* <p>
* Implementations should add the {@link org.apache.solr.common.params.EventParams#EVENT} parameter and set it to a value of either:
* <ul>
* <li>{@link org.apache.solr.common.params.EventParams#FIRST_SEARCHER} - First Searcher event</li>

View File

@ -151,7 +151,6 @@ public class SolrResourceLoader implements ResourceLoader,Closeable
* otherwise it will attempt to resolve resources using any jar files
* found in the "lib/" directory in the specified instance directory.
* If the instance directory is not specified (=null), SolrResourceLoader#locateInstanceDir will provide one.
* <p>
*/
public SolrResourceLoader( String instanceDir, ClassLoader parent )
{
@ -286,7 +285,7 @@ public class SolrResourceLoader implements ResourceLoader,Closeable
/**
* EXPERT
* <p/>
* <p>
* The underlying class loader. Most applications will not need to use this.
* @return The {@link ClassLoader}
*/

View File

@ -50,7 +50,7 @@ import java.util.*;
/**
* An analysis handler that provides a breakdown of the analysis process of provided documents. This handler expects a
* (single) content stream of the following format:
* <p/>
* <br>
* <pre><code>
* &lt;docs&gt;
* &lt;doc&gt;
@ -63,12 +63,10 @@ import java.util.*;
* ...
* &lt;/docs&gt;
* </code></pre>
* <p/>
* <br>
* <em><b>Note: Each document must contain a field which serves as the unique key. This key is used in the returned
* response to associate an analysis breakdown to the analyzed document.</b></em>
* <p/>
* <p/>
* <p/>
* <p>
* Like the {@link org.apache.solr.handler.FieldAnalysisRequestHandler}, this handler also supports query analysis by
* sending either an "analysis.query" or "q" request parameter that holds the query text to be analyzed. It also
* supports the "analysis.showmatch" parameter which when set to {@code true}, all field tokens that match the query

View File

@ -38,7 +38,7 @@ import java.util.Set;
/**
* Provides the ability to specify multiple field types and field names in the same request. Expected parameters:
* <table border="1">
* <table border="1" summary="table of parameters">
* <tr>
* <th align="left">Name</th>
* <th align="left">Type</th>

View File

@ -83,16 +83,16 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p> A Handler which provides a REST API for replication and serves replication requests from Slaves. <p/> </p>
* <p> A Handler which provides a REST API for replication and serves replication requests from Slaves. </p>
* <p>When running on the master, it provides the following commands <ol> <li>Get the current replicable index version
* (command=indexversion)</li> <li>Get the list of files for a given index version
* (command=filelist&amp;indexversion=&lt;VERSION&gt;)</li> <li>Get full or a part (chunk) of a given index or a config
* file (command=filecontent&amp;file=&lt;FILE_NAME&gt;) You can optionally specify an offset and length to get that
* chunk of the file. You can request a configuration file by using "cf" parameter instead of the "file" parameter.</li>
* <li>Get status/statistics (command=details)</li> </ol> </p> <p>When running on the slave, it provides the following
* <li>Get status/statistics (command=details)</li> </ol> <p>When running on the slave, it provides the following
* commands <ol> <li>Perform a snap pull now (command=snappull)</li> <li>Get status/statistics (command=details)</li>
* <li>Abort a snap pull (command=abort)</li> <li>Enable/Disable polling the master for new versions (command=enablepoll
* or command=disablepoll)</li> </ol> </p>
* or command=disablepoll)</li> </ol>
*
*
* @since solr 1.4

View File

@ -62,7 +62,7 @@ public abstract class RequestHandlerBase implements SolrRequestHandler, SolrInfo
/**
* Initializes the {@link org.apache.solr.request.SolrRequestHandler} by creating three {@link org.apache.solr.common.params.SolrParams} named.
* <table border="1">
* <table border="1" summary="table of parameters">
* <tr><th>Name</th><th>Description</th></tr>
* <tr><td>defaults</td><td>Contains all of the named arguments contained within the list element named "defaults".</td></tr>
* <tr><td>appends</td><td>Contains all of the named arguments contained within the list element named "appends".</td></tr>

View File

@ -115,7 +115,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p/> Provides functionality of downloading changed index files as well as config files and a timer for scheduling fetches from the
* <p> Provides functionality of downloading changed index files as well as config files and a timer for scheduling fetches from the
* master. </p>
*
*

View File

@ -42,7 +42,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p/> Provides functionality equivalent to the snapshooter script </p>
* <p> Provides functionality equivalent to the snapshooter script </p>
* This is no longer used in standard replication.
*
*

View File

@ -29,7 +29,6 @@ import java.net.URL;
* in the solrconfig as defaults, and may be overriden as request parameters.
* (TODO: complete documentation of request parameters here, rather than only
* on the wiki).
* </p>
*
* <ul>
* <li> highlight - Set to any value not .equal() to "false" to enable highlight

View File

@ -482,7 +482,7 @@ public class CoreAdminHandler extends RequestHandlerBase {
/**
* Handle Custom Action.
* <p/>
* <p>
* This method could be overridden by derived classes to handle custom actions. <br> By default - this method throws a
* solr exception. Derived classes are free to write their derivation if necessary.
*/

View File

@ -57,7 +57,7 @@ import java.util.Set;
* If you want to selectively restrict access some configuration files, you can list
* these files in the {@link #HIDDEN} invariants. For example to hide
* synonyms.txt and anotherfile.txt, you would register:
* <p>
* <br>
* <pre>
* &lt;requestHandler name="/admin/file" class="org.apache.solr.handler.admin.ShowFileRequestHandler" &gt;
* &lt;lst name="defaults"&gt;
@ -83,7 +83,7 @@ import java.util.Set;
* set it directly using: {@link #USE_CONTENT_TYPE}. For example, to get a plain text
* version of schema.xml, try:
* <pre>
* http://localhost:8983/solr/admin/file?file=schema.xml&contentType=text/plain
* http://localhost:8983/solr/admin/file?file=schema.xml&amp;contentType=text/plain
* </pre>
*
*

View File

@ -92,17 +92,17 @@ import org.apache.solr.util.plugin.SolrCoreAware;
/**
* The ExpandComponent is designed to work with the CollapsingPostFilter.
* The CollapsingPostFilter collapses a result set on a field.
* <p/>
* <p>
* The ExpandComponent expands the collapsed groups for a single page.
* <p/>
* <p>
* http parameters:
* <p/>
* expand=true <br/>
* expand.rows=5 <br/>
* expand.sort=field asc|desc<br/>
* expand.q=*:* (optional, overrides the main query)<br/>
* expand.fq=type:child (optional, overrides the main filter queries)<br/>
* expand.field=field (mandatory if the not used with the CollapsingQParserPlugin)<br/>
* <p>
* expand=true <br>
* expand.rows=5 <br>
* expand.sort=field asc|desc<br>
* expand.q=*:* (optional, overrides the main query)<br>
* expand.fq=type:child (optional, overrides the main filter queries)<br>
* expand.field=field (mandatory if not used with the CollapsingQParserPlugin)<br>
*/
public class ExpandComponent extends SearchComponent implements PluginInfoInitialized, SolrCoreAware {
public static final String COMPONENT_NAME = "expand";

View File

@ -39,7 +39,6 @@ import org.apache.solr.search.SolrIndexSearcher;
* for facet values present in another field.
* <p>
* 9/10/2009 - Moved out of StatsComponent to allow open access to UnInvertedField
* <p/>
* @see org.apache.solr.handler.component.StatsComponent
*
*/

View File

@ -56,10 +56,10 @@ import org.apache.solr.util.plugin.SolrCoreAware;
/**
* Return term vectors for the documents in a query result set.
* <p/>
* <p>
* Info available:
* term, frequency, position, offset, IDF.
* <p/>
* <p>
* <b>Note</b> Returning IDF can be expensive.
*
* <pre class="prettyprint">

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.

View File

@ -144,7 +144,7 @@ public class CSVConfig {
/**
* Set the fill pattern. Defaults to {@link #FILLNONE}
* <br/>Other options are : {@link #FILLLEFT} and {@link #FILLRIGHT}
* <br>Other options are : {@link #FILLLEFT} and {@link #FILLRIGHT}
* @param fill the fill pattern.
*/
public void setFill(int fill) {
@ -258,7 +258,7 @@ public class CSVConfig {
}
/**
* Creates a config based on a stream. It tries to guess<br/>
* Creates a config based on a stream. It tries to guess<br>
* NOTE : The stream will be closed.
* @param inputStream the inputstream.
* @return the guessed config.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.

View File

@ -271,7 +271,7 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
* Sets the boolean operator of the QueryParser.
* In default mode (<code>OR_OPERATOR</code>) terms without any modifiers
* are considered optional: for example <code>capital of Hungary</code> is equal to
* <code>capital OR of OR Hungary</code>.<br/>
* <code>capital OR of OR Hungary</code>.<br>
* In <code>AND_OPERATOR</code> mode terms are considered to be in conjunction: the
* above mentioned query is parsed as <code>capital AND of AND Hungary</code>
*/

View File

@ -53,7 +53,7 @@ import org.apache.solr.search.SyntaxError;
/**
* Computes interval facets for docvalues field (single or multivalued).
* <p/>
* <p>
* Given a set of intervals for a field and a DocSet, it calculates the number
* of documents that match each of the intervals provided. The final count for
* each interval should be exactly the same as the number of results of a range
@ -61,12 +61,12 @@ import org.apache.solr.search.SyntaxError;
* of {@code facet.query=field:[A TO B]} should be the same as the count of
* {@code f.field.facet.interval.set=[A,B]}, however, this method will usually
* be faster in cases where there are a larger number of intervals per field.
* <p/>
* <p>
* To use this class, create an instance using
* {@link #IntervalFacets(SchemaField, SolrIndexSearcher, DocSet, String[], SolrParams)}
* and then iterate the {@link FacetInterval} using {@link #iterator()}
* <p/>
* Intervals Format</br>
* <p>
* Intervals Format<br>
* Intervals must begin with either '(' or '[', be followed by the start value,
* then a comma ',', the end value, and finally ')' or ']'. For example:
* <ul>
@ -89,7 +89,7 @@ import org.apache.solr.search.SyntaxError;
* As with facet.query, the key used to display the result can be set by using local params
* syntax, for example:<p>
* <code>{!key='First Half'}[0,5) </code>
* <p/>
* <p>
* To use this class:
* <pre>
* IntervalFacets intervalFacets = new IntervalFacets(schemaField, searcher, docs, intervalStrs, params);

View File

@ -1245,7 +1245,7 @@ public class SimpleFacets {
}
/**
* A simple key=>val pair whose natural order is such that
* A simple key=&gt;val pair whose natural order is such that
* <b>higher</b> vals come before lower vals.
* In case of tie vals, then <b>lower</b> keys come before higher keys.
*/

View File

@ -462,7 +462,7 @@ public class UnInvertedField extends DocTermOrds {
/**
* Collect statistics about the UninvertedField. Code is very similar to {@link #getCounts(org.apache.solr.search.SolrIndexSearcher, org.apache.solr.search.DocSet, int, int, Integer, boolean, String, String)}
* It can be used to calculate stats on multivalued fields.
* <p/>
* <p>
* This method is mainly used by the {@link org.apache.solr.handler.component.StatsComponent}.
*
* @param searcher The Searcher to use to gather the statistics

View File

@ -35,7 +35,7 @@ import org.apache.solr.search.SolrReturnFields;
* the response to a query request.
*
* <p>
* <a name="returnable_data" /><b>Note On Returnable Data...</b><br/>
* <a name="returnable_data"></a><b>Note On Returnable Data...</b><br>
* A <code>SolrQueryResponse</code> may contain the following types of
* Objects generated by the <code>SolrRequestHandler</code> that processed
* the request.

View File

@ -26,7 +26,7 @@ import org.apache.solr.request.SolrQueryRequest;
* A DocTransformer can add, remove or alter a Document before it is written out to the Response. For instance, there are implementations
* that can put explanations inline with a document, add constant values and mark items as being artificially boosted (see {@link org.apache.solr.handler.component.QueryElevationComponent})
*
* <p/>
* <p>
* New instance for each request
*
* @see TransformerFactory

View File

@ -73,7 +73,7 @@ public abstract class BaseSolrResource extends ServerResource {
* from the SolrRequestInfo thread local, then gets the SolrCore
* and IndexSchema and sets up the response.
* writer.
* <p/>
* <p>
* If an error occurs during initialization, setExisting(false) is
* called and an error status code and message is set; in this case,
* Restlet will not continue servicing the request (by calling the

View File

@ -56,7 +56,7 @@ abstract class BaseFieldResource extends BaseSolrResource {
* on this list. The (Dynamic)FieldResource classes ignore this list,
* since the (dynamic) field is specified in the URL path, rather than
* in a query parameter.
* <p/>
* <p>
* Also pulls the "showDefaults" param from the request, for use by all
* subclasses to include default values from the associated field type
* in the response. By default this param is off.

View File

@ -43,7 +43,7 @@ import static org.apache.solr.common.SolrException.ErrorCode;
/**
* This class responds to requests at /solr/(corename)/schema/copyfields
* <p/>
* <p>
*
* To restrict the set of copyFields in the response, specify one or both
* of the following as query parameters, with values as space and/or comma

View File

@ -41,7 +41,7 @@ import java.util.Map;
/**
* This class responds to requests at /solr/(corename)/schema/dynamicfields
* <p/>
* <p>
* To restrict the set of dynamic fields in the response, specify a comma
* and/or space separated list of dynamic field patterns in the "fl" query
* parameter.

View File

@ -48,7 +48,7 @@ import java.util.TreeSet;
/**
* This class responds to requests at /solr/(corename)/schema/fields
* <p/>
* <p>
* Two query parameters are supported:
* <ul>
* <li>

View File

@ -43,12 +43,12 @@ import java.util.Map;
/**
* This class responds to requests at /solr/(corename)/schema/fields/(fieldname)
* where "fieldname" is the name of a field.
* <p/>
* <p>
* The GET method returns properties for the given fieldname.
* The "includeDynamic" query parameter, if specified, will cause the
* dynamic field matching the given fieldname to be returned if fieldname
* is not explicitly declared in the schema.
* <p/>
* <p>
* The PUT method accepts field addition requests in JSON format.
*/
public class FieldResource extends BaseFieldResource implements GETable, PUTable {

View File

@ -242,7 +242,7 @@ public abstract class AbstractSpatialFieldType<T extends SpatialStrategy> extend
* Returns a String version of a shape to be used for the stored value. This method in Solr is only called if for some
* reason a Shape object is passed to the field type (perhaps via a custom UpdateRequestProcessor),
* *and* the field is marked as stored. <em>The default implementation throws an exception.</em>
* <p/>
* <p>
* Spatial4j 0.4 is probably the last release to support SpatialContext.toString(shape) but it's deprecated with no
* planned replacement. Shapes do have a toString() method but they are generally internal/diagnostic and not
* standard WKT.
@ -268,7 +268,7 @@ public abstract class AbstractSpatialFieldType<T extends SpatialStrategy> extend
//--------------------------------------------------------------
/**
* Implemented for compatibility with geofilt & bbox query parsers:
* Implemented for compatibility with geofilt &amp; bbox query parsers:
* {@link SpatialQueryable}.
*/
@Override

View File

@ -20,15 +20,15 @@ package org.apache.solr.schema;
/**
* A CoordinateFieldType is the base class for {@link org.apache.solr.schema.FieldType}s that have semantics
* related to items in a coordinate system.
* <br/>
* <br>
* Implementations depend on a delegating work to a sub {@link org.apache.solr.schema.FieldType}, specified by
* either the {@link #SUB_FIELD_SUFFIX} or the {@link #SUB_FIELD_TYPE} (the latter is used if both are defined).
* <br/>
* <br>
* Example:
* <pre>&lt;fieldType name="xy" class="solr.PointType" dimension="2" subFieldType="double"/&gt;
* </pre>
* In theory, classes deriving from this should be able to do things like represent a point, a polygon, a line, etc.
* <br/>
* <br>
* NOTE: There can only be one sub Field Type.
*
*/

View File

@ -30,7 +30,7 @@ import java.util.Map;
/** Get values from an external file instead of the index.
*
* <p/><code>keyField</code> will normally be the unique key field, but it doesn't have to be.
* <p><code>keyField</code> will normally be the unique key field, but it doesn't have to be.
* <ul><li> It's OK to have a keyField value that can't be found in the index</li>
* <li>It's OK to have some documents without a keyField in the file (defVal is used as the default)</li>
* <li>It's OK for a keyField value to point to multiple documents (no uniqueness requirement)</li>
@ -40,19 +40,19 @@ import java.util.Map;
* This parameter has never been implemented. As of Solr 3.6/4.0 it is optional and can be omitted.
*
* The format of the external file is simply newline separated keyFieldValue=floatValue.
* <br/>Example:
* <br/><code>doc33=1.414</code>
* <br/><code>doc34=3.14159</code>
* <br/><code>doc40=42</code>
* <br>Example:
* <br><code>doc33=1.414</code>
* <br><code>doc34=3.14159</code>
* <br><code>doc40=42</code>
*
* <p/>Solr looks for the external file in the index directory under the name of
* <p>Solr looks for the external file in the index directory under the name of
* external_&lt;fieldname&gt; or external_&lt;fieldname&gt;.*
*
* <p/>If any files of the latter pattern appear, the last (after being sorted by name) will be used and previous versions will be deleted.
* <p>If any files of the latter pattern appear, the last (after being sorted by name) will be used and previous versions will be deleted.
* This is to help support systems where one may not be able to overwrite a file (like Windows, if the file is in use).
* <p/>If the external file has already been loaded, and it is changed, those changes will not be visible until a commit has been done.
* <p/>The external file may be sorted or unsorted by the key field, but it will be substantially slower (untested) if it isn't sorted.
* <p/>Fields of this type may currently only be used as a ValueSource in a FunctionQuery.
* <p>If the external file has already been loaded, and it is changed, those changes will not be visible until a commit has been done.
* <p>The external file may be sorted or unsorted by the key field, but it will be substantially slower (untested) if it isn't sorted.
* <p>Fields of this type may currently only be used as a ValueSource in a FunctionQuery.
*
* @see ExternalFileFieldReloader
*/

View File

@ -41,8 +41,8 @@ import java.util.List;
* listeners in your solrconfig.xml:
*
* <pre>
* &lt;listener event="newSearcher" class="org.apache.solr.schema.ExternalFileFieldReloader"/>
* &lt;listener event="firstSearcher" class="org.apache.solr.schema.ExternalFileFieldReloader"/>
* &lt;listener event="newSearcher" class="org.apache.solr.schema.ExternalFileFieldReloader"/&gt;
* &lt;listener event="firstSearcher" class="org.apache.solr.schema.ExternalFileFieldReloader"/&gt;
* </pre>
*
* The caches will be reloaded for all ExternalFileFields in your schema after

View File

@ -231,7 +231,7 @@ public abstract class FieldType extends FieldProperties {
* (taken from toInternal()). Having a different representation for
* external, internal, and indexed would present quite a few problems
* given the current Lucene architecture. An analyzer for adding docs
* would need to translate internal->indexed while an analyzer for
* would need to translate internal-&gt;indexed while an analyzer for
* querying would need to translate external-&gt;indexed.
* </p>
* <p>
@ -434,7 +434,7 @@ public abstract class FieldType extends FieldProperties {
/**
* Returns a Query instance for doing prefix searches on this field type.
* Also, other QueryParser implementations may have different semantics.
* <p/>
* <p>
* Sub-classes should override this method to provide their own range query implementation.
*
* @param parser the {@link org.apache.solr.search.QParser} calling the method
@ -676,7 +676,7 @@ public abstract class FieldType extends FieldProperties {
* currently passes part1 and part2 as null if they are '*' respectively. minInclusive and maxInclusive are both true
* currently by SolrQueryParser but that may change in the future. Also, other QueryParser implementations may have
* different semantics.
* <p/>
* <p>
* Sub-classes should override this method to provide their own range query implementation. They should strive to
* handle nulls in part1 and/or part2 as well as unequal minInclusive and maxInclusive parameters gracefully.
*
@ -792,7 +792,7 @@ public abstract class FieldType extends FieldProperties {
private static final String POSITION_INCREMENT_GAP = "positionIncrementGap";
/**
* Get a map of property name -> value for this field type.
* Get a map of property name -&gt; value for this field type.
* @param showDefaults if true, include default properties.
*/
public SimpleOrderedMap<Object> getNamedPropertyValues(boolean showDefaults) {

View File

@ -300,7 +300,7 @@ public class IndexSchema {
/**
* Name of the default search field specified in the schema file.
* <br/><b>Note:</b>Avoid calling this, try to use this method so that the 'df' param is consulted as an override:
* <br><b>Note:</b>Avoid calling this, try to use this method so that the 'df' param is consulted as an override:
* {@link org.apache.solr.search.QueryParsing#getDefaultField(IndexSchema, String)}
*/
public String getDefaultSearchFieldName() {
@ -1356,7 +1356,7 @@ public class IndexSchema {
}
/**
* Get a map of property name -> value for the whole schema.
* Get a map of property name -&gt; value for the whole schema.
*/
public SimpleOrderedMap<Object> getNamedPropertyValues() {
SimpleOrderedMap<Object> topLevel = new SimpleOrderedMap<>();

View File

@ -53,10 +53,10 @@ import org.apache.solr.search.QParser;
*
* Examples of queries:
* <ul>
* <li>http://localhost:8983/solr/select/?q=*:*&fl=name&sort=random_1234%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&fl=name&sort=random_2345%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&fl=name&sort=random_ABDC%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&fl=name&sort=random_21%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&amp;fl=name&amp;sort=random_1234%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&amp;fl=name&amp;sort=random_2345%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&amp;fl=name&amp;sort=random_ABDC%20desc</li>
* <li>http://localhost:8983/solr/select/?q=*:*&amp;fl=name&amp;sort=random_21%20desc</li>
* </ul>
* Note that multiple calls to the same URL will return the same sorting order.
*

View File

@ -304,7 +304,7 @@ public final class SchemaField extends FieldProperties {
}
/**
* Get a map of property name -> value for this field. If showDefaults is true,
* Get a map of property name -&gt; value for this field. If showDefaults is true,
* include default properties (those inherited from the declared property type and
* not overridden in the field declaration).
*/

View File

@ -62,13 +62,13 @@ import org.apache.solr.search.QParser;
* {@link DoubleField}.
* See {@link org.apache.lucene.search.NumericRangeQuery} for more details.
* It supports integer, float, long, double and date types.
* <p/>
* <p>
* For each number being added to this field, multiple terms are generated as per the algorithm described in the above
* link. The possible number of terms increases dramatically with lower precision steps. For
* the fast range search to work, trie fields must be indexed.
* <p/>
* <p>
* Trie fields are sortable in numerical order and can be used in function queries.
* <p/>
* <p>
* Note that if you use a precisionStep of 32 for int/float and 64 for long/double/date, then multiple terms will not be
* generated, range search will be no faster than any other number field, but sorting will still be possible.
*

View File

@ -60,7 +60,7 @@ public class BitDocSet extends DocSetBase {
this.size = size;
}
/*** DocIterator using nextSetBit()
/* DocIterator using nextSetBit()
public DocIterator iterator() {
return new DocIterator() {
int pos=bits.nextSetBit(0);
@ -158,7 +158,7 @@ public class BitDocSet extends DocSetBase {
}
/**
* Returns true of the doc exists in the set. Should only be called when doc <
* Returns true if the doc exists in the set. Should only be called when doc &lt;
* {@link FixedBitSet#length()}.
*/
@Override

View File

@ -74,32 +74,32 @@ import org.apache.solr.schema.TrieLongField;
This is a high performance alternative to standard Solr
field collapsing (with ngroups) when the number of distinct groups
in the result set is high.
<p/>
<p>
Sample syntax:
<p/>
<p>
Collapse based on the highest scoring document:
<p/>
<p>
fq={!collapse field=field_name}
<p/>
<p>
Collapse based on the min value of a numeric field:
<p/>
<p>
fq={!collapse field=field_name min=field_name}
<p/>
<p>
Collapse based on the max value of a numeric field:
<p/>
<p>
fq={!collapse field=field_name max=field_name}
<p/>
<p>
Collapse with a null policy:
<p/>
<p>
fq={!collapse field=field_name nullPolicy=nullPolicy}
<p/>
There are three null policies: <br/>
ignore : removes docs with a null value in the collapse field (default).<br/>
expand : treats each doc with a null value in the collapse field as a separate group.<br/>
<p>
There are three null policies: <br>
ignore : removes docs with a null value in the collapse field (default).<br>
expand : treats each doc with a null value in the collapse field as a separate group.<br>
collapse : collapses all docs with a null value into a single group using either highest score, or min/max.
<p/>
<p>
The CollapsingQParserPlugin fully supports the QueryElevationComponent
**/

View File

@ -29,7 +29,7 @@ import org.apache.solr.request.SolrQueryRequest;
/**
* Parse Solr's variant on the Lucene {@link org.apache.lucene.queryparser.complexPhrase.ComplexPhraseQueryParser} syntax.
* <p/>
* <p>
* Modified from {@link org.apache.solr.search.LuceneQParserPlugin} and {@link org.apache.solr.search.SurroundQParserPlugin}
*/
public class ComplexPhraseQParserPlugin extends QParserPlugin {

Some files were not shown because too many files have changed in this diff Show More