fix more javadocs warnings

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1065474 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2011-01-31 02:59:40 +00:00
parent 1377b916e6
commit 107c06324b
19 changed files with 30 additions and 37 deletions

View File

@@ -114,14 +114,14 @@ public class InstantiatedIndexWriter implements Closeable {
* MAddDocs_20000 - 7 4000 100 false - - 1 - - 20000 - - 535,8 - - 37,33 - 309 680 640 - 501 968 896
* </pre>
*
- * @see org.apache.lucene.index.IndexWriter#setMergeFactor(int)
+ * @see org.apache.lucene.index.LogMergePolicy#setMergeFactor(int)
*/
public void setMergeFactor(int mergeFactor) {
this.mergeFactor = mergeFactor;
}
/**
- * @see org.apache.lucene.index.IndexWriter#getMergeFactor()
+ * @see org.apache.lucene.index.LogMergePolicy#getMergeFactor()
*/
public int getMergeFactor() {
return mergeFactor;
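
The corrected @see targets reflect that mergeFactor is now configured on LogMergePolicy rather than on IndexWriter itself. A minimal, hedged sketch of that setup for a regular IndexWriter (the policy subclass, version constant, and writer construction are assumptions about the Lucene API of this era, not part of this change):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.LogByteSizeMergePolicy;
    import org.apache.lucene.index.LogMergePolicy;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.util.Version;

    public class MergeFactorSketch {
      static IndexWriter openWriter(Directory dir, Analyzer analyzer) throws Exception {
        // mergeFactor lives on the merge policy, not on the writer.
        LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();
        mergePolicy.setMergeFactor(10); // merge roughly every 10 segments per level
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_31, analyzer); // version constant is illustrative
        config.setMergePolicy(mergePolicy);
        return new IndexWriter(dir, config);
      }
    }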

View File

@@ -138,11 +138,6 @@ you don't need to worry about dealing with those.
config.setAnalyzer(new WhitespaceAnalyzer());
Query query = qpHelper.parse("apache AND lucene", "defaultField");
</pre>
- <p>
- To make it easy for people who are using current Lucene's query parser to switch to
- the new one, there is a {@link org.apache.lucene.queryParser.standard.QueryParserWrapper} under org.apache.lucene.queryParser.standard
- that keeps the old query parser interface, but uses the new query parser infrastructure.
- </p>
</body>
</html>
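
The removed paragraph pointed at QueryParserWrapper, which no longer exists; the snippet kept above is the recommended entry point. A hedged, self-contained version of that flow (the parse call mirrors the snippet; the setAnalyzer call and the analyzer import are assumptions and may differ between versions):

    import org.apache.lucene.analysis.WhitespaceAnalyzer;
    import org.apache.lucene.queryParser.standard.StandardQueryParser;
    import org.apache.lucene.search.Query;

    public class NewQueryParserSketch {
      public static void main(String[] args) throws Exception {
        StandardQueryParser qpHelper = new StandardQueryParser();
        // The overview snippet sets the analyzer through a config object; a
        // direct setter is assumed here.
        qpHelper.setAnalyzer(new WhitespaceAnalyzer());
        Query query = qpHelper.parse("apache AND lucene", "defaultField");
        System.out.println(query);
      }
    }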

View File

@@ -82,7 +82,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
private Set<?> excltable = Collections.emptySet();
/**
- * Builds an analyzer with the default stop words ({@link #BRAZILIAN_STOP_WORDS}).
+ * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}).
*/
public BrazilianAnalyzer(Version matchVersion) {
this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET);

View File

@@ -65,7 +65,7 @@ public final class CJKAnalyzer extends StopwordAnalyzerBase {
}
/**
- * Builds an analyzer which removes words in {@link #STOP_WORDS}.
+ * Builds an analyzer which removes words in {@link #getDefaultStopSet()}.
*/
public CJKAnalyzer(Version matchVersion) {
this(matchVersion, DefaultSetHolder.DEFAULT_STOP_SET);

View File

@@ -86,7 +86,7 @@ public final class CzechAnalyzer extends StopwordAnalyzerBase {
private final Set<?> stemExclusionTable;
/**
- * Builds an analyzer with the default stop words ({@link #CZECH_STOP_WORDS}).
+ * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()}).
*
* @param matchVersion Lucene version to match See
* {@link <a href="#version">above</a>}

View File

@@ -109,7 +109,7 @@ public final class DutchAnalyzer extends ReusableAnalyzerBase {
private final Version matchVersion;
/**
- * Builds an analyzer with the default stop words ({@link #DUTCH_STOP_WORDS})
+ * Builds an analyzer with the default stop words ({@link #getDefaultStopSet()})
* and a few default entries for the stem exclusion table.
*
*/

View File

@@ -28,7 +28,7 @@ import org.apache.lucene.search.TopDocs;
/**
* Create a log ready for submission.
* Extend this class and override
- * {@link #report(QualityQuery, TopDocs, String, Searcher)}
+ * {@link #report(QualityQuery, TopDocs, String, IndexSearcher)}
* to create different reports.
*/
public class SubmissionReport {

View File

@@ -74,7 +74,6 @@ public class SolrZkClient {
* @param zkClientTimeout
* @param strat
* @param onReconnect
- * @param clientConnectTimeout
* @throws InterruptedException
* @throws TimeoutException
* @throws IOException
@@ -164,7 +163,7 @@ public class SolrZkClient {
/**
* @param path
- * @return
+ * @return true if path exists
* @throws KeeperException
* @throws InterruptedException
*/
@@ -178,7 +177,7 @@ public class SolrZkClient {
* @param data
* @param acl
* @param createMode
- * @return
+ * @return path of created node
* @throws KeeperException
* @throws InterruptedException
*/
@@ -190,7 +189,7 @@ public class SolrZkClient {
/**
* @param path
* @param watcher
- * @return
+ * @return children of the node at the path
* @throws KeeperException
* @throws InterruptedException
*/
@@ -203,7 +202,7 @@ public class SolrZkClient {
* @param path
* @param watcher
* @param stat
- * @return
+ * @return node's data
* @throws KeeperException
* @throws InterruptedException
*/
@@ -216,7 +215,7 @@ public class SolrZkClient {
* @param path
* @param data
* @param version
- * @return
+ * @return node's state
* @throws KeeperException
* @throws InterruptedException
*/
@@ -229,8 +228,8 @@ public class SolrZkClient {
*
* @param path
* @param data
- * @param watcher
- * @return
+ * @param createMode
+ * @return path of created node
* @throws KeeperException
* @throws InterruptedException
*/
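
The filled-in @return tags above describe SolrZkClient's thin wrappers around the ZooKeeper primitives. A hedged sketch of how the documented parameters and return values line up (method signatures are inferred from the javadoc tags in this diff, and the paths are placeholders):

    import java.util.List;

    import org.apache.solr.common.cloud.SolrZkClient;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.data.Stat;

    public class ZkClientSketch {
      static void roundTrip(SolrZkClient zkClient, byte[] data) throws Exception {
        String created = zkClient.create("/configs/conf1/schema.xml", data,
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);                    // path of created node
        if (zkClient.exists("/configs/conf1/schema.xml")) {                         // true if path exists
          List<String> children = zkClient.getChildren("/configs/conf1", null);     // children of the node
          byte[] bytes = zkClient.getData("/configs/conf1/schema.xml", null, null); // node's data
          Stat stat = zkClient.setData("/configs/conf1/schema.xml", bytes, -1);     // node's state
        }
      }
    }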

View File

@@ -228,7 +228,6 @@ public interface FacetParams {
* String indicating what "other" ranges should be computed for a
* numerical range facet (multi-value).
* Can be overriden on a per field basis.
- * @see FacetNumberOther
*/
public static final String FACET_RANGE_OTHER = FACET_RANGE + ".other";
/**

View File

@@ -242,7 +242,7 @@ public class StrUtils {
* {@link NullPointerException} and {@link SolrException} free version of {@link #parseBool(String)}
* @param s
* @param def
- * @return
+ * @return parsed boolean value (or def, if s is null or invalid)
*/
public static boolean parseBool(String s, boolean def) {
if( s != null ) {
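
A small usage sketch for the null-safe overload documented above (the parameter name and surrounding method are just illustrative):

    import org.apache.solr.common.params.SolrParams;
    import org.apache.solr.common.util.StrUtils;

    public class ParseBoolSketch {
      static boolean facetingEnabled(SolrParams params) {
        // Falls back to the default instead of throwing when the value is null or unparsable.
        return StrUtils.parseBool(params.get("facet"), true);
      }
    }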

View File

@@ -186,7 +186,7 @@ public final class ZkController {
/**
* @param collection
* @param fileName
- * @return
+ * @return true if config file exists
* @throws KeeperException
* @throws InterruptedException
*/
@@ -206,7 +206,7 @@ public final class ZkController {
/**
* @param zkConfigName
* @param fileName
- * @return
+ * @return config file data (in bytes)
* @throws KeeperException
* @throws InterruptedException
*/
@@ -250,7 +250,7 @@ public final class ZkController {
}
/**
- * @return
+ * @return zookeeper server address
*/
public String getZkServerAddress() {
return zkServerAddress;
@@ -392,7 +392,7 @@ public final class ZkController {
/**
* @param path
- * @return
+ * @return true if the path exists
* @throws KeeperException
* @throws InterruptedException
*/
@@ -403,7 +403,7 @@ public final class ZkController {
/**
* @param collection
- * @return
+ * @return config value
* @throws KeeperException
* @throws InterruptedException
* @throws IOException

View File

@@ -49,7 +49,7 @@ public class RequestHandlerUtils
* Check the request parameters and decide if it should commit or optimize.
* If it does, it will check parameters for "waitFlush" and "waitSearcher"
*
- * @deprecated Use {@link #handleCommit(UpdateRequestProcessor,SolrParams,boolean)}
+ * @deprecated Use {@link #handleCommit(SolrQueryRequest,UpdateRequestProcessor,SolrParams,boolean)}
*
* @since solr 1.2
*/
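
The updated @deprecated tag points at the overload that also takes the SolrQueryRequest. A hedged sketch of a handler calling it (the argument order follows the link text above; treating the method as static and ignoring its return value are assumptions):

    import org.apache.solr.handler.RequestHandlerUtils;
    import org.apache.solr.request.SolrQueryRequest;
    import org.apache.solr.update.processor.UpdateRequestProcessor;

    public class CommitSketch {
      static void maybeCommit(SolrQueryRequest req, UpdateRequestProcessor processor) throws Exception {
        // Issues a commit/optimize if the request parameters ask for one,
        // honoring waitFlush/waitSearcher as described above.
        RequestHandlerUtils.handleCommit(req, processor, req.getParams(), false);
      }
    }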

View File

@@ -302,12 +302,12 @@ public abstract class BaseResponseWriter {
* {@link SolrInputDocument}s to be spit out as a {@link SolrDocumentList}
* so they can be processed as a whole, rather than on a doc-by-doc basis.
* If set to false, this method calls
- * {@link #writeAllDocs(DocListInfo, List)}, else if set to true, then this
+ * {@link #writeAllDocs(BaseResponseWriter.DocListInfo, List)}, else if set to true, then this
* method forces calling {@link #writeDoc(SolrDocument)} on a doc-by-doc
* basis. one
*
* @return True to force {@link #writeDoc(SolrDocument)} to be called, False
- * to force {@link #writeAllDocs(DocListInfo, List)} to be called.
+ * to force {@link #writeAllDocs(BaseResponseWriter.DocListInfo, List)} to be called.
*/
public boolean isStreamingDocs() { return true; }

View File

@@ -99,7 +99,7 @@ public final class IndexSchema {
* If the is stream is null, the resource loader will load the schema resource by name.
* @see SolrResourceLoader#openSchema
* By default, this follows the normal config path directory searching rules.
- * @see Config#openResource
+ * @see SolrResourceLoader#openResource
*/
public IndexSchema(SolrConfig solrConfig, String name, InputStream is) {
this.solrConfig = solrConfig;
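
The corrected @see points at SolrResourceLoader#openResource, which backs the null-stream path described above. A small, hedged sketch of that constructor use (the SolrConfig instance and schema name are placeholders):

    import org.apache.solr.core.SolrConfig;
    import org.apache.solr.schema.IndexSchema;

    public class SchemaLoadSketch {
      static IndexSchema load(SolrConfig solrConfig) throws Exception {
        // With a null stream, the resource loader resolves "schema.xml" by name,
        // following the normal config-path searching rules.
        return new IndexSchema(solrConfig, "schema.xml", null);
      }
    }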

View File

@@ -38,7 +38,7 @@ import java.io.IOException;
/**
* @version $Id$
*
- * @deprecated use {@link LongField} or {@link TrieLongtField} - will be removed in 5.x
+ * @deprecated use {@link LongField} or {@link TrieLongField} - will be removed in 5.x
*/
@Deprecated
public class SortableLongField extends FieldType {

View File

@@ -357,7 +357,7 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
/**
* @return the indexDir on which this searcher is opened
- * @see org.apache.solr.search.SolrIndexSearcher#SolrIndexSearcher(org.apache.solr.core.SolrCore, org.apache.solr.schema.IndexSchema, String, String, boolean)
+ * @see #SolrIndexSearcher(SolrCore, IndexSchema, String, Directory, boolean)
*/
public String getIndexDir() {
return indexDir;

View File

@@ -92,7 +92,7 @@ public abstract class ValueSource implements Serializable {
* EXPERIMENTAL: This method is subject to change.
* <br>WARNING: Sorted function queries are not currently weighted.
* <p>
- * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, AtomicReaderContext)}
+ * Get the SortField for this ValueSource. Uses the {@link #getValues(java.util.Map, IndexReader.AtomicReaderContext)}
* to populate the SortField.
*
* @param reverse true if this is a reverse sort.

View File

@@ -150,7 +150,7 @@ public class LBHttpSolrServer extends SolrServer {
return numDeadServersToTry;
}
- /** @return The number of dead servers to try if there are no live servers left.
+ /** @param numDeadServersToTry The number of dead servers to try if there are no live servers left.
* Defaults to the number of servers in this request. */
public void setNumDeadServersToTry(int numDeadServersToTry) {
this.numDeadServersToTry = numDeadServersToTry;
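
The fixed tag documents numDeadServersToTry as the setter's parameter rather than a return value. A hedged usage sketch (the server URLs are placeholders and the varargs constructor is assumed from the SolrJ API of this era):

    import org.apache.solr.client.solrj.impl.LBHttpSolrServer;

    public class LoadBalancerSketch {
      static LBHttpSolrServer build() throws Exception {
        LBHttpSolrServer lb = new LBHttpSolrServer(
            "http://host1:8983/solr", "http://host2:8983/solr");
        // Even if every server has been marked dead, still retry one before failing.
        lb.setNumDeadServersToTry(1);
        return lb;
      }
    }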

View File

@@ -115,7 +115,7 @@ public class SpellCheckResponse {
* <p>
* Return the first collated query string. For convenience and backwards-compatibility. Use getCollatedResults() for full data.
* </p>
- * @return
+ * @return first collated query string
*/
public String getCollatedResult() {
return collations==null || collations.size()==0 ? null : collations.get(0).collationQueryString;
@@ -126,7 +126,7 @@ public class SpellCheckResponse {
* Return all collations.
* Will include # of hits and misspelling-to-correction details if "spellcheck.collateExtendedResults was true.
* </p>
- * @return
+ * @return all collations
*/
public List<Collation> getCollatedResults() {
return collations;
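
The filled-in @return tags distinguish the single convenience collation from the full list. A hedged sketch of reading both from a SolrJ response (request setup is omitted; getSpellCheckResponse on QueryResponse is assumed):

    import java.util.List;

    import org.apache.solr.client.solrj.response.QueryResponse;
    import org.apache.solr.client.solrj.response.SpellCheckResponse;
    import org.apache.solr.client.solrj.response.SpellCheckResponse.Collation;

    public class CollationSketch {
      static void printCollations(QueryResponse rsp) {
        SpellCheckResponse spell = rsp.getSpellCheckResponse();
        if (spell == null) return;
        // Convenience accessor: just the first collated query string (or null).
        String first = spell.getCollatedResult();
        // Full data: every collation, with extended details when requested.
        List<Collation> all = spell.getCollatedResults();
        System.out.println(first + " (" + (all == null ? 0 : all.size()) + " collations)");
      }
    }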