javadoc cosmetics

git-svn-id: https://svn.apache.org/repos/asf/lucene/java/trunk@354001 13f79535-47bb-0310-9956-ffa450edef68
Wolfgang Hoschek 2005-12-05 07:11:02 +00:00
parent 4ff370aaa3
commit 63ce2859a7
3 changed files with 34 additions and 6 deletions

MemoryIndex.java

@@ -354,6 +354,8 @@ public class MemoryIndex {
/**
* Creates and returns a searcher that can be used to execute arbitrary
* Lucene queries and to collect the resulting query results as hits.
*
* @return a searcher
*/
public IndexSearcher createSearcher() {
MemoryIndexReader reader = new MemoryIndexReader();
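
For orientation, a minimal usage sketch of createSearcher(), assuming the Lucene 1.9-era contrib API this commit documents; the demo class name, field names, and query string are illustrative only:

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

public class MemoryIndexSearcherDemo {
    public static void main(String[] args) throws Exception {
        // Build a tiny single-document in-memory index.
        MemoryIndex index = new MemoryIndex();
        index.addField("content", "Readings about Salmons and other select Alaska fishing Manuals",
                new SimpleAnalyzer());
        index.addField("author", "Tales of James", new SimpleAnalyzer());

        // Parse an arbitrary Lucene query and run it via the searcher.
        Query query = new QueryParser("content", new SimpleAnalyzer()).parse("+author:james +salmon~");
        IndexSearcher searcher = index.createSearcher();
        Hits hits = searcher.search(query);
        System.out.println("hits: " + hits.length()
                + ", top score: " + (hits.length() > 0 ? hits.score(0) : 0.0f));
    }
}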
@@ -371,8 +373,7 @@ public class MemoryIndex {
* @return the relevance score of the matchmaking; A number in the range
* [0.0 .. 1.0], with 0.0 indicating no match. The higher the number
* the better the match.
* @see org.apache.lucene.queryParser.QueryParser#parse(String, String,
* Analyzer)
* @see org.apache.lucene.queryParser.QueryParser#parse(String)
*/
public float search(Query query) {
if (query == null)
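
A shorter path is the search(Query) convenience method documented above, which skips the explicit searcher and returns only the relevance score; a sketch under the same assumptions as the previous snippet:

import org.apache.lucene.analysis.SimpleAnalyzer;
import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.queryParser.QueryParser;

public class MemoryIndexScoreDemo {
    public static void main(String[] args) throws Exception {
        MemoryIndex index = new MemoryIndex();
        index.addField("content", "Readings about Salmons and other select Alaska fishing Manuals",
                new SimpleAnalyzer());

        // search(Query) returns 0.0f for no match; higher values mean better matches.
        float score = index.search(new QueryParser("content", new SimpleAnalyzer()).parse("salmon~ fish*"));
        System.out.println(score > 0.0f ? "match, relevance = " + score : "no match");
    }
}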
@@ -412,6 +413,8 @@ public class MemoryIndex {
* this instance. Useful for smart memory-sensitive caches/pools. Assumes
* fieldNames are interned, whereas tokenized terms are memory-overlaid. For
* simplicity, assumes no VM word boundary alignment of instance vars.
*
* @return the main memory consumption
*/
public int getMemorySize() {
// for example usage in a smart cache see nux.xom.pool.Pool
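
The comment points at nux.xom.pool.Pool for a real-world use; purely as an illustration, a hypothetical size-bounded cache might consume the getMemorySize() estimate like this (class name and eviction policy are invented for the sketch):

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.lucene.index.memory.MemoryIndex;

/** Hypothetical memory-sensitive cache; evicts oldest entries past a byte budget. */
public class MemoryIndexCache {
    private final Map cache = new LinkedHashMap(); // iteration order = insertion order
    private final int maxBytes;
    private int estimatedBytes = 0;

    public MemoryIndexCache(int maxBytes) {
        this.maxBytes = maxBytes;
    }

    public synchronized void put(Object key, MemoryIndex index) {
        // Re-puts of the same key are not handled in this sketch.
        estimatedBytes += index.getMemorySize();
        cache.put(key, index);
        // Evict in insertion order until the estimated footprint fits the budget.
        Iterator iter = cache.values().iterator();
        while (estimatedBytes > maxBytes && iter.hasNext()) {
            MemoryIndex evicted = (MemoryIndex) iter.next();
            estimatedBytes -= evicted.getMemorySize();
            iter.remove();
        }
    }

    public synchronized MemoryIndex get(Object key) {
        return (MemoryIndex) cache.get(key);
    }
}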
@@ -471,7 +474,11 @@ public class MemoryIndex {
return entries;
}
/** Returns a String representation of the index data for debugging purposes. */
/**
* Returns a String representation of the index data for debugging purposes.
*
* @return the string representation
*/
public String toString() {
StringBuffer result = new StringBuffer(256);
sortFields();

PatternAnalyzer.java

@@ -174,6 +174,7 @@ public class PatternAnalyzer extends Analyzer {
* the name of the field to tokenize (currently ignored).
* @param text
* the string to tokenize
* @return a new token stream
*/
public TokenStream tokenStream(String fieldName, String text) {
// Ideally the Analyzer superclass should have a method with the same signature,
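
A minimal sketch of this String-based overload, assuming the predefined DEFAULT_ANALYZER referenced later in this diff (a lower-casing, non-word-splitting analyzer); the field name is ignored, and the sample text is illustrative only:

import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.memory.PatternAnalyzer;

public class PatternAnalyzerDemo {
    public static void main(String[] args) throws Exception {
        // Tokenize a plain String directly, without wrapping it in a Reader.
        TokenStream stream = PatternAnalyzer.DEFAULT_ANALYZER.tokenStream("content",
                "The quick brown fox jumps over the lazy dog");
        for (Token token = stream.next(); token != null; token = stream.next()) {
            System.out.println(token.termText());
        }
    }
}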
@@ -200,6 +201,12 @@ public class PatternAnalyzer extends Analyzer {
* Creates a token stream that tokenizes all the text in the given Reader;
* This implementation forwards to <code>tokenStream(String, String)</code> and is
* less efficient than <code>tokenStream(String, String)</code>.
*
* @param fieldName
* the name of the field to tokenize (currently ignored).
* @param reader
* the reader delivering the text
* @return a new token stream
*/
public TokenStream tokenStream(String fieldName, Reader reader) {
if (reader instanceof FastStringReader) { // fast path
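
The Reader overload above is documented as a forwarding, less efficient path; it matters when the text arrives from a streaming source rather than a String. A sketch under the same DEFAULT_ANALYZER assumption; the file path is hypothetical:

import java.io.FileReader;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.index.memory.PatternAnalyzer;

public class PatternAnalyzerReaderDemo {
    public static void main(String[] args) throws Exception {
        // The reader's contents are buffered into a String internally, then tokenized
        // exactly as in the String overload (hence the documented extra cost).
        TokenStream stream = PatternAnalyzer.DEFAULT_ANALYZER.tokenStream("content",
                new FileReader("doc.txt"));
        for (Token token = stream.next(); token != null; token = stream.next()) {
            System.out.println(token.termText());
        }
    }
}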
@@ -214,7 +221,13 @@
}
}
/** Indicates whether some other object is "equal to" this one. */
/**
* Indicates whether some other object is "equal to" this one.
*
* @param other
* the reference object with which to compare.
* @return true if equal, false otherwise
*/
public boolean equals(Object other) {
if (this == other) return true;
if (this == DEFAULT_ANALYZER && other == EXTENDED_ANALYZER) return false;
@@ -230,7 +243,11 @@ public class PatternAnalyzer extends Analyzer {
return false;
}
/** Returns a hash code value for the object. */
/**
* Returns a hash code value for the object.
*
* @return the hash code.
*/
public int hashCode() {
if (this == DEFAULT_ANALYZER) return -1218418418; // fast path
if (this == EXTENDED_ANALYZER) return 1303507063; // fast path

SynonymMap.java

@@ -116,7 +116,11 @@ public class SynonymMap {
return copy;
}
/** Returns a String representation of the index data for debugging purposes. */
/**
* Returns a String representation of the index data for debugging purposes.
*
* @return a String representation
*/
public String toString() {
StringBuffer buf = new StringBuffer();
Iterator iter = new TreeMap(table).keySet().iterator();