fix lint problems at private level (not enforced, jflex has some issues with generated code)
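The lint problems here are javadoc complaints (checked at the private access level) about raw HTML metacharacters such as <, >, and & inside doc comments; the recurring fix throughout this commit is entity escaping. A minimal illustration of the pattern, using a hypothetical class rather than code from this commit:

    class DoclintExample {
      // A raw '>' in the doc comment below is what the javadoc lint flags:
      // /** task elapsed time. elapsed >= 0 indicates run completion! */

      /** task elapsed time. elapsed &gt;= 0 indicates run completion! */
      private long elapsed = -1; // &gt; renders as '>' in the generated HTML and lints clean
    }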

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1642261 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2014-11-28 03:53:13 +00:00
parent cd1783c58c
commit f63eec34dc
12 changed files with 22 additions and 22 deletions

View File

@@ -35,7 +35,7 @@ public class TaskStats implements Cloneable {
/** task start time */
private long start;
- /** task elapsed time. elapsed >= 0 indicates run completion! */
+ /** task elapsed time. elapsed &gt;= 0 indicates run completion! */
private long elapsed = -1;
/** max tot mem during task */

View File

@@ -529,9 +529,9 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
* Note that when TermPositions.nextPosition() is later used to
* retrieve this value, val-1 will be returned, not val.
* <P>
- * IMPORTANT NOTE: Before Lucene 2.9, val>=0 were safe (for val==0,
+ * IMPORTANT NOTE: Before Lucene 2.9, val&gt;=0 were safe (for val==0,
* the retrieved position would be -1). But starting with Lucene 2.9,
- * this unfortunately changed, and only val>0 are safe. val=0 can
+ * this unfortunately changed, and only val&gt;0 are safe. val=0 can
* still be used, but don't count on the value you retrieve later
* (it could be 0 or -1, depending on circumstances or versions).
* This change is described in Lucene's JIRA: LUCENE-1542.
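The off-by-one this comment documents is simple to state concretely; a sketch with illustrative values (not the writer's actual code):

    class PositionOffByOne {
      public static void main(String[] args) {
        int val = 5;             // position value supplied at index time; only val > 0 is safe since Lucene 2.9
        int retrieved = val - 1; // what TermPositions.nextPosition() later reports
        System.out.println(retrieved); // prints 4; for val == 0 the read-back is 0 or -1 depending on version
      }
    }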

View File

@@ -189,7 +189,7 @@ import org.apache.lucene.util.RecyclingIntBlockAllocator;
*/
public class MemoryIndex {
- /** info for each field: Map<String fieldName, Info field> */
+ /** info for each field: Map&lt;String fieldName, Info field&gt; */
private final HashMap<String,Info> fields = new HashMap<>();
/** fields sorted ascending by fieldName; lazily computed on demand */
@@ -689,8 +689,8 @@ public class MemoryIndex {
private static final class Info {
/**
- * Term strings and their positions for this field: Map <String
- * termText, ArrayIntList positions>
+ * Term strings and their positions for this field: Map &lt;String
+ * termText, ArrayIntList positions&gt;
*/
private final BytesRefHash terms;
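The two comments fixed in this file describe a two-level structure: MemoryIndex maps each field name to an Info, and each Info maps term text to its positions. Schematically, with plain collections standing in for BytesRefHash (illustrative only):

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class MemoryIndexShape {
      // fieldName -> (termText -> positions), per the javadoc above
      Map<String, Map<String, List<Integer>>> fields = new HashMap<>();
    }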

View File

@@ -1,3 +1,5 @@
+package org.apache.lucene.queries.mlt;
+
/**
* Copyright 2004-2005 The Apache Software Foundation.
*
@@ -13,7 +15,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.lucene.queries.mlt;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
@@ -45,7 +46,6 @@ import java.util.HashMap;
import java.util.Map;
import java.util.Set;
/**
* Generate "more like this" similarity queries.
* Based on this mail:
@@ -644,7 +644,7 @@ public final class MoreLikeThis {
}
/**
- * Create a PriorityQueue from a word->tf map.
+ * Create a PriorityQueue from a word-&gt;tf map.
*
* @param words a map of words keyed on the word(String) with Int objects as the values.
*/

View File

@@ -110,7 +110,7 @@ public final class SlowFuzzyTermsEnum extends FuzzyTermsEnum {
* <p>The termCompare method in FuzzyTermEnum uses Levenshtein distance to
* calculate the distance between the given term and the comparing term.
* </p>
- * <p>If the minSimilarity is >= 1.0, this uses the maxEdits as the comparison.
+ * <p>If the minSimilarity is &gt;= 1.0, this uses the maxEdits as the comparison.
* Otherwise, this method uses the following logic to calculate similarity.
* <pre>
* similarity = 1 - ((float)distance / (float) (prefixLength + Math.min(textlen, targetlen)));
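A worked instance of that formula, with illustrative values:

    class SimilarityExample {
      public static void main(String[] args) {
        int distance = 1;     // Levenshtein distance between the two terms
        int prefixLength = 2; // length of the shared, non-fuzzy prefix
        int textlen = 5, targetlen = 6;
        float similarity = 1 - ((float) distance / (float) (prefixLength + Math.min(textlen, targetlen)));
        System.out.println(similarity); // 1 - 1/7 = 0.857...; roughly, the term matches when this exceeds minSimilarity
      }
    }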

View File

@@ -407,7 +407,7 @@ public class RandomSpatialOpFuzzyPrefixTreeTest extends StrategyTestCase {
/**
* An aggregate of 2 shapes. Unfortunately we can't simply use a ShapeCollection because:
- * (a) ambiguity between CONTAINS & WITHIN for equal shapes, and
+ * (a) ambiguity between CONTAINS and WITHIN for equal shapes, and
* (b) adjacent pairs could as a whole contain the input shape.
* The tests here are sensitive to these matters, although in practice ShapeCollection
* is fine.

View File

@@ -73,12 +73,12 @@ public class DirectSpellChecker {
private int maxInspections = 5;
/** minimum accuracy for a term to match */
private float accuracy = SpellChecker.DEFAULT_ACCURACY;
- /** value in [0..1] (or absolute number >=1) representing the minimum
+ /** value in [0..1] (or absolute number &gt;= 1) representing the minimum
* number of documents (of the total) where a term should appear. */
private float thresholdFrequency = 0f;
/** minimum length of a query word to return suggestions */
private int minQueryLength = 4;
- /** value in [0..1] (or absolute number >=1) representing the maximum
+ /** value in [0..1] (or absolute number &gt;= 1) representing the maximum
* number of documents (of the total) a query term can appear in to
* be corrected. */
private float maxQueryFrequency = 0.01f;
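Both fields share one convention: a value below 1 is a fraction of the index's documents, while a value of 1 or greater is an absolute document count. A sketch of that interpretation (hypothetical helper, not the class's actual code):

    class FrequencyThreshold {
      /** interprets a [0..1] fraction or an absolute count (&gt;= 1) as a number of documents */
      static int asDocCount(float value, int totalDocs) {
        return value >= 1f
            ? (int) value                // absolute number of documents
            : (int) (value * totalDocs); // fraction of the total
      }

      public static void main(String[] args) {
        System.out.println(asDocCount(0.01f, 50000)); // 500: the default maxQueryFrequency over 50k docs
        System.out.println(asDocCount(5f, 50000));    // 5: read as an absolute count
      }
    }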

View File

@@ -119,7 +119,7 @@ import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
public class AnalyzingSuggester extends Lookup {
/**
- * FST<Weight,Surface>:
+ * FST&lt;Weight,Surface&gt;:
* input is the analyzed form, with a null byte between terms
* weights are encoded as costs: (Integer.MAX_VALUE-weight)
* surface is the original, unanalyzed form.
@@ -912,12 +912,12 @@ public class AnalyzingSuggester extends Lookup {
throw new UnsupportedOperationException();
}
- /** cost -> weight */
+ /** cost -&gt; weight */
private static int decodeWeight(long encoded) {
return (int)(Integer.MAX_VALUE - encoded);
}
- /** weight -> cost */
+ /** weight -&gt; cost */
private static int encodeWeight(long value) {
if (value < 0 || value > Integer.MAX_VALUE) {
throw new UnsupportedOperationException("cannot encode value: " + value);
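The encode/decode pair inverts weights into costs so that the FST's minimum-cost path corresponds to the highest-weighted suggestion; the same inversion recurs below in FreeTextSuggester, FSTCompletionLookup, and WFSTCompletionLookup. A quick round-trip check restating the code above:

    class WeightCostRoundTrip {
      public static void main(String[] args) {
        long weight = 42;
        long cost = Integer.MAX_VALUE - weight; // encodeWeight: higher weight => lower cost
        long back = Integer.MAX_VALUE - cost;   // decodeWeight restores the original
        System.out.println(cost + " -> " + back); // 2147483605 -> 42
      }
    }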

View File

@@ -734,12 +734,12 @@ public class FreeTextSuggester extends Lookup {
}
}
- /** weight -> cost */
+ /** weight -&gt; cost */
private long encodeWeight(long ngramCount) {
return Long.MAX_VALUE - ngramCount;
}
- /** cost -> weight */
+ /** cost -&gt; weight */
//private long decodeWeight(Pair<Long,BytesRef> output) {
private long decodeWeight(Long output) {
assert output != null;

View File

@@ -243,7 +243,7 @@ public class FSTCompletionLookup extends Lookup implements Accountable {
}
}
- /** weight -> cost */
+ /** weight -&gt; cost */
private static int encodeWeight(long value) {
if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) {
throw new UnsupportedOperationException("cannot encode value: " + value);

View File

@@ -247,12 +247,12 @@ public class WFSTCompletionLookup extends Lookup {
}
}
- /** cost -> weight */
+ /** cost -&gt; weight */
private static int decodeWeight(long encoded) {
return (int)(Integer.MAX_VALUE - encoded);
}
- /** weight -> cost */
+ /** weight -&gt; cost */
private static int encodeWeight(long value) {
if (value < 0 || value > Integer.MAX_VALUE) {
throw new UnsupportedOperationException("cannot encode value: " + value);

View File

@@ -53,7 +53,7 @@ public class DocumentDictionaryTest extends LuceneTestCase {
static final String PAYLOAD_FIELD_NAME = "p1";
static final String CONTEXT_FIELD_NAME = "c1";
- /** Returns Pair(list of invalid document terms, Map of document term -> document) */
+ /** Returns Pair(list of invalid document terms, Map of document term -&gt; document) */
private Map.Entry<List<String>, Map<String, Document>> generateIndexDocuments(int ndocs, boolean requiresPayload, boolean requiresContexts) {
Map<String, Document> docs = new HashMap<>();
List<String> invalidDocTerms = new ArrayList<>();