fix lint problems at private level (not enforced, jflex has some issues with generated code)

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1642261 13f79535-47bb-0310-9956-ffa450edef68
Robert Muir 2014-11-28 03:53:13 +00:00
parent cd1783c58c
commit f63eec34dc
12 changed files with 22 additions and 22 deletions
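All twelve files are touched for the same reason: Javadoc comment bodies are HTML, so the lint checks (run here at the private access level) reject raw '<', '>', and '&' characters. The fix throughout is to escape them as &lt; and &gt;, or to reword (e.g. '&' becomes 'and'). A minimal, hypothetical illustration of the pattern; the class and field are made up:

public class DoclintExample {
  // A Javadoc comment written like this trips the lint check:
  //   /** word -> count map; counts are >= 1, kept in a Map<String,Integer> */
  // The escaped form below is what this commit converts such comments to:

  /** word -&gt; count map; counts are &gt;= 1, kept in a Map&lt;String,Integer&gt; */
  private final java.util.Map<String, Integer> counts = new java.util.HashMap<>();
}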

TaskStats.java

@@ -35,7 +35,7 @@ public class TaskStats implements Cloneable {
   /** task start time */
   private long start;
-  /** task elapsed time. elapsed >= 0 indicates run completion! */
+  /** task elapsed time. elapsed &gt;= 0 indicates run completion! */
   private long elapsed = -1;
   /** max tot mem during task */

DirectoryTaxonomyWriter.java

@@ -529,9 +529,9 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
    * Note that when TermPositions.nextPosition() is later used to
    * retrieve this value, val-1 will be returned, not val.
    * <P>
-   * IMPORTANT NOTE: Before Lucene 2.9, val>=0 were safe (for val==0,
+   * IMPORTANT NOTE: Before Lucene 2.9, val&gt;=0 were safe (for val==0,
    * the retrieved position would be -1). But starting with Lucene 2.9,
-   * this unfortunately changed, and only val>0 are safe. val=0 can
+   * this unfortunately changed, and only val&gt;0 are safe. val=0 can
    * still be used, but don't count on the value you retrieve later
    * (it could be 0 or -1, depending on circumstances or versions).
    * This change is described in Lucene's JIRA: LUCENE-1542.

MemoryIndex.java

@@ -189,7 +189,7 @@ import org.apache.lucene.util.RecyclingIntBlockAllocator;
  */
 public class MemoryIndex {
-  /** info for each field: Map<String fieldName, Info field> */
+  /** info for each field: Map&lt;String fieldName, Info field&gt; */
   private final HashMap<String,Info> fields = new HashMap<>();
   /** fields sorted ascending by fieldName; lazily computed on demand */
@@ -689,8 +689,8 @@ public class MemoryIndex {
   private static final class Info {
     /**
-     * Term strings and their positions for this field: Map <String
-     * termText, ArrayIntList positions>
+     * Term strings and their positions for this field: Map &lt;String
+     * termText, ArrayIntList positions&gt;
      */
     private final BytesRefHash terms;
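The "Map &lt;String termText, ArrayIntList positions&gt;" wording describes the per-field inverted index that MemoryIndex keeps in memory; the real field is a BytesRefHash backed by int blocks, not literal collections. A rough sketch of just that shape, using plain collections and illustrative names:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TermPositionsSketch {
  public static void main(String[] args) {
    String[] tokens = {"quick", "brown", "quick"};
    // term text -> token positions: the shape the Javadoc describes
    Map<String, List<Integer>> positions = new HashMap<>();
    for (int pos = 0; pos < tokens.length; pos++) {
      positions.computeIfAbsent(tokens[pos], k -> new ArrayList<>()).add(pos);
    }
    System.out.println(positions); // e.g. {quick=[0, 2], brown=[1]} (HashMap order varies)
  }
}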

MoreLikeThis.java

@@ -1,3 +1,5 @@
+package org.apache.lucene.queries.mlt;
+
 /**
  * Copyright 2004-2005 The Apache Software Foundation.
  *
@@ -13,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.lucene.queries.mlt;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
@@ -45,7 +46,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
-
 /**
  * Generate "more like this" similarity queries.
  * Based on this mail:
@@ -644,7 +644,7 @@ public final class MoreLikeThis {
   }
 
   /**
-   * Create a PriorityQueue from a word->tf map.
+   * Create a PriorityQueue from a word-&gt;tf map.
    *
    * @param words a map of words keyed on the word(String) with Int objects as the values.
    */

SlowFuzzyTermsEnum.java

@@ -110,7 +110,7 @@ public final class SlowFuzzyTermsEnum extends FuzzyTermsEnum {
    * <p>The termCompare method in FuzzyTermEnum uses Levenshtein distance to
    * calculate the distance between the given term and the comparing term.
    * </p>
-   * <p>If the minSimilarity is >= 1.0, this uses the maxEdits as the comparison.
+   * <p>If the minSimilarity is &gt;= 1.0, this uses the maxEdits as the comparison.
    * Otherwise, this method uses the following logic to calculate similarity.
    * <pre>
    *   similarity = 1 - ((float)distance / (float) (prefixLength + Math.min(textlen, targetlen)));
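The quoted formula can be sanity-checked in isolation. A minimal sketch that mirrors it, with the Levenshtein distance supplied directly rather than computed by the enum:

public class SimilaritySketch {
  // Same expression as the Javadoc's <pre> block above.
  static float similarity(int distance, int prefixLength, int textlen, int targetlen) {
    return 1 - ((float) distance / (float) (prefixLength + Math.min(textlen, targetlen)));
  }

  public static void main(String[] args) {
    // "lucene" vs "lucine": one substitution, assuming no common prefix is counted.
    System.out.println(similarity(1, 0, 6, 6)); // ~0.833
    // Identical terms (distance 0) always score 1.0.
    System.out.println(similarity(0, 0, 6, 6)); // 1.0
  }
}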

RandomSpatialOpFuzzyPrefixTreeTest.java

@@ -407,7 +407,7 @@ public class RandomSpatialOpFuzzyPrefixTreeTest extends StrategyTestCase {
   /**
    * An aggregate of 2 shapes. Unfortunately we can't simply use a ShapeCollection because:
-   * (a) ambiguity between CONTAINS & WITHIN for equal shapes, and
+   * (a) ambiguity between CONTAINS and WITHIN for equal shapes, and
    * (b) adjacent pairs could as a whole contain the input shape.
    * The tests here are sensitive to these matters, although in practice ShapeCollection
    * is fine.

DirectSpellChecker.java

@@ -73,12 +73,12 @@ public class DirectSpellChecker {
   private int maxInspections = 5;
   /** minimum accuracy for a term to match */
   private float accuracy = SpellChecker.DEFAULT_ACCURACY;
-  /** value in [0..1] (or absolute number >=1) representing the minimum
+  /** value in [0..1] (or absolute number &gt;= 1) representing the minimum
    * number of documents (of the total) where a term should appear. */
   private float thresholdFrequency = 0f;
   /** minimum length of a query word to return suggestions */
   private int minQueryLength = 4;
-  /** value in [0..1] (or absolute number >=1) representing the maximum
+  /** value in [0..1] (or absolute number &gt;= 1) representing the maximum
    * number of documents (of the total) a query term can appear in to
    * be corrected. */
   private float maxQueryFrequency = 0.01f;
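Both fields reuse a single float to mean either a fraction of the index ([0..1]) or an absolute document count (>= 1). A hedged sketch of how such a dual-purpose threshold is commonly resolved against an index size; this is an illustration, not DirectSpellChecker's actual code:

public class ThresholdSketch {
  // Interprets the value as absolute when >= 1, else as a fraction of maxDoc.
  static int resolveThreshold(float threshold, int maxDoc) {
    return threshold >= 1f ? (int) threshold : (int) (threshold * maxDoc);
  }

  public static void main(String[] args) {
    System.out.println(resolveThreshold(0.5f, 1000)); // relative: 500 docs
    System.out.println(resolveThreshold(5f, 1000));   // absolute: 5 docs
  }
}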

AnalyzingSuggester.java

@@ -119,7 +119,7 @@ import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZ
 public class AnalyzingSuggester extends Lookup {
 
   /**
-   * FST<Weight,Surface>:
+   * FST&lt;Weight,Surface&gt;:
    *  input is the analyzed form, with a null byte between terms
    *  weights are encoded as costs: (Integer.MAX_VALUE-weight)
    *  surface is the original, unanalyzed form.
@@ -912,12 +912,12 @@ public class AnalyzingSuggester extends Lookup {
     throw new UnsupportedOperationException();
   }
 
-  /** cost -> weight */
+  /** cost -&gt; weight */
   private static int decodeWeight(long encoded) {
     return (int)(Integer.MAX_VALUE - encoded);
   }
 
-  /** weight -> cost */
+  /** weight -&gt; cost */
   private static int encodeWeight(long value) {
     if (value < 0 || value > Integer.MAX_VALUE) {
       throw new UnsupportedOperationException("cannot encode value: " + value);

FreeTextSuggester.java

@@ -734,12 +734,12 @@ public class FreeTextSuggester extends Lookup {
     }
   }
 
-  /** weight -> cost */
+  /** weight -&gt; cost */
   private long encodeWeight(long ngramCount) {
     return Long.MAX_VALUE - ngramCount;
   }
 
-  /** cost -> weight */
+  /** cost -&gt; weight */
   //private long decodeWeight(Pair<Long,BytesRef> output) {
   private long decodeWeight(Long output) {
     assert output != null;

FSTCompletionLookup.java

@@ -243,7 +243,7 @@ public class FSTCompletionLookup extends Lookup implements Accountable {
     }
   }
 
-  /** weight -> cost */
+  /** weight -&gt; cost */
   private static int encodeWeight(long value) {
     if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) {
       throw new UnsupportedOperationException("cannot encode value: " + value);

WFSTCompletionLookup.java

@@ -247,12 +247,12 @@ public class WFSTCompletionLookup extends Lookup {
     }
   }
 
-  /** cost -> weight */
+  /** cost -&gt; weight */
   private static int decodeWeight(long encoded) {
     return (int)(Integer.MAX_VALUE - encoded);
   }
 
-  /** weight -> cost */
+  /** weight -&gt; cost */
   private static int encodeWeight(long value) {
     if (value < 0 || value > Integer.MAX_VALUE) {
       throw new UnsupportedOperationException("cannot encode value: " + value);

DocumentDictionaryTest.java

@@ -53,7 +53,7 @@ public class DocumentDictionaryTest extends LuceneTestCase {
   static final String PAYLOAD_FIELD_NAME = "p1";
   static final String CONTEXT_FIELD_NAME = "c1";
 
-  /** Returns Pair(list of invalid document terms, Map of document term -> document) */
+  /** Returns Pair(list of invalid document terms, Map of document term -&gt; document) */
   private Map.Entry<List<String>, Map<String, Document>> generateIndexDocuments(int ndocs, boolean requiresPayload, boolean requiresContexts) {
     Map<String, Document> docs = new HashMap<>();
     List<String> invalidDocTerms = new ArrayList<>();