Remove redundant field initializers (#13060)

Make protected fields in final classes private.
Dmitry Cherniachenko, 2024-02-19 10:52:43 +01:00, committed by GitHub
parent 39f681812b
commit 9a88d8ad25
No known key found for this signature in database (GPG Key ID: B5690EEEBB952194)
32 changed files with 48 additions and 48 deletions
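Every hunk below follows one of the two shapes named in the commit message. For the initializer removals: when a field is assigned on every constructor path, a declaration-site initializer is dead weight, and dropping it usually lets the field become final so the compiler enforces the single assignment; where the initializer merely repeats Java's implicit default (0, false, null), it is simply deleted. A minimal before/after sketch with hypothetical names, not code from this patch:

// Before: the declaration-site default is overwritten by the
// constructor, so the initializer is redundant.
public class ExampleBefore {
  private static final int DEFAULT_BUCKET_COUNT = 512;
  private int bucketCount = DEFAULT_BUCKET_COUNT;

  public ExampleBefore(int bucketCount) {
    this.bucketCount = bucketCount;
  }
}

// After: no initializer, and 'final' makes the compiler verify that
// every constructor assigns the field exactly once.
public class ExampleAfter {
  private static final int DEFAULT_BUCKET_COUNT = 512;
  private final int bucketCount;

  public ExampleAfter() {
    this(DEFAULT_BUCKET_COUNT); // the default moves into the constructor
  }

  public ExampleAfter(int bucketCount) {
    this.bucketCount = bucketCount;
  }
}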

@@ -59,11 +59,11 @@ public class MinHashFilter extends TokenFilter {
   private final List<List<FixedSizeTreeSet<LongPair>>> minHashSets;
-  private int hashSetSize = DEFAULT_HASH_SET_SIZE;
+  private final int hashSetSize;
-  private int bucketCount = DEFAULT_BUCKET_COUNT;
+  private final int bucketCount;
-  private int hashCount = DEFAULT_HASH_COUNT;
+  private final int hashCount;
   private boolean requiresInitialisation = true;

@@ -32,13 +32,13 @@ public class MinHashFilterFactory extends TokenFilterFactory {
   /** SPI name */
   public static final String NAME = "minHash";
-  private int hashCount = MinHashFilter.DEFAULT_HASH_COUNT;
+  private final int hashCount;
-  private int bucketCount = MinHashFilter.DEFAULT_BUCKET_COUNT;
+  private final int bucketCount;
-  private int hashSetSize = MinHashFilter.DEFAULT_HASH_SET_SIZE;
+  private final int hashSetSize;
-  private boolean withRotation;
+  private final boolean withRotation;
   /** Create a {@link MinHashFilterFactory}. */
   public MinHashFilterFactory(Map<String, String> args) {

@@ -67,7 +67,7 @@ public class WordDelimiterGraphFilterFactory extends TokenFilterFactory
   private final int flags;
   byte[] typeTable = null;
   private CharArraySet protectedWords = null;
-  private boolean adjustOffsets = false;
+  private final boolean adjustOffsets;
   /** Creates a new WordDelimiterGraphFilterFactory */
   public WordDelimiterGraphFilterFactory(Map<String, String> args) {

@@ -89,7 +89,7 @@ public final class DutchAnalyzer extends Analyzer {
   private final CharArraySet stoptable;
   /** Contains words that should be indexed but not stemmed. */
-  private CharArraySet excltable = CharArraySet.EMPTY_SET;
+  private final CharArraySet excltable;
   private final StemmerOverrideMap stemdict;

@@ -41,8 +41,8 @@ public class PatternCaptureGroupFilterFactory extends TokenFilterFactory {
   /** SPI name */
   public static final String NAME = "patternCaptureGroup";
-  private Pattern pattern;
-  private boolean preserveOriginal = true;
+  private final Pattern pattern;
+  private final boolean preserveOriginal;
   public PatternCaptureGroupFilterFactory(Map<String, String> args) {
     super(args);

@@ -114,7 +114,7 @@ public class JapaneseTokenizerFactory extends TokenizerFactory implements Resour
    * /箱根山-箱根/成田空港-成田/ requests "箱根" and "成田" to be in the result in NBEST output.
    */
   private final String nbestExamples;
-  private int nbestCost = -1;
+  private int nbestCost;
   /** Creates a new JapaneseTokenizerFactory */
   public JapaneseTokenizerFactory(Map<String, String> args) {

@@ -44,7 +44,7 @@ public final class OpenNLPTokenizer extends SegmentingTokenizerBase {
   private int sentenceStart = 0;
   private int sentenceIndex = -1;
-  private NLPTokenizerOp tokenizerOp = null;
+  private final NLPTokenizerOp tokenizerOp;
   public OpenNLPTokenizer(
       AttributeFactory factory, NLPSentenceDetectorOp sentenceOp, NLPTokenizerOp tokenizerOp)

@@ -23,7 +23,7 @@ import opennlp.tools.chunker.ChunkerModel;
 /** Supply OpenNLP Chunking tool Requires binary models from OpenNLP project on SourceForge. */
 public class NLPChunkerOp {
-  private ChunkerME chunker = null;
+  private final ChunkerME chunker;
   public NLPChunkerOp(ChunkerModel chunkerModel) throws IOException {
     chunker = new ChunkerME(chunkerModel);

@@ -27,7 +27,7 @@ import opennlp.tools.postag.POSTaggerME;
  * SourceForge.
  */
 public class NLPPOSTaggerOp {
-  private POSTagger tagger = null;
+  private final POSTagger tagger;
   public NLPPOSTaggerOp(POSModel model) throws IOException {
     tagger = new POSTaggerME(model);

@@ -32,10 +32,10 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
  */
 public final class DaitchMokotoffSoundexFilter extends TokenFilter {
   /** true if encoded tokens should be added as synonyms */
-  protected boolean inject = true;
+  private final boolean inject;
   /** phonetic encoder */
-  protected DaitchMokotoffSoundex encoder = new DaitchMokotoffSoundex();
+  private final DaitchMokotoffSoundex encoder = new DaitchMokotoffSoundex();
   // output is a string such as ab|ac|...
   private static final Pattern pattern = Pattern.compile("([^|]+)");
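This hunk shows the commit's second change: DaitchMokotoffSoundexFilter is a final class, so no subclass can exist to take advantage of protected access, and private is the honest modifier. A tiny illustration with made-up names:

// In a final class, 'protected' grants nothing beyond package access,
// because the class can never be extended.
public final class Widget {
  protected int size; // legal, but misleading; no subclass will ever see it
}

// class WideWidget extends Widget {} // does not compile: Widget is final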

@@ -32,13 +32,13 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
  */
 public final class PhoneticFilter extends TokenFilter {
   /** true if encoded tokens should be added as synonyms */
-  protected boolean inject = true;
+  private final boolean inject;
   /** phonetic encoder */
-  protected Encoder encoder = null;
+  private final Encoder encoder;
   /** captured state, non-null when <code>inject=true</code> and a token is buffered */
-  protected State save = null;
+  private State save = null;
   private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
   private final PositionIncrementAttribute posAtt = addAttribute(PositionIncrementAttribute.class);

@@ -73,7 +73,7 @@ public class Trie {
   List<CharSequence> cmds = new ArrayList<>();
   int root;
-  boolean forward = false;
+  boolean forward;
   /**
    * Constructor for the Trie object.

@@ -43,7 +43,7 @@ public class TaskSequence extends PerfTask {
   private boolean resetExhausted = false;
   private PerfTask[] tasksArray;
   private boolean anyExhaustibleTasks;
-  private boolean collapsable = false; // to not collapse external sequence named in alg.
+  private final boolean collapsable; // to not collapse external sequence named in alg.
   private boolean fixedTime; // true if we run for fixed time
   private double runTimeSec; // how long to run for

@@ -36,7 +36,7 @@ public class TestTrecContentSource extends LuceneTestCase {
   /** A TrecDocMaker which works on a String and not files. */
   private static class StringableTrecSource extends TrecContentSource {
-    private String docs = null;
+    private final String docs;
     public StringableTrecSource(String docs, boolean forever) {
       this.docs = docs;

@@ -32,7 +32,7 @@ public final class FieldInfo {
   /** Internal field number */
   public final int number;
-  private DocValuesType docValuesType = DocValuesType.NONE;
+  private DocValuesType docValuesType;
   // True if any document indexed term vectors
   private boolean storeTermVector;

@@ -84,7 +84,7 @@ public class LiveIndexWriterConfig {
   protected volatile int perThreadHardLimitMB;
   /** True if segment flushes should use compound file format */
-  protected volatile boolean useCompoundFile = IndexWriterConfig.DEFAULT_USE_COMPOUND_FILE_SYSTEM;
+  protected volatile boolean useCompoundFile;
   /** True if calls to {@link IndexWriter#close()} should first do a commit. */
   protected boolean commitOnClose = IndexWriterConfig.DEFAULT_COMMIT_ON_CLOSE;

@@ -597,12 +597,12 @@ public abstract class MergePolicy {
    * If the size of the merge segment exceeds this ratio of the total index size then it will remain
    * in non-compound format
    */
-  protected double noCFSRatio = DEFAULT_NO_CFS_RATIO;
+  protected double noCFSRatio;
   /**
    * If the size of the merged segment exceeds this value then it will not use compound file format.
    */
-  protected long maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
+  protected long maxCFSSegmentSize;
   /** Creates a new merge policy instance. */
   protected MergePolicy() {
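MergePolicy is abstract rather than final, so noCFSRatio and maxCFSSegmentSize stay protected and non-final; only their initializers go. The hunk is cut off before the constructor body, but for the change to preserve behavior the defaults must now be assigned during construction, along these lines (a sketch of the idea, not the actual patch):

// Hypothetical stand-in showing where the removed defaults must move.
public abstract class SketchMergePolicy {
  protected static final double DEFAULT_NO_CFS_RATIO = 1.0;
  protected static final long DEFAULT_MAX_CFS_SEGMENT_SIZE = Long.MAX_VALUE;

  protected double noCFSRatio;
  protected long maxCFSSegmentSize;

  protected SketchMergePolicy() {
    // assignment moves from the field declarations into the constructor
    noCFSRatio = DEFAULT_NO_CFS_RATIO;
    maxCFSSegmentSize = DEFAULT_MAX_CFS_SEGMENT_SIZE;
  }
}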

@@ -120,7 +120,7 @@ final class WANDScorer extends Scorer {
   private final int scalingFactor;
   // scaled min competitive score
-  private long minCompetitiveScore = 0;
+  private long minCompetitiveScore;
   private final Scorer[] allScorers;

@@ -89,7 +89,7 @@ public class TermOrdValComparator extends FieldComparator<BytesRef> {
   private boolean singleSort;
   /** Whether this comparator is allowed to skip documents. */
-  private boolean canSkipDocuments = true;
+  private boolean canSkipDocuments;
   /** Whether the collector is done with counting hits so that we can start skipping documents. */
   private boolean hitsThresholdReached = false;

@@ -253,7 +253,7 @@ public class RoaringDocIdSet extends DocIdSet {
   private class Iterator extends DocIdSetIterator {
     int block;
-    DocIdSetIterator sub = null;
+    DocIdSetIterator sub;
     int doc;
     Iterator() throws IOException {

@@ -31,7 +31,7 @@ import org.apache.lucene.util.IntsRef;
  */
 public class LimitedFiniteStringsIterator extends FiniteStringsIterator {
   /** Maximum number of finite strings to create. */
-  private int limit = Integer.MAX_VALUE;
+  private final int limit;
   /** Number of generated finite strings. */
   private int count = 0;

@@ -123,7 +123,7 @@ public abstract class OffsetsEnum implements Comparable<OffsetsEnum>, Closeable
     private final PostingsEnum postingsEnum; // with offsets
     private final int freq;
-    private int posCounter = -1;
+    private int posCounter;
     public OfPostings(BytesRef term, int freq, PostingsEnum postingsEnum) throws IOException {
       this.term = Objects.requireNonNull(term);

@@ -30,7 +30,7 @@ import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
  */
 public class BoostQueryNode extends QueryNodeImpl {
-  private float value = 0;
+  private float value;
   /**
    * Constructs a boost node

@@ -84,7 +84,7 @@ public class ModifierQueryNode extends QueryNodeImpl {
     }
   }
-  private Modifier modifier = Modifier.MOD_NONE;
+  private Modifier modifier;
   /**
    * Used to store the modifier value on the original query string

@@ -25,9 +25,9 @@ import org.apache.lucene.queryparser.flexible.core.parser.EscapeQuerySyntax;
  */
 public class OpaqueQueryNode extends QueryNodeImpl {
-  private CharSequence schema = null;
+  private CharSequence schema;
-  private CharSequence value = null;
+  private CharSequence value;
   /**
    * @param schema - schema identifier

@@ -41,7 +41,7 @@ public class PathQueryNode extends QueryNodeImpl {
   /** Term text with a beginning and end position */
   public static class QueryText implements Cloneable {
-    CharSequence value = null;
+    CharSequence value;
     /** != null The term's begin position. */
     int begin;
@@ -97,7 +97,7 @@ public class PathQueryNode extends QueryNodeImpl {
     }
   }
-  private List<QueryText> values = null;
+  private List<QueryText> values;
   /**
    * @param pathElements - List of QueryText objects

@@ -25,7 +25,7 @@ import org.apache.lucene.search.PhraseQuery; // javadocs
 /** Query node for {@link PhraseQuery}'s slop factor. */
 public class PhraseSlopQueryNode extends QueryNodeImpl implements FieldableNode {
-  private int value = 0;
+  private int value;
   /**
    * @exception QueryNodeError throw in overridden method to disallow

@@ -57,9 +57,9 @@ public class ProximityQueryNode extends BooleanQueryNode {
   /** utility class containing the distance condition and number */
   public static class ProximityType {
-    int pDistance = 0;
+    int pDistance;
-    Type pType = null;
+    Type pType;
     public ProximityType(Type type) {
       this(type, 0);
@@ -71,10 +71,10 @@ public class ProximityQueryNode extends BooleanQueryNode {
     }
   }
-  private Type proximityType = Type.SENTENCE;
+  private Type proximityType;
   private int distance = -1;
-  private boolean inorder = false;
-  private CharSequence field = null;
+  private final boolean inorder;
+  private CharSequence field;
   /**
    * @param clauses - QueryNode children

@@ -32,7 +32,7 @@ import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
  */
 public class SlopQueryNode extends QueryNodeImpl implements FieldableNode {
-  private int value = 0;
+  private int value;
   /**
    * @param query - QueryNode Tree with the phrase

@@ -34,7 +34,7 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi
  */
 public class FieldBoostMapFCListener implements FieldConfigListener {
-  private QueryConfigHandler config = null;
+  private final QueryConfigHandler config;
   public FieldBoostMapFCListener(QueryConfigHandler config) {
     this.config = config;

@@ -36,7 +36,7 @@ import org.apache.lucene.queryparser.flexible.standard.config.StandardQueryConfi
  */
 public class FieldDateResolutionFCListener implements FieldConfigListener {
-  private QueryConfigHandler config = null;
+  private final QueryConfigHandler config;
   public FieldDateResolutionFCListener(QueryConfigHandler config) {
     this.config = config;

@@ -175,7 +175,7 @@ public class DocHelper {
   public static Field textUtfField2 =
       new Field(TEXT_FIELD_UTF2_KEY, FIELD_UTF2_TEXT, TEXT_TYPE_STORED_WITH_TVS);
-  public static Map<String, Object> nameValues = null;
+  public static Map<String, Object> nameValues;
   // ordered list of all the fields...
   // could use LinkedHashMap for this purpose if Java1.4 is OK