LUCENE-9856: fail precommit on unused local variables, take two (#37)

Enable ecj detection of unused local variables and unused private members (fields and methods). Allow SuppressWarnings("unused") to disable the unused checks (e.g. for generated code or very special tests). Fix gradlew regenerate for Python 3.9, and add SuppressWarnings("unused") to the generated javacc and jflex code. Enable a few other easy ecj checks, such as missing Deprecated annotations, missing hashCode when equals is overridden, and equals comparisons across unrelated types.

Co-authored-by: Mike McCandless <mikemccand@apache.org>
Robert Muir 2021-03-23 13:59:00 -04:00 committed by GitHub
parent 53fd63dbb2
commit 945b1cb872
168 changed files with 276 additions and 1101 deletions
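To make the new policy concrete, here is a minimal sketch (the class and member names below are hypothetical, not taken from this commit) of what ecj now rejects with unusedLocal and unusedPrivateMember set to error, and of the class-level escape hatch that this change applies to the regenerated javacc/jflex parsers:

// Rejected by precommit: the private field and the local are never read.
class UnusedExample {
  private int neverRead;               // flagged by unusedPrivateMember=error
  int compute(int x) {
    int twice = x * 2;                 // flagged by unusedLocal=error
    return x + 1;
  }
}

// Accepted: the annotation disables the unused checks for the whole class,
// the same mechanism used for the generated QueryParser/TokenManager classes.
@SuppressWarnings("unused")
class GeneratedLikeExample {
  private int scratch;
  int compute(int x) {
    int lookahead = x * 2;
    return x + 1;
  }
}

Because generated sources cannot be hand-annotated, the regenerate tasks patch the annotation in with text.replace, as shown in the build.gradle hunks below.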


@ -86,7 +86,7 @@ ext {
scriptDepVersions = [
"apache-rat": "0.11",
"commons-codec": "1.13",
"ecj": "3.19.0",
"ecj": "3.25.0",
"javacc": "7.0.4",
"jflex": "1.7.0",
"jgit": "5.9.0.202009080501-r",


@ -95,6 +95,12 @@ def commonCleanups = { FileTree generatedFiles ->
text = text.replace(
"public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }",
"// (setDebugStream omitted).")
text = text.replace(
"public class QueryParserTokenManager ",
'@SuppressWarnings("unused") public class QueryParserTokenManager ')
text = text.replace(
"public class StandardSyntaxParserTokenManager ",
'@SuppressWarnings("unused") public class StandardSyntaxParserTokenManager ')
return text
})
}
@ -123,6 +129,9 @@ configure(project(":lucene:queryparser")) {
text = text.replace(
"final private LookaheadSuccess jj_ls =",
"static final private LookaheadSuccess jj_ls =")
text = text.replace(
"public class QueryParser ",
'@SuppressWarnings("unused") public class QueryParser ')
return text
})
}
@ -145,6 +154,9 @@ configure(project(":lucene:queryparser")) {
text = text.replace(
"new java.util.ArrayList<int[]>",
"new java.util.ArrayList<>")
text = text.replace(
"public class QueryParser ",
'@SuppressWarnings("unused") public class QueryParser ')
return text
})
}
@ -221,6 +233,9 @@ configure(project(":lucene:queryparser")) {
text = text.replace(
"Collections.<QueryNode> singletonList",
"Collections.singletonList")
text = text.replace(
"public class StandardSyntaxParser ",
'@SuppressWarnings("unused") public class StandardSyntaxParser ')
return text
})
}


@ -3,6 +3,7 @@ eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.targetPlatform=11
org.eclipse.jdt.core.compiler.compliance=11
org.eclipse.jdt.core.compiler.doc.comment.support=enabled
org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=enabled
org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=error
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.comparingIdentical=error
@ -32,6 +33,7 @@ org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error
org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=error
org.eclipse.jdt.core.compiler.problem.unusedImport=error
org.eclipse.jdt.core.compiler.problem.unusedLocal=error
org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
org.eclipse.jdt.core.compiler.source=11


@ -1,13 +1,24 @@
#Sun Sep 23 20:55:03 EDT 2012
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=disabled
org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
org.eclipse.jdt.core.compiler.annotation.nonnull=org.eclipse.jdt.annotation.NonNull
org.eclipse.jdt.core.compiler.annotation.nonnull.secondary=
org.eclipse.jdt.core.compiler.annotation.nonnullbydefault=org.eclipse.jdt.annotation.NonNullByDefault
org.eclipse.jdt.core.compiler.annotation.nonnullisdefault=disabled
org.eclipse.jdt.core.compiler.annotation.nonnullbydefault.secondary=
org.eclipse.jdt.core.compiler.annotation.nullable=org.eclipse.jdt.annotation.Nullable
org.eclipse.jdt.core.compiler.annotation.nullable.secondary=
org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.methodParameters=do not generate
org.eclipse.jdt.core.compiler.codegen.targetPlatform=11
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.compliance=11
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.doc.comment.support=enabled
org.eclipse.jdt.core.compiler.problem.APILeak=error
org.eclipse.jdt.core.compiler.problem.annotatedTypeArgumentToUnannotated=error
org.eclipse.jdt.core.compiler.problem.annotationSuperInterface=error
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.autoboxing=ignore
@ -18,7 +29,9 @@ org.eclipse.jdt.core.compiler.problem.deprecationInDeprecatedCode=disabled
org.eclipse.jdt.core.compiler.problem.deprecationWhenOverridingDeprecatedMethod=disabled
org.eclipse.jdt.core.compiler.problem.discouragedReference=error
org.eclipse.jdt.core.compiler.problem.emptyStatement=ignore
org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.problem.explicitlyClosedAutoCloseable=ignore
org.eclipse.jdt.core.compiler.problem.fallthroughCase=ignore
org.eclipse.jdt.core.compiler.problem.fatalOptionalError=disabled
org.eclipse.jdt.core.compiler.problem.fieldHiding=ignore
@ -37,8 +50,10 @@ org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsNotVisibleRef=disabled
org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsVisibility=private
org.eclipse.jdt.core.compiler.problem.localVariableHiding=ignore
org.eclipse.jdt.core.compiler.problem.methodWithConstructorName=error
org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=ignore
org.eclipse.jdt.core.compiler.problem.missingHashCodeMethod=ignore
org.eclipse.jdt.core.compiler.problem.missingDefaultCase=ignore
org.eclipse.jdt.core.compiler.problem.missingDeprecatedAnnotation=error
org.eclipse.jdt.core.compiler.problem.missingEnumCaseDespiteDefault=disabled
org.eclipse.jdt.core.compiler.problem.missingHashCodeMethod=error
org.eclipse.jdt.core.compiler.problem.missingJavadocComments=ignore
org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsOverriding=disabled
org.eclipse.jdt.core.compiler.problem.missingJavadocCommentsVisibility=public
@ -54,43 +69,63 @@ org.eclipse.jdt.core.compiler.problem.missingSynchronizedOnInheritedMethod=ignor
org.eclipse.jdt.core.compiler.problem.noEffectAssignment=error
org.eclipse.jdt.core.compiler.problem.noImplicitStringConversion=error
org.eclipse.jdt.core.compiler.problem.nonExternalizedStringLiteral=ignore
org.eclipse.jdt.core.compiler.problem.nonnullParameterAnnotationDropped=error
org.eclipse.jdt.core.compiler.problem.nonnullTypeVariableFromLegacyInvocation=error
org.eclipse.jdt.core.compiler.problem.nullAnnotationInferenceConflict=error
org.eclipse.jdt.core.compiler.problem.nullReference=ignore
org.eclipse.jdt.core.compiler.problem.nullSpecViolation=error
org.eclipse.jdt.core.compiler.problem.nullUncheckedConversion=error
org.eclipse.jdt.core.compiler.problem.overridingPackageDefaultMethod=error
org.eclipse.jdt.core.compiler.problem.parameterAssignment=ignore
org.eclipse.jdt.core.compiler.problem.pessimisticNullAnalysisForFreeTypeVariables=error
org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=ignore
org.eclipse.jdt.core.compiler.problem.potentialNullReference=ignore
org.eclipse.jdt.core.compiler.problem.potentiallyUnclosedCloseable=ignore
org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
org.eclipse.jdt.core.compiler.problem.redundantNullAnnotation=error
org.eclipse.jdt.core.compiler.problem.redundantNullCheck=ignore
org.eclipse.jdt.core.compiler.problem.redundantSpecificationOfTypeArguments=ignore
org.eclipse.jdt.core.compiler.problem.redundantSuperinterface=ignore
org.eclipse.jdt.core.compiler.problem.reportMethodCanBePotentiallyStatic=ignore
org.eclipse.jdt.core.compiler.problem.reportMethodCanBeStatic=ignore
org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=error
org.eclipse.jdt.core.compiler.problem.specialParameterHidingField=disabled
org.eclipse.jdt.core.compiler.problem.staticAccessReceiver=ignore
org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=disabled
org.eclipse.jdt.core.compiler.problem.suppressOptionalErrors=enabled
org.eclipse.jdt.core.compiler.problem.suppressWarnings=enabled
org.eclipse.jdt.core.compiler.problem.suppressWarningsNotFullyAnalysed=error
org.eclipse.jdt.core.compiler.problem.syntacticNullAnalysisForFields=disabled
org.eclipse.jdt.core.compiler.problem.syntheticAccessEmulation=ignore
org.eclipse.jdt.core.compiler.problem.terminalDeprecation=ignore
org.eclipse.jdt.core.compiler.problem.typeParameterHiding=ignore
org.eclipse.jdt.core.compiler.problem.unavoidableGenericTypeProblems=enabled
org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=ignore
org.eclipse.jdt.core.compiler.problem.unclosedCloseable=ignore
org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=ignore
org.eclipse.jdt.core.compiler.problem.unhandledWarningToken=ignore
org.eclipse.jdt.core.compiler.problem.unlikelyCollectionMethodArgumentType=error
org.eclipse.jdt.core.compiler.problem.unlikelyCollectionMethodArgumentTypeStrict=disabled
org.eclipse.jdt.core.compiler.problem.unlikelyEqualsArgumentType=error
org.eclipse.jdt.core.compiler.problem.unnecessaryElse=ignore
org.eclipse.jdt.core.compiler.problem.unnecessaryTypeCheck=ignore
org.eclipse.jdt.core.compiler.problem.unqualifiedFieldAccess=ignore
org.eclipse.jdt.core.compiler.problem.unstableAutoModuleName=ignore
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=ignore
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionExemptExceptionAndThrowable=enabled
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionIncludeDocCommentReference=enabled
org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled
org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=ignore
org.eclipse.jdt.core.compiler.problem.unusedImport=error
org.eclipse.jdt.core.compiler.problem.unusedLabel=ignore
org.eclipse.jdt.core.compiler.problem.unusedLocal=ignore
org.eclipse.jdt.core.compiler.problem.unusedLabel=error
org.eclipse.jdt.core.compiler.problem.unusedLocal=error
org.eclipse.jdt.core.compiler.problem.unusedObjectAllocation=ignore
org.eclipse.jdt.core.compiler.problem.unusedParameter=ignore
org.eclipse.jdt.core.compiler.problem.unusedParameterIncludeDocCommentReference=enabled
org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled
org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=ignore
org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error
org.eclipse.jdt.core.compiler.problem.unusedTypeParameter=ignore
org.eclipse.jdt.core.compiler.problem.unusedWarningToken=ignore
org.eclipse.jdt.core.compiler.problem.varargsArgumentNeedCast=error
org.eclipse.jdt.core.compiler.release=disabled
org.eclipse.jdt.core.compiler.source=11


@ -32,7 +32,7 @@ import org.apache.lucene.analysis.util.OpenStringBuilder;
/**
* A CharFilter that wraps another Reader and attempts to strip out HTML constructs.
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
public final class HTMLStripCharFilter extends BaseCharFilter {


@ -30,7 +30,7 @@ import org.apache.lucene.analysis.util.OpenStringBuilder;
/**
* A CharFilter that wraps another Reader and attempts to strip out HTML constructs.
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
%%
%unicode 9.0


@ -22,7 +22,7 @@ package org.apache.lucene.analysis.classic;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/** This class implements the classic lucene StandardTokenizer up until 3.0 */
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused", "fallthrough"})
class ClassicTokenizerImpl {
/** This character denotes the end of file */


@ -22,7 +22,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* This class implements the classic lucene StandardTokenizer up until 3.0
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
%%
%class ClassicTokenizerImpl


@ -22,7 +22,6 @@ import javax.xml.parsers.SAXParserFactory;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
@ -341,24 +340,4 @@ public class PatternParser extends DefaultHandler {
word = readToken(chars);
}
}
/** Returns a string of the location. */
private String getLocationString(SAXParseException ex) {
StringBuilder str = new StringBuilder();
String systemId = ex.getSystemId();
if (systemId != null) {
int index = systemId.lastIndexOf('/');
if (index != -1) {
systemId = systemId.substring(index + 1);
}
str.append(systemId);
}
str.append(':');
str.append(ex.getLineNumber());
str.append(':');
str.append(ex.getColumnNumber());
return str.toString();
} // getLocationString(SAXParseException):String
}


@ -42,7 +42,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li>&lt;EMOJI&gt;: A sequence of Emoji characters</li>
* </ul>
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
public final class UAX29URLEmailTokenizerImpl {


@ -40,7 +40,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li>&lt;EMOJI&gt;: A sequence of Emoji characters</li>
* </ul>
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
%%
%unicode 9.0


@ -619,10 +619,6 @@ public class KStemmer {
* CharArrayMap<String>(maxCacheSize,false); }
***/
private char finalChar() {
return word.charAt(k);
}
private char penultChar() {
return word.charAt(k - 1);
}


@ -59,8 +59,6 @@ public class HunspellStemFilterFactory extends TokenFilterFactory implements Res
private static final String PARAM_DICTIONARY = "dictionary";
private static final String PARAM_AFFIX = "affix";
// NOTE: this one is currently unused?:
private static final String PARAM_RECURSION_CAP = "recursionCap";
private static final String PARAM_IGNORE_CASE = "ignoreCase";
private static final String PARAM_LONGEST_ONLY = "longestOnly";


@ -31,7 +31,6 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
public final class FixBrokenOffsetsFilter extends TokenFilter {
private int lastStartOffset;
private int lastEndOffset;
private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@ -58,7 +57,6 @@ public final class FixBrokenOffsetsFilter extends TokenFilter {
public void reset() throws IOException {
super.reset();
lastStartOffset = 0;
lastEndOffset = 0;
}
private void fixOffsets() {
@ -72,6 +70,5 @@ public final class FixBrokenOffsetsFilter extends TokenFilter {
}
offsetAtt.setOffset(startOffset, endOffset);
lastStartOffset = startOffset;
lastEndOffset = endOffset;
}
}


@ -22,7 +22,7 @@ package org.apache.lucene.analysis.wikipedia;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/** JFlex-generated tokenizer that is aware of Wikipedia syntax. */
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused", "fallthrough"})
class WikipediaTokenizerImpl {
/** This character denotes the end of file */


@ -22,7 +22,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
/**
* JFlex-generated tokenizer that is aware of Wikipedia syntax.
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
%%
%class WikipediaTokenizerImpl


@ -358,9 +358,8 @@ public class TestHTMLStripCharFilter extends BaseTokenStreamTestCase {
static void assertLegalOffsets(String in) throws Exception {
int length = in.length();
HTMLStripCharFilter reader = new HTMLStripCharFilter(new BufferedReader(new StringReader(in)));
int ch = 0;
int off = 0;
while ((ch = reader.read()) != -1) {
while (reader.read() != -1) {
int correction = reader.correctOffset(off);
assertTrue(
"invalid offset correction: " + off + "->" + correction + " for doc of length: " + length,


@ -56,11 +56,10 @@ public class TestCJKBigramFilterFactory extends BaseTokenStreamFactoryTestCase {
/** Test that bogus arguments result in exception */
public void testBogusArguments() throws Exception {
IllegalArgumentException expected =
expectThrows(
IllegalArgumentException.class,
() -> {
tokenFilterFactory("CJKBigram", "bogusArg", "bogusValue");
});
expectThrows(
IllegalArgumentException.class,
() -> {
tokenFilterFactory("CJKBigram", "bogusArg", "bogusValue");
});
}
}


@ -673,16 +673,4 @@ public class TestWordDelimiterFilter extends BaseTokenStreamTestCase {
null,
false);
}
private Analyzer getAnalyzer(final int flags) {
return new Analyzer() {
@Override
protected TokenStreamComponents createComponents(String fieldName) {
Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
return new TokenStreamComponents(
tokenizer, new WordDelimiterFilter(tokenizer, flags, null));
}
};
}
}


@ -800,16 +800,6 @@ public class TestWordDelimiterGraphFilter extends BaseTokenStreamTestCase {
return (flags & flag) != 0;
}
private static boolean isEnglishPossessive(String text, int pos) {
if (pos > 2) {
if ((text.charAt(pos - 1) == 's' || text.charAt(pos - 1) == 'S')
&& (pos == text.length() || text.charAt(pos) != '-')) {
text = text.substring(0, text.length() - 2);
}
}
return true;
}
private static class WordPart {
final String part;
final int startOffset;


@ -44,7 +44,7 @@ public class TestNGramTokenizer extends BaseTokenStreamTestCase {
expectThrows(
IllegalArgumentException.class,
() -> {
NGramTokenizer tok = new NGramTokenizer(2, 1);
new NGramTokenizer(2, 1);
});
}


@ -70,7 +70,7 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testNoTokens() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer(".*");
CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.getAttribute(CharTermAttribute.class);
String s;
while (true) {
s = TestUtil.randomUnicodeString(random());
@ -95,7 +95,7 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testSplitSingleCharWhitespace() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]");
CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("a \tb c"));
assertTokenStreamContents(
t, new String[] {"a", "b", "c"}, new int[] {0, 3, 7}, new int[] {1, 4, 8});
@ -103,7 +103,7 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testSplitMultiCharWhitespace() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]*");
CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("a \tb c"));
assertTokenStreamContents(
t, new String[] {"a", "b", "c"}, new int[] {0, 3, 7}, new int[] {1, 4, 8});
@ -111,21 +111,21 @@ public class TestSimplePatternSplitTokenizer extends BaseTokenStreamTestCase {
public void testLeadingNonToken() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]*");
CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader(" a c"));
assertTokenStreamContents(t, new String[] {"a", "c"}, new int[] {4, 6}, new int[] {5, 7});
}
public void testTrailingNonToken() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("[ \t\r\n]*");
CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("a c "));
assertTokenStreamContents(t, new String[] {"a", "c"}, new int[] {0, 2}, new int[] {1, 3});
}
public void testEmptyStringPatternOneMatch() throws Exception {
Tokenizer t = new SimplePatternSplitTokenizer("a*");
CharTermAttribute termAtt = t.getAttribute(CharTermAttribute.class);
t.getAttribute(CharTermAttribute.class);
t.setReader(new StringReader("bbab"));
assertTokenStreamContents(t, new String[] {"bb", "b"}, new int[] {0, 3}, new int[] {2, 4});
}


@ -62,14 +62,6 @@ public class TestSynonymMapFilter extends BaseTokenStreamTestCase {
b.add(inputCharsRef.get(), outputCharsRef.get(), keepOrig);
}
private void assertEquals(CharTermAttribute term, String expected) {
assertEquals(expected.length(), term.length());
final char[] buffer = term.buffer();
for (int chIDX = 0; chIDX < expected.length(); chIDX++) {
assertEquals(expected.charAt(chIDX), buffer[chIDX]);
}
}
// For the output string: separate positions with a space,
// and separate multiple tokens at each position with a
// /. If a token should have end offset != the input


@ -156,8 +156,7 @@ public class TestWikipediaTokenizerFactory extends BaseTokenStreamFactoryTestCas
expectThrows(
IllegalArgumentException.class,
() -> {
Tokenizer tf =
tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, "-1").create(newAttributeFactory());
tokenizerFactory(WIKIPEDIA, TOKEN_OUTPUT, "-1").create(newAttributeFactory());
});
assertTrue(
expected


@ -43,7 +43,6 @@ public final class OpenNLPTokenizer extends SegmentingTokenizerBase {
private int termNum = 0;
private int sentenceStart = 0;
private NLPSentenceDetectorOp sentenceOp = null;
private NLPTokenizerOp tokenizerOp = null;
public OpenNLPTokenizer(
@ -54,7 +53,6 @@ public final class OpenNLPTokenizer extends SegmentingTokenizerBase {
throw new IllegalArgumentException(
"OpenNLPTokenizer: both a Sentence Detector and a Tokenizer are required");
}
this.sentenceOp = sentenceOp;
this.tokenizerOp = tokenizerOp;
}


@ -82,10 +82,9 @@ public class TestOpenNLPTokenizerFactory extends BaseTokenStreamTestCase {
expectThrows(
IllegalArgumentException.class,
() -> {
CustomAnalyzer analyzer =
CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
.withTokenizer("opennlp", "tokenizerModel", "en-test-tokenizer.bin")
.build();
CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
.withTokenizer("opennlp", "tokenizerModel", "en-test-tokenizer.bin")
.build();
});
assertTrue(
expected.getMessage().contains("Configuration Error: missing parameter 'sentenceModel'"));
@ -97,10 +96,9 @@ public class TestOpenNLPTokenizerFactory extends BaseTokenStreamTestCase {
expectThrows(
IllegalArgumentException.class,
() -> {
CustomAnalyzer analyzer =
CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
.withTokenizer("opennlp", "sentenceModel", "en-test-sent.bin")
.build();
CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
.withTokenizer("opennlp", "sentenceModel", "en-test-sent.bin")
.build();
});
assertTrue(
expected.getMessage().contains("Configuration Error: missing parameter 'tokenizerModel'"));


@ -27,8 +27,6 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
/** Filter for DoubleMetaphone (supporting secondary codes) */
public final class DoubleMetaphoneFilter extends TokenFilter {
private static final String TOKEN_TYPE = "DoubleMetaphone";
private final LinkedList<State> remainingTokens = new LinkedList<>();
private final DoubleMetaphone encoder = new DoubleMetaphone();
private final boolean inject;


@ -53,8 +53,6 @@ class BigramDictionary extends AbstractDictionary {
private int max = 0;
private int repeat = 0;
// static Logger log = Logger.getLogger(BigramDictionary.class);
public static synchronized BigramDictionary getInstance() {
@ -143,7 +141,7 @@ class BigramDictionary extends AbstractDictionary {
*/
public void loadFromFile(String dctFilePath) throws IOException {
int i, cnt, length, total = 0;
int i, cnt, length;
// The file only counted 6763 Chinese characters plus 5 reserved slots 3756~3760.
// The 3756th is used (as a header) to store information.
int[] buffer = new int[3];
@ -163,7 +161,6 @@ class BigramDictionary extends AbstractDictionary {
if (cnt <= 0) {
continue;
}
total += cnt;
int j = 0;
while (j < cnt) {
dctFile.read(intBuffer);
@ -232,13 +229,11 @@ class BigramDictionary extends AbstractDictionary {
if (hash2 < 0) hash2 = PRIME_BIGRAM_LENGTH + hash2;
int index = hash1;
int i = 1;
repeat++;
while (bigramHashTable[index] != 0
&& bigramHashTable[index] != hashId
&& i < PRIME_BIGRAM_LENGTH) {
index = (hash1 + i * hash2) % PRIME_BIGRAM_LENGTH;
i++;
repeat++;
if (i > max) max = i;
}
// System.out.println(i - 1);


@ -228,7 +228,6 @@ public class Trie {
int cmd = -1;
StrEnum e = new StrEnum(key, forward);
Character ch = null;
Character aux = null;
for (int i = 0; i < key.length(); ) {
ch = e.next();
@ -243,7 +242,7 @@ public class Trie {
for (int skip = c.skip; skip > 0; skip--) {
if (i < key.length()) {
aux = e.next();
e.next();
} else {
return null;
}


@ -327,20 +327,6 @@ final class Lucene70NormsProducer extends NormsProducer implements Cloneable {
};
}
private IndexInput getDisiInput2(FieldInfo field, NormsEntry entry) throws IOException {
IndexInput slice = null;
if (merging) {
slice = disiInputs.get(field.number);
}
if (slice == null) {
slice = data.slice("docs", entry.docsWithFieldOffset, entry.docsWithFieldLength);
if (merging) {
disiInputs.put(field.number, slice);
}
}
return slice;
}
@Override
public NumericDocValues getNorms(FieldInfo field) throws IOException {
final NormsEntry entry = norms.get(field.number);


@ -106,17 +106,14 @@ public class Lucene70SegmentInfoFormat extends SegmentInfoFormat {
Throwable priorE = null;
SegmentInfo si = null;
try {
int format =
CodecUtil.checkIndexHeader(
input,
Lucene70SegmentInfoFormat.CODEC_NAME,
Lucene70SegmentInfoFormat.VERSION_START,
Lucene70SegmentInfoFormat.VERSION_CURRENT,
segmentID,
"");
CodecUtil.checkIndexHeader(
input,
Lucene70SegmentInfoFormat.CODEC_NAME,
Lucene70SegmentInfoFormat.VERSION_START,
Lucene70SegmentInfoFormat.VERSION_CURRENT,
segmentID,
"");
si = parseSegmentInfo(dir, input, segment, segmentID);
} catch (Throwable exception) {
priorE = exception;
} finally {


@ -97,7 +97,6 @@ public class TestIndexedDISI extends LuceneTestCase {
private void assertAdvanceBeyondEnd(BitSet set, Directory dir) throws IOException {
final int cardinality = set.cardinality();
final byte denseRankPower = 9; // Not tested here so fixed to isolate factors
long length;
int jumpTableentryCount;
try (IndexOutput out = dir.createOutput("bar", IOContext.DEFAULT)) {
jumpTableentryCount =
@ -434,9 +433,7 @@ public class TestIndexedDISI extends LuceneTestCase {
length = out.getFilePointer();
}
try (IndexInput in = dir.openInput("foo", IOContext.DEFAULT)) {
IndexedDISI disi =
new IndexedDISI(
in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
new IndexedDISI(in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
}
// This tests the legality of the denseRankPower only, so we don't do anything with the disi
}


@ -97,12 +97,6 @@ public class ReadTokensTask extends PerfTask {
int left;
String s;
void init(String s) {
this.s = s;
left = s.length();
this.upto = 0;
}
@Override
public int read(char[] c) {
return read(c, 0, c.length);


@ -178,6 +178,7 @@ public class SearchTravRetHighlightTask extends SearchTravTask {
void withTopDocs(IndexSearcher searcher, Query q, TopDocs hits) throws Exception;
}
@SuppressWarnings("unused")
private volatile int preventOptimizeAway = 0;
private class StandardHLImpl implements HLImpl {


@ -437,7 +437,7 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
}
}
}
return new NewsPost(body.toString(), subject, groupName, number);
return new NewsPost(body.toString(), subject, groupName);
} catch (Throwable e) {
return null;
}
@ -447,13 +447,11 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
private final String body;
private final String subject;
private final String group;
private final String number;
private NewsPost(String body, String subject, String group, String number) {
private NewsPost(String body, String subject, String group) {
this.body = body;
this.subject = subject;
this.group = group;
this.number = number;
}
public String getBody() {
@ -467,9 +465,5 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
public String getGroup() {
return group;
}
public String getNumber() {
return number;
}
}
}


@ -1920,14 +1920,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
public HighFreqDocsEnum() {}
public int[] getDocIDs() {
return docIDs;
}
public int[] getFreqs() {
return freqs;
}
public PostingsEnum reset(int[] docIDs, int[] freqs) {
this.docIDs = docIDs;
this.freqs = freqs;
@ -2106,18 +2098,6 @@ public final class DirectPostingsFormat extends PostingsFormat {
posJump = hasOffsets ? 3 : 1;
}
public int[] getDocIDs() {
return docIDs;
}
public int[][] getPositions() {
return positions;
}
public int getPosJump() {
return posJump;
}
public PostingsEnum reset(int[] docIDs, int[] freqs, int[][] positions, byte[][][] payloads) {
this.docIDs = docIDs;
this.freqs = freqs;


@ -559,7 +559,7 @@ public class FSTTermsReader extends FieldsProducer {
if (term == null) {
return SeekStatus.END;
} else {
return term.equals(target) ? SeekStatus.FOUND : SeekStatus.NOT_FOUND;
return term.get().equals(target) ? SeekStatus.FOUND : SeekStatus.NOT_FOUND;
}
}


@ -22,7 +22,6 @@ import static org.apache.lucene.codecs.simpletext.SimpleTextPointsWriter.BLOCK_V
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
@ -195,59 +194,6 @@ final class SimpleTextBKDReader extends PointValues implements Accountable {
}
}
private void visitCompressedDocValues(
int[] commonPrefixLengths,
byte[] scratchPackedValue,
IndexInput in,
int[] docIDs,
int count,
IntersectVisitor visitor,
int compressedDim)
throws IOException {
// the byte at `compressedByteOffset` is compressed using run-length compression,
// other suffix bytes are stored verbatim
final int compressedByteOffset =
compressedDim * bytesPerDim + commonPrefixLengths[compressedDim];
commonPrefixLengths[compressedDim]++;
int i;
for (i = 0; i < count; ) {
scratchPackedValue[compressedByteOffset] = in.readByte();
final int runLen = Byte.toUnsignedInt(in.readByte());
for (int j = 0; j < runLen; ++j) {
for (int dim = 0; dim < numDims; dim++) {
int prefix = commonPrefixLengths[dim];
in.readBytes(scratchPackedValue, dim * bytesPerDim + prefix, bytesPerDim - prefix);
}
visitor.visit(docIDs[i + j], scratchPackedValue);
}
i += runLen;
}
if (i != count) {
throw new CorruptIndexException(
"Sub blocks do not add up to the expected count: " + count + " != " + i, in);
}
}
private int readCompressedDim(IndexInput in) throws IOException {
int compressedDim = in.readByte();
if (compressedDim < -1 || compressedDim >= numIndexDims) {
throw new CorruptIndexException("Got compressedDim=" + compressedDim, in);
}
return compressedDim;
}
private void readCommonPrefixes(
int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in) throws IOException {
for (int dim = 0; dim < numDims; dim++) {
int prefix = in.readVInt();
commonPrefixLengths[dim] = prefix;
if (prefix > 0) {
in.readBytes(scratchPackedValue, dim * bytesPerDim, prefix);
}
// System.out.println("R: " + dim + " of " + numDims + " prefix=" + prefix);
}
}
private void intersect(
IntersectState state, int nodeID, byte[] cellMinPacked, byte[] cellMaxPacked)
throws IOException {


@ -816,40 +816,6 @@ final class SimpleTextBKDWriter implements Closeable {
}
}
private void writeLeafBlockPackedValuesRange(
IndexOutput out,
int[] commonPrefixLengths,
int start,
int end,
IntFunction<BytesRef> packedValues)
throws IOException {
for (int i = start; i < end; ++i) {
BytesRef ref = packedValues.apply(i);
assert ref.length == config.packedBytesLength;
for (int dim = 0; dim < config.numDims; dim++) {
int prefix = commonPrefixLengths[dim];
out.writeBytes(
ref.bytes, ref.offset + dim * config.bytesPerDim + prefix, config.bytesPerDim - prefix);
}
}
}
private static int runLen(
IntFunction<BytesRef> packedValues, int start, int end, int byteOffset) {
BytesRef first = packedValues.apply(start);
byte b = first.bytes[first.offset + byteOffset];
for (int i = start + 1; i < end; ++i) {
BytesRef ref = packedValues.apply(i);
byte b2 = ref.bytes[ref.offset + byteOffset];
assert Byte.toUnsignedInt(b2) >= Byte.toUnsignedInt(b);
if (b != b2) {
return i - start;
}
}
return end - start;
}
@Override
public void close() throws IOException {
if (tempInput != null) {


@ -157,14 +157,6 @@ class SimpleTextPointsWriter extends PointsWriter {
SimpleTextUtil.write(out, s, scratch);
}
private void writeInt(IndexOutput out, int x) throws IOException {
SimpleTextUtil.write(out, Integer.toString(x), scratch);
}
private void writeLong(IndexOutput out, long x) throws IOException {
SimpleTextUtil.write(out, Long.toString(x), scratch);
}
private void write(IndexOutput out, BytesRef b) throws IOException {
SimpleTextUtil.write(out, b);
}


@ -74,8 +74,8 @@ public class SimpleTextVectorWriter extends VectorWriter {
public void writeField(FieldInfo fieldInfo, VectorValues vectors) throws IOException {
long vectorDataOffset = vectorData.getFilePointer();
List<Integer> docIds = new ArrayList<>();
int docV, ord = 0;
for (docV = vectors.nextDoc(); docV != NO_MORE_DOCS; docV = vectors.nextDoc(), ord++) {
int docV;
for (docV = vectors.nextDoc(); docV != NO_MORE_DOCS; docV = vectors.nextDoc()) {
writeVectorValue(vectors);
docIds.add(docV);
}


@ -39,7 +39,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li>&lt;EMOJI&gt;: A sequence of Emoji characters</li>
* </ul>
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
public final class StandardTokenizerImpl {


@ -37,7 +37,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
* <li>&lt;EMOJI&gt;: A sequence of Emoji characters</li>
* </ul>
*/
@SuppressWarnings("fallthrough")
@SuppressWarnings({"unused","fallthrough"})
%%
%unicode 9.0


@ -118,7 +118,6 @@ public abstract class VectorWriter implements Closeable {
/** Tracks state of one sub-reader that we are merging */
private static class VectorValuesSub extends DocIDMerger.Sub {
final MergeState.DocMap docMap;
final VectorValues values;
final int segmentIndex;
int count;
@ -127,7 +126,6 @@ public abstract class VectorWriter implements Closeable {
super(docMap);
this.values = values;
this.segmentIndex = segmentIndex;
this.docMap = docMap;
assert values.docID() == -1;
}


@ -103,12 +103,9 @@ public class Lucene86SegmentInfoFormat extends SegmentInfoFormat {
Throwable priorE = null;
SegmentInfo si = null;
try {
int format =
CodecUtil.checkIndexHeader(
input, CODEC_NAME, VERSION_START, VERSION_CURRENT, segmentID, "");
CodecUtil.checkIndexHeader(
input, CODEC_NAME, VERSION_START, VERSION_CURRENT, segmentID, "");
si = parseSegmentInfo(dir, input, segment, segmentID);
} catch (Throwable exception) {
priorE = exception;
} finally {


@ -125,14 +125,13 @@ public final class Lucene90FieldInfosFormat extends FieldInfosFormat {
Throwable priorE = null;
FieldInfo infos[] = null;
try {
int version =
CodecUtil.checkIndexHeader(
input,
Lucene90FieldInfosFormat.CODEC_NAME,
Lucene90FieldInfosFormat.FORMAT_START,
Lucene90FieldInfosFormat.FORMAT_CURRENT,
segmentInfo.getId(),
segmentSuffix);
CodecUtil.checkIndexHeader(
input,
Lucene90FieldInfosFormat.CODEC_NAME,
Lucene90FieldInfosFormat.FORMAT_START,
Lucene90FieldInfosFormat.FORMAT_CURRENT,
segmentInfo.getId(),
segmentSuffix);
final int size = input.readVInt(); // read in the size
infos = new FieldInfo[size];


@ -21,7 +21,6 @@ import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.FloatBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
@ -324,7 +323,6 @@ public final class Lucene90VectorReader extends VectorReader {
final BytesRef binaryValue;
final ByteBuffer byteBuffer;
final FloatBuffer floatBuffer;
final int byteSize;
final float[] value;
@ -336,7 +334,6 @@ public final class Lucene90VectorReader extends VectorReader {
this.dataIn = dataIn;
byteSize = Float.BYTES * fieldEntry.dimension;
byteBuffer = ByteBuffer.allocate(byteSize);
floatBuffer = byteBuffer.asFloatBuffer();
value = new float[fieldEntry.dimension];
binaryValue = new BytesRef(byteBuffer.array(), byteBuffer.arrayOffset(), byteSize);
}


@ -97,14 +97,12 @@ final class IntersectTermsEnumFrame {
int suffix;
private final IntersectTermsEnum ite;
private final int version;
public IntersectTermsEnumFrame(IntersectTermsEnum ite, int ord) throws IOException {
this.ite = ite;
this.ord = ord;
this.termState = ite.fr.parent.postingsReader.newTermState();
this.termState.totalTermFreq = -1;
this.version = ite.fr.parent.version;
suffixLengthBytes = new byte[32];
suffixLengthsReader = new ByteArrayDataInput();
}


@ -271,13 +271,6 @@ public final class Lucene90BlockTreeTermsReader extends FieldsProducer {
return bytes;
}
/** Seek {@code input} to the directory offset. */
private static void seekDir(IndexInput input) throws IOException {
input.seek(input.length() - CodecUtil.footerLength() - 8);
long offset = input.readLong();
input.seek(offset);
}
// for debugging
// private static String toHex(int v) {
// return "0x" + Integer.toHexString(v);


@ -94,14 +94,12 @@ final class SegmentTermsEnumFrame {
final ByteArrayDataInput bytesReader = new ByteArrayDataInput();
private final SegmentTermsEnum ste;
private final int version;
public SegmentTermsEnumFrame(SegmentTermsEnum ste, int ord) throws IOException {
this.ste = ste;
this.ord = ord;
this.state = ste.fr.parent.postingsReader.newTermState();
this.state.totalTermFreq = -1;
this.version = ste.fr.parent.version;
suffixLengthBytes = new byte[32];
suffixLengthsReader = new ByteArrayDataInput();
}


@ -64,8 +64,6 @@ import org.apache.lucene.util.LongBitSet;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.SuppressForbidden;
import org.apache.lucene.util.Version;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.CompiledAutomaton;
/**
* Basic tool and API to check the health of an index and write a new segments file that removes
@ -1094,171 +1092,6 @@ public final class CheckIndex implements Closeable {
return status;
}
/**
* Visits all terms in the range minTerm (inclusive) to maxTerm (exclusive), marking all doc IDs
* encountered into allDocsSeen, and returning the total number of terms visited.
*/
private static long getDocsFromTermRange(
String field,
int maxDoc,
TermsEnum termsEnum,
FixedBitSet docsSeen,
BytesRef minTerm,
BytesRef maxTerm,
boolean isIntersect)
throws IOException {
docsSeen.clear(0, docsSeen.length());
long termCount = 0;
PostingsEnum postingsEnum = null;
BytesRefBuilder lastTerm = null;
while (true) {
BytesRef term;
// Kinda messy: for intersect, we must first next(), but for "normal", we are already on our
// first term:
if (isIntersect || termCount != 0) {
term = termsEnum.next();
} else {
term = termsEnum.term();
}
if (term == null) {
if (isIntersect == false) {
throw new RuntimeException("didn't see max term field=" + field + " term=" + maxTerm);
}
// System.out.println(" terms=" + termCount);
return termCount;
}
assert term.isValid();
if (lastTerm == null) {
lastTerm = new BytesRefBuilder();
lastTerm.copyBytes(term);
} else {
if (lastTerm.get().compareTo(term) >= 0) {
throw new RuntimeException(
"terms out of order: lastTerm=" + lastTerm.get() + " term=" + term);
}
lastTerm.copyBytes(term);
}
// System.out.println(" term=" + term);
// Caller already ensured terms enum positioned >= minTerm:
if (term.compareTo(minTerm) < 0) {
throw new RuntimeException("saw term before min term field=" + field + " term=" + minTerm);
}
if (isIntersect == false) {
int cmp = term.compareTo(maxTerm);
if (cmp == 0) {
// Done!
// System.out.println(" terms=" + termCount);
return termCount;
} else if (cmp > 0) {
throw new RuntimeException("didn't see end term field=" + field + " term=" + maxTerm);
}
}
postingsEnum = termsEnum.postings(postingsEnum, 0);
int lastDoc = -1;
while (true) {
int doc = postingsEnum.nextDoc();
if (doc == DocIdSetIterator.NO_MORE_DOCS) {
break;
}
if (doc <= lastDoc) {
throw new RuntimeException("term " + term + ": doc " + doc + " <= lastDoc " + lastDoc);
}
if (doc >= maxDoc) {
throw new RuntimeException("term " + term + ": doc " + doc + " >= maxDoc " + maxDoc);
}
// System.out.println(" doc=" + doc);
docsSeen.set(doc);
lastDoc = doc;
}
termCount++;
}
}
/**
* Test Terms.intersect on this range, and validates that it returns the same doc ids as using
* non-intersect TermsEnum. Returns true if any fake terms were seen.
*/
private static boolean checkSingleTermRange(
String field,
int maxDoc,
Terms terms,
BytesRef minTerm,
BytesRef maxTerm,
FixedBitSet normalDocs,
FixedBitSet intersectDocs)
throws IOException {
// System.out.println(" check minTerm=" + minTerm.utf8ToString() + " maxTerm=" +
// maxTerm.utf8ToString());
assert minTerm.compareTo(maxTerm) <= 0;
TermsEnum termsEnum = terms.iterator();
TermsEnum.SeekStatus status = termsEnum.seekCeil(minTerm);
if (status != TermsEnum.SeekStatus.FOUND) {
throw new RuntimeException(
"failed to seek to existing term field=" + field + " term=" + minTerm);
}
// Do "dumb" iteration to visit all terms in the range:
long normalTermCount =
getDocsFromTermRange(field, maxDoc, termsEnum, normalDocs, minTerm, maxTerm, false);
// Now do the same operation using intersect:
long intersectTermCount =
getDocsFromTermRange(
field,
maxDoc,
terms.intersect(
new CompiledAutomaton(
Automata.makeBinaryInterval(minTerm, true, maxTerm, false),
true,
false,
Integer.MAX_VALUE,
true),
null),
intersectDocs,
minTerm,
maxTerm,
true);
if (intersectTermCount > normalTermCount) {
throw new RuntimeException(
"intersect returned too many terms: field="
+ field
+ " intersectTermCount="
+ intersectTermCount
+ " normalTermCount="
+ normalTermCount);
}
if (normalDocs.equals(intersectDocs) == false) {
throw new RuntimeException(
"intersect visited different docs than straight terms enum: "
+ normalDocs.cardinality()
+ " for straight enum, vs "
+ intersectDocs.cardinality()
+ " for intersect, minTerm="
+ minTerm
+ " maxTerm="
+ maxTerm);
}
// System.out.println(" docs=" + normalTermCount);
// System.out.println(" " + intersectTermCount + " vs " + normalTermCount);
return intersectTermCount != normalTermCount;
}
/**
* checks Fields api is consistent with itself. searcher is optional, to verify with queries. Can
* be null.
@ -2553,7 +2386,6 @@ public final class CheckIndex implements Closeable {
public static class VerifyPointsVisitor implements PointValues.IntersectVisitor {
private long pointCountSeen;
private int lastDocID = -1;
private final int maxDoc;
private final FixedBitSet docsSeen;
private final byte[] lastMinPackedValue;
private final byte[] lastMaxPackedValue;
@ -2570,7 +2402,6 @@ public final class CheckIndex implements Closeable {
/** Sole constructor */
public VerifyPointsVisitor(String fieldName, int maxDoc, PointValues values)
throws IOException {
this.maxDoc = maxDoc;
this.fieldName = fieldName;
numDataDims = values.getNumDimensions();
numIndexDims = values.getNumIndexDimensions();


@ -49,7 +49,6 @@ public class OrdinalMap implements Accountable {
// TODO: use more efficient packed ints structures?
private static class TermsEnumIndex {
public static final TermsEnumIndex[] EMPTY_ARRAY = new TermsEnumIndex[0];
final int subIndex;
final TermsEnum termsEnum;
BytesRef currentTerm;


@ -35,8 +35,6 @@ public class SpanScorer extends Scorer {
/** accumulated sloppy freq (computed in setFreqCurrentDoc) */
private float freq;
/** number of matches (computed in setFreqCurrentDoc) */
private int numMatches;
private int lastScoredDoc = -1; // last doc we called setFreqCurrentDoc() for
@ -77,13 +75,12 @@ public class SpanScorer extends Scorer {
}
/**
* Sets {@link #freq} and {@link #numMatches} for the current document.
* Sets {@link #freq} for the current document.
*
* <p>This will be called at most once per document.
*/
protected final void setFreqCurrentDoc() throws IOException {
freq = 0.0f;
numMatches = 0;
spans.doStartCurrentDoc();
@ -102,7 +99,6 @@ public class SpanScorer extends Scorer {
// assert (startPos != prevStartPos) || (endPos > prevEndPos) : "non increased
// endPos="+endPos;
assert (startPos != prevStartPos) || (endPos >= prevEndPos) : "decreased endPos=" + endPos;
numMatches++;
if (docScorer == null) { // scores not required, break out here
freq = 1;
return;


@ -374,7 +374,6 @@ public class OfflineSorter {
/** Merge the most recent {@code maxTempFile} partitions into a new partition. */
void mergePartitions(Directory trackingDir, List<Future<Partition>> segments) throws IOException {
long start = System.currentTimeMillis();
List<Future<Partition>> segmentsToMerge;
if (segments.size() > maxTempFiles) {
segmentsToMerge = segments.subList(segments.size() - maxTempFiles, segments.size());
@ -429,7 +428,6 @@ public class OfflineSorter {
long start = System.currentTimeMillis();
SortableBytesRefArray buffer;
boolean exhausted = false;
int count;
if (valueLength != -1) {
// fixed length case
buffer = new FixedLengthBytesRefArray(valueLength);


@ -71,8 +71,6 @@ public final class FST<T> implements Accountable {
private static final long BASE_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(FST.class);
private static final long ARC_SHALLOW_RAM_BYTES_USED =
RamUsageEstimator.shallowSizeOfInstance(Arc.class);
private static final int BIT_FINAL_ARC = 1 << 0;
static final int BIT_LAST_ARC = 1 << 1;


@ -59,7 +59,6 @@ import org.apache.lucene.util.SparseFixedBitSet;
public final class HnswGraph extends KnnGraphValues {
private final int maxConn;
private final VectorValues.SearchStrategy searchStrategy;
// Each entry lists the top maxConn neighbors of a node. The nodes correspond to vectors added to
// HnswBuilder, and the
@ -70,13 +69,12 @@ public final class HnswGraph extends KnnGraphValues {
private int upto;
private NeighborArray cur;
HnswGraph(int maxConn, VectorValues.SearchStrategy searchStrategy) {
HnswGraph(int maxConn) {
graph = new ArrayList<>();
// Typically with diversity criteria we see nodes not fully occupied; average fanout seems to be
// about 1/2 maxConn. There is some indexing time penalty for under-allocating, but saves RAM
graph.add(new NeighborArray(Math.max(32, maxConn / 4)));
this.maxConn = maxConn;
this.searchStrategy = searchStrategy;
}
/**


@ -99,7 +99,7 @@ public final class HnswGraphBuilder {
}
this.maxConn = maxConn;
this.beamWidth = beamWidth;
this.hnsw = new HnswGraph(maxConn, searchStrategy);
this.hnsw = new HnswGraph(maxConn);
bound = BoundsChecker.create(searchStrategy.reversed);
random = new Random(seed);
scratch = new NeighborArray(Math.max(beamWidth, maxConn + 1));


@ -28,7 +28,6 @@ import org.apache.lucene.util.ArrayUtil;
public class NeighborArray {
private int size;
private int upto;
float[] score;
int[] node;


@ -15,7 +15,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from fractions import gcd
try:
# python 3.9+
from math import gcd
except ImportError:
# old python
from fractions import gcd
"""Code generation for bulk operations"""


@ -191,8 +191,7 @@ public class TestStopFilter extends BaseTokenStreamTestCase {
StopFilter stopfilter, List<Integer> stopwordPositions, final int numberOfTokens)
throws IOException {
CharTermAttribute termAtt = stopfilter.getAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncrAtt =
stopfilter.getAttribute(PositionIncrementAttribute.class);
stopfilter.getAttribute(PositionIncrementAttribute.class);
stopfilter.reset();
log("Test stopwords positions:");
for (int i = 0; i < numberOfTokens; i++) {


@ -98,7 +98,6 @@ public class TestIndexedDISI extends LuceneTestCase {
private void assertAdvanceBeyondEnd(BitSet set, Directory dir) throws IOException {
final int cardinality = set.cardinality();
final byte denseRankPower = 9; // Not tested here so fixed to isolate factors
long length;
int jumpTableentryCount;
try (IndexOutput out = dir.createOutput("bar", IOContext.DEFAULT)) {
jumpTableentryCount =
@ -435,9 +434,7 @@ public class TestIndexedDISI extends LuceneTestCase {
length = out.getFilePointer();
}
try (IndexInput in = dir.openInput("foo", IOContext.DEFAULT)) {
IndexedDISI disi =
new IndexedDISI(
in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
new IndexedDISI(in, 0L, length, jumpTableEntryCount, denseRankPowerRead, set.cardinality());
}
// This tests the legality of the denseRankPower only, so we don't do anything with the disi
}


@ -84,7 +84,6 @@ public class Test2BSortedDocValuesOrds extends LuceneTestCase {
int counter = 0;
for (LeafReaderContext context : r.leaves()) {
LeafReader reader = context.reader();
BytesRef scratch = new BytesRef();
BinaryDocValues dv = DocValues.getBinary(reader, "dv");
for (int i = 0; i < reader.maxDoc(); i++) {
assertEquals(i, dv.nextDoc());


@ -341,13 +341,11 @@ public class TestCodecs extends LuceneTestCase {
private static class Verify extends Thread {
final Fields termsDict;
final FieldData[] fields;
final SegmentInfo si;
volatile boolean failed;
Verify(final SegmentInfo si, final FieldData[] fields, final Fields termsDict) {
this.fields = fields;
this.termsDict = termsDict;
this.si = si;
}
@Override
@ -377,8 +375,6 @@ public class TestCodecs extends LuceneTestCase {
assertEquals(DocIdSetIterator.NO_MORE_DOCS, postingsEnum.nextDoc());
}
byte[] data = new byte[10];
private void verifyPositions(final PositionData[] positions, final PostingsEnum posEnum)
throws Throwable {
for (int i = 0; i < positions.length; i++) {


@ -95,7 +95,6 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
public final ReaderManager mgr;
private final Directory indexDir;
private final Path root;
private final Path segsPath;
/** Which segments have been closed, but their parallel index is not yet not removed. */
@ -119,8 +118,6 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
}
public ReindexingReader(Path root) throws IOException {
this.root = root;
// Normal index is stored under "index":
indexDir = openDirectory(root.resolve("index"));
@ -869,7 +866,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
for (int i = 0; i < maxDoc; i++) {
// TODO: is this still O(blockSize^2)?
assertEquals(i, oldValues.nextDoc());
Document oldDoc = reader.document(i);
reader.document(i);
Document newDoc = new Document();
newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, oldValues.longValue()));
w.addDocument(newDoc);
@ -996,7 +993,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
assertNotNull("oldSchemaGen=" + oldSchemaGen, oldValues);
for (int i = 0; i < maxDoc; i++) {
// TODO: is this still O(blockSize^2)?
Document oldDoc = reader.document(i);
reader.document(i);
Document newDoc = new Document();
assertEquals(i, oldValues.nextDoc());
newDoc.add(
@ -1518,7 +1515,6 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
NumericDocValues numbers = MultiDocValues.getNumericValues(r, fieldName);
int maxDoc = r.maxDoc();
boolean failed = false;
long t0 = System.currentTimeMillis();
for (int i = 0; i < maxDoc; i++) {
Document oldDoc = r.document(i);
long value = multiplier * Long.parseLong(oldDoc.get("text").split(" ")[1]);


@ -238,7 +238,7 @@ public class TestDoc extends LuceneTestCase {
new FieldInfos.FieldNumbers(null),
context);
MergeState mergeState = merger.merge();
merger.merge();
r1.close();
r2.close();
si.setFiles(new HashSet<>(trackingDir.getCreatedFiles()));


@ -1720,7 +1720,6 @@ public class TestIndexSorting extends LuceneTestCase {
}
public void testRandom1() throws IOException {
boolean withDeletes = random().nextBoolean();
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
Sort indexSort = new Sort(new SortField("foo", SortField.Type.LONG));
@ -1791,7 +1790,6 @@ public class TestIndexSorting extends LuceneTestCase {
}
public void testMultiValuedRandom1() throws IOException {
boolean withDeletes = random().nextBoolean();
Directory dir = newDirectory();
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
Sort indexSort = new Sort(new SortedNumericSortField("foo", SortField.Type.LONG));
@ -2412,7 +2410,6 @@ public class TestIndexSorting extends LuceneTestCase {
}
private static final class RandomDoc {
public final int id;
public final int intValue;
public final int[] intValues;
public final long longValue;
@ -2425,7 +2422,6 @@ public class TestIndexSorting extends LuceneTestCase {
public final byte[][] bytesValues;
public RandomDoc(int id) {
this.id = id;
intValue = random().nextInt();
longValue = random().nextLong();
floatValue = random().nextFloat();


@ -2967,7 +2967,6 @@ public class TestIndexWriter extends LuceneTestCase {
// Use WindowsFS to prevent open files from being deleted:
FileSystem fs = new WindowsFS(path.getFileSystem()).getFileSystem(URI.create("file:///"));
Path root = new FilterPath(path, fs);
DirectoryReader reader;
// MMapDirectory doesn't work because it closes its file handles after mapping!
try (FSDirectory dir = new NIOFSDirectory(root)) {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));


@ -182,8 +182,6 @@ public class TestIndexWriterCommit extends LuceneTestCase {
// sum because the merged FST may use array encoding for
// some arcs (which uses more space):
final String idFormat = TestUtil.getPostingsFormat("id");
final String contentFormat = TestUtil.getPostingsFormat("content");
MockDirectoryWrapper dir = newMockDirectory();
Analyzer analyzer;
if (random().nextBoolean()) {


@ -1430,7 +1430,7 @@ public class TestIndexWriterExceptions extends LuceneTestCase {
w.close();
IndexReader reader = DirectoryReader.open(dir);
assertTrue(reader.numDocs() > 0);
SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
SegmentInfos.readLatestCommit(dir);
for (LeafReaderContext context : reader.leaves()) {
assertFalse(context.reader().getFieldInfos().hasVectors());
}


@ -159,9 +159,6 @@ public class TestIndexWriterOnDiskFull extends LuceneTestCase {
// sum because the merged FST may use array encoding for
// some arcs (which uses more space):
final String idFormat = TestUtil.getPostingsFormat("id");
final String contentFormat = TestUtil.getPostingsFormat("content");
int START_COUNT = 57;
int NUM_DIR = TEST_NIGHTLY ? 50 : 5;
int END_COUNT = START_COUNT + NUM_DIR * (TEST_NIGHTLY ? 25 : 5);


@ -424,7 +424,6 @@ public class TestIndexWriterReader extends LuceneTestCase {
IndexWriter mainWriter;
final List<Throwable> failures = new ArrayList<>();
DirectoryReader[] readers;
boolean didClose = false;
AtomicInteger count = new AtomicInteger(0);
AtomicInteger numaddIndexes = new AtomicInteger(0);
@ -460,7 +459,6 @@ public class TestIndexWriterReader extends LuceneTestCase {
}
void close(boolean doWait) throws Throwable {
didClose = true;
if (doWait) {
mainWriter.close();
} else {


@ -112,7 +112,6 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
private static final int MAX_THREADS_AT_ONCE = 10;
static class CheckSegmentCount implements Runnable, Closeable {
private final IndexWriter w;
private final AtomicInteger maxThreadCountPerIter;
private final AtomicInteger indexingCount;
private DirectoryReader r;
@ -120,7 +119,6 @@ public class TestIndexWriterThreadsToSegments extends LuceneTestCase {
public CheckSegmentCount(
IndexWriter w, AtomicInteger maxThreadCountPerIter, AtomicInteger indexingCount)
throws IOException {
this.w = w;
this.maxThreadCountPerIter = maxThreadCountPerIter;
this.indexingCount = indexingCount;
r = DirectoryReader.open(w);

View File

@ -54,7 +54,6 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
private static class IndexerThread extends Thread {
private final CyclicBarrier syncStart;
boolean diskFull;
Throwable error;
IndexWriter writer;
boolean noErrors;
@ -100,7 +99,6 @@ public class TestIndexWriterWithThreads extends LuceneTestCase {
// ioe.printStackTrace(System.out);
if (ioe.getMessage().startsWith("fake disk full at")
|| ioe.getMessage().equals("now failing on purpose")) {
diskFull = true;
try {
Thread.sleep(1);
} catch (InterruptedException ie) {

View File

@ -138,9 +138,6 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
writer.addDocument(doc(i, val));
}
int numDocUpdates = 0;
int numValueUpdates = 0;
for (int i = 0; i < numOperations; i++) {
final int op = TestUtil.nextInt(random(), 1, 100);
final long val = random().nextLong();
@ -152,10 +149,8 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
final int id = TestUtil.nextInt(random(), 0, expected.size() - 1);
expected.put(id, val);
if (op <= UPD_CUTOFF) {
numDocUpdates++;
writer.updateDocument(new Term("id", "doc-" + id), doc(id, val));
} else {
numValueUpdates++;
writer.updateNumericDocValue(new Term("id", "doc-" + id), "val", val);
}
}
@ -832,7 +827,6 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
int refreshChance = TestUtil.nextInt(random(), 5, 200);
int deleteChance = TestUtil.nextInt(random(), 2, 100);
int idUpto = 0;
int deletedCount = 0;
List<OneSortDoc> docs = new ArrayList<>();
@ -1600,7 +1594,6 @@ public class TestNumericDocValuesUpdates extends LuceneTestCase {
// update all doc values
long value = random().nextInt();
NumericDocValuesField[] update = new NumericDocValuesField[numDocs];
for (int i = 0; i < numDocs; i++) {
Term term = new Term("id", new BytesRef(Integer.toString(i)));
writer.updateDocValues(term, new NumericDocValuesField("ndv", value));

View File

@ -365,11 +365,6 @@ public class TestPayloads extends LuceneTestCase {
super(PER_FIELD_REUSE_STRATEGY);
}
public PayloadAnalyzer(String field, byte[] data, int offset, int length) {
super(PER_FIELD_REUSE_STRATEGY);
setPayloadData(field, data, offset, length);
}
void setPayloadData(String field, byte[] data, int offset, int length) {
fieldToData.put(field, new PayloadData(data, offset, length));
}

View File

@ -19,14 +19,12 @@ package org.apache.lucene.index;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.*;
public class TestStressIndexing extends LuceneTestCase {
private abstract static class TimedThread extends Thread {
volatile boolean failed;
int count;
private static int RUN_TIME_MSEC = atLeast(1000);
private TimedThread[] allThreads;
@ -40,13 +38,10 @@ public class TestStressIndexing extends LuceneTestCase {
public void run() {
final long stopTime = System.currentTimeMillis() + RUN_TIME_MSEC;
count = 0;
try {
do {
if (anyErrors()) break;
doWork();
count++;
} while (System.currentTimeMillis() < stopTime);
} catch (Throwable e) {
System.out.println(Thread.currentThread() + ": exc");
@ -103,10 +98,9 @@ public class TestStressIndexing extends LuceneTestCase {
public void doWork() throws Throwable {
for (int i = 0; i < 100; i++) {
IndexReader ir = DirectoryReader.open(directory);
IndexSearcher is = newSearcher(ir);
newSearcher(ir);
ir.close();
}
count += 100;
}
}

View File

@ -20,6 +20,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestTerm extends LuceneTestCase {
@SuppressWarnings("unlikely-arg-type")
public void testEquals() {
final Term base = new Term("same", "same");
final Term same = new Term("same", "same");

View File

@ -249,7 +249,6 @@ public class TestTermVectorsReader extends LuceneTestCase {
Codec.getDefault()
.termVectorsFormat()
.vectorsReader(dir, seg.info, fieldInfos, newIOContext(random()));
BytesRef[] terms;
Terms vector = reader.get(0).terms(testFields[0]);
assertNotNull(vector);
assertEquals(testTerms.length, vector.size());

View File

@ -263,7 +263,7 @@ public class TestTermsHashPerField extends LuceneTestCase {
for (int i = 0; i < numDocs; i++) {
int numTerms = 1 + random().nextInt(200);
int doc = i;
for (int j = 0; i < numTerms; i++) {
for (int j = 0; j < numTerms; j++) {
BytesRef ref = RandomPicks.randomFrom(random(), bytesRefs);
Posting posting = postingMap.get(ref);
if (posting.termId == -1) {
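The unused-local check also flags real bugs like the one fixed above: the inner loop declared `j` but tested and incremented `i`, so `j` was never read. A reduced sketch of that failure mode (hypothetical arrays and counting logic, not the actual test data):

class NestedLoopIndexBug {
  // Counts outer.length * inner.length pairs.
  static int countPairs(int[] outer, int[] inner) {
    int count = 0;
    for (int i = 0; i < outer.length; i++) {
      // Buggy original shape: for (int j = 0; i < inner.length; i++) { ... }
      // 'j' is never read, so an unusedLocal=error build rejects it and exposes
      // that the condition and increment run on the wrong index.
      for (int j = 0; j < inner.length; j++) {
        count++;
      }
    }
    return count;
  }
}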

View File

@ -28,8 +28,6 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase {
final boolean failOnCommit;
final boolean failOnRollback;
boolean rollbackCalled = false;
Map<String, String> prepareCommitData = null;
Map<String, String> commitData = null;
public TwoPhaseCommitImpl(boolean failOnPrepare, boolean failOnCommit, boolean failOnRollback) {
this.failOnPrepare = failOnPrepare;
@ -43,7 +41,6 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase {
}
public long prepareCommit(Map<String, String> commitData) throws IOException {
this.prepareCommitData = commitData;
assertFalse("commit should not have been called before all prepareCommit were", commitCalled);
if (failOnPrepare) {
throw new IOException("failOnPrepare");
@ -57,7 +54,6 @@ public class TestTwoPhaseCommitTool extends LuceneTestCase {
}
public long commit(Map<String, String> commitData) throws IOException {
this.commitData = commitData;
commitCalled = true;
if (failOnCommit) {
throw new RuntimeException("failOnCommit");

View File

@ -356,8 +356,6 @@ public class TestBoolean2 extends LuceneTestCase {
public void testRandomQueries() throws Exception {
String[] vals = {"w1", "w2", "w3", "w4", "w5", "xx", "yy", "zzz"};
int tot = 0;
BooleanQuery q1 = null;
try {
@ -395,7 +393,6 @@ public class TestBoolean2 extends LuceneTestCase {
collector = TopFieldCollector.create(sort, 1000, 1);
searcher.search(q1, collector);
ScoreDoc[] hits2 = collector.topDocs().scoreDocs;
tot += hits2.length;
CheckHits.checkEqual(q1, hits1, hits2);
BooleanQuery.Builder q3 = new BooleanQuery.Builder();

View File

@ -585,7 +585,7 @@ public class TestControlledRealTimeReopenThread extends ThreadedIndexingAndSearc
nrtDeletesThread.setDaemon(true);
nrtDeletesThread.start();
long gen1 = w.addDocument(new Document());
w.addDocument(new Document());
long gen2 = w.deleteAll();
nrtDeletesThread.waitForGeneration(gen2);
IOUtils.close(nrtDeletesThread, nrtDeletes, w, dir);

View File

@ -46,7 +46,6 @@ public class TestLongValuesSource extends LuceneTestCase {
dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
int numDocs = TestUtil.nextInt(random(), 2049, 4000);
int leastValue = 45;
for (int i = 0; i < numDocs; i++) {
Document document = new Document();
document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));

View File

@ -646,8 +646,6 @@ public class TestMatchesIterator extends LuceneTestCase {
// "a phrase sentence with many phrase sentence iterations of a phrase sentence",
public void testSloppyPhraseQueryWithRepeats() throws IOException {
Term p = new Term(FIELD_WITH_OFFSETS, "phrase");
Term s = new Term(FIELD_WITH_OFFSETS, "sentence");
PhraseQuery pq = new PhraseQuery(10, FIELD_WITH_OFFSETS, "phrase", "sentence", "sentence");
checkMatches(
pq,

View File

@ -47,6 +47,7 @@ public class TestSortedNumericSortField extends LuceneTestCase {
}
}
@SuppressWarnings("unlikely-arg-type")
public void testEquals() throws Exception {
SortField sf = new SortedNumericSortField("a", SortField.Type.LONG);
assertFalse(sf.equals(null));
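The new @SuppressWarnings("unlikely-arg-type") annotations in these equals() tests exist because the tests deliberately compare a query or sort field against unrelated types, which ecj's "unlikely argument type" check would otherwise report once promoted to an error. A hedged sketch of the idea, using a hypothetical class rather than the Lucene one:

final class Point {
  final int x;
  Point(int x) { this.x = x; }

  @Override
  public boolean equals(Object other) {
    return other instanceof Point && ((Point) other).x == x;
  }

  @Override
  public int hashCode() {
    return x;
  }
}

class PointEqualsTest {
  // Without the annotation, ecj flags the String comparison below as an unlikely argument type.
  @SuppressWarnings("unlikely-arg-type")
  void testEquals() {
    Point p = new Point(1);
    assert !p.equals(null);          // null is a legal equals() argument
    assert !p.equals("not a point"); // deliberate cross-type comparison
    assert p.equals(new Point(1));
  }
}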

View File

@ -47,6 +47,7 @@ public class TestSortedSetSortField extends LuceneTestCase {
}
}
@SuppressWarnings("unlikely-arg-type")
public void testEquals() throws Exception {
SortField sf = new SortedSetSortField("a", false);
assertFalse(sf.equals(null));

View File

@ -126,9 +126,6 @@ public class TestTermQuery extends LuceneTestCase {
w.addDocument(new Document());
DirectoryReader reader = w.getReader();
FilterDirectoryReader noSeekReader = new NoSeekDirectoryReader(reader);
IndexSearcher noSeekSearcher = new IndexSearcher(noSeekReader);
Query query = new TermQuery(new Term("foo", "bar"));
TermQuery queryWithContext =
new TermQuery(
new Term("foo", "bar"),

View File

@ -22,6 +22,7 @@ import org.apache.lucene.util.LuceneTestCase;
public class TestTotalHits extends LuceneTestCase {
@SuppressWarnings("unlikely-arg-type")
public void testEqualsAndHashcode() {
TotalHits totalHits1 = randomTotalHits();
assertFalse(totalHits1.equals(null));

View File

@ -32,6 +32,7 @@ import org.apache.lucene.util.LuceneTestCase;
/** TestWildcard tests the '*' and '?' wildcard characters. */
public class TestWildcard extends LuceneTestCase {
@SuppressWarnings("unlikely-arg-type")
public void testEquals() {
WildcardQuery wq1 = new WildcardQuery(new Term("field", "b*a"));
WildcardQuery wq2 = new WildcardQuery(new Term("field", "b*a"));

View File

@ -181,7 +181,7 @@ public class TestMultiMMap extends BaseDirectoryTestCase {
public void testSeekSliceZero() throws Exception {
int upto = TEST_NIGHTLY ? 31 : 3;
for (int i = 0; i < 3; i++) {
for (int i = 0; i < upto; i++) {
MMapDirectory mmapDir = new MMapDirectory(createTempDir("testSeekSliceZero"), 1 << i);
IndexOutput io = mmapDir.createOutput("zeroBytes", newIOContext(random()));
io.close();
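Here the check caught another genuine mistake: `upto` was computed from TEST_NIGHTLY but the loop iterated to a literal 3, so the nightly-only iterations never ran and `upto` sat unused. A tiny sketch (hypothetical flag name):

class UnusedBoundBug {
  static final boolean NIGHTLY = Boolean.getBoolean("tests.nightly"); // hypothetical flag

  static int iterations() {
    int upto = NIGHTLY ? 31 : 3;
    int count = 0;
    // Buggy original shape: for (int i = 0; i < 3; i++) leaves 'upto' unused,
    // which is exactly what the unused-local error reports.
    for (int i = 0; i < upto; i++) {
      count++;
    }
    return count;
  }
}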

View File

@ -30,7 +30,6 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.TestUtil;
@ -82,7 +81,7 @@ public class TestNRTCachingDirectory extends BaseDirectoryTestCase {
final IndexSearcher s = newSearcher(r);
// Just make sure search can run; we can't assert
// totHits since it could be 0
TopDocs hits = s.search(new TermQuery(new Term("body", "the")), 10);
s.search(new TermQuery(new Term("body", "the")), 10);
// System.out.println("tot hits " + hits.totalHits);
}
}

View File

@ -1723,7 +1723,6 @@ public class TestBKD extends LuceneTestCase {
public void testTooManyPoints() throws Exception {
Directory dir = newDirectory();
final int numValues = 10;
final int numPointsAdded = 50; // exceeds totalPointCount
final int numBytesPerDim = TestUtil.nextInt(random(), 1, 4);
final byte[] pointValue = new byte[numBytesPerDim];
BKDWriter w =
@ -1755,7 +1754,6 @@ public class TestBKD extends LuceneTestCase {
public void testTooManyPoints1D() throws Exception {
Directory dir = newDirectory();
final int numValues = 10;
final int numPointsAdded = 50; // exceeds totalPointCount
final int numBytesPerDim = TestUtil.nextInt(random(), 1, 4);
final byte[][] pointValue = new byte[11][numBytesPerDim];
BKDWriter w =

View File

@ -294,7 +294,6 @@ public class TestFSTs extends LuceneTestCase {
for (int inputMode = 0; inputMode < 2; inputMode++) {
final int numWords = random.nextInt(maxNumWords + 1);
Set<IntsRef> termsSet = new HashSet<>();
IntsRef[] terms = new IntsRef[numWords];
while (termsSet.size() < numWords) {
final String term = getRandomString(random);
termsSet.add(toIntsRef(term, inputMode));
@ -527,7 +526,7 @@ public class TestFSTs extends LuceneTestCase {
BufferedReader is = Files.newBufferedReader(wordsFileIn, StandardCharsets.UTF_8);
try {
final IntsRefBuilder intsRef = new IntsRefBuilder();
final IntsRefBuilder intsRefBuilder = new IntsRefBuilder();
long tStart = System.currentTimeMillis();
int ord = 0;
while (true) {
@ -535,8 +534,8 @@ public class TestFSTs extends LuceneTestCase {
if (w == null) {
break;
}
toIntsRef(w, inputMode, intsRef);
fstCompiler.add(intsRef.get(), getOutput(intsRef.get(), ord));
toIntsRef(w, inputMode, intsRefBuilder);
fstCompiler.add(intsRefBuilder.get(), getOutput(intsRefBuilder.get(), ord));
ord++;
if (ord % 500000 == 0) {
@ -613,10 +612,10 @@ public class TestFSTs extends LuceneTestCase {
if (w == null) {
break;
}
toIntsRef(w, inputMode, intsRef);
toIntsRef(w, inputMode, intsRefBuilder);
if (iter == 0) {
T expected = getOutput(intsRef.get(), ord);
T actual = Util.get(fst, intsRef.get());
T expected = getOutput(intsRefBuilder.get(), ord);
T actual = Util.get(fst, intsRefBuilder.get());
if (actual == null) {
throw new RuntimeException("unexpected null output on input=" + w);
}
@ -631,18 +630,18 @@ public class TestFSTs extends LuceneTestCase {
}
} else {
// Get by output
final Long output = (Long) getOutput(intsRef.get(), ord);
final Long output = (Long) getOutput(intsRefBuilder.get(), ord);
@SuppressWarnings({"unchecked", "deprecation"})
final IntsRef actual = Util.getByOutput((FST<Long>) fst, output.longValue());
if (actual == null) {
throw new RuntimeException("unexpected null input from output=" + output);
}
if (!actual.equals(intsRef)) {
if (!actual.equals(intsRefBuilder.get())) {
throw new RuntimeException(
"wrong input (got "
+ actual
+ " but expected "
+ intsRef
+ intsRefBuilder
+ " from output="
+ output);
}

View File

@ -16,7 +16,6 @@
*/
package org.apache.lucene.util.fst;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.lucene.util.BytesRef;
@ -99,26 +98,4 @@ public class TestUtil extends LuceneTestCase {
}
return fstCompiler.compile();
}
private List<String> createRandomDictionary(int width, int depth) {
return createRandomDictionary(new ArrayList<>(), new StringBuilder(), width, depth);
}
private List<String> createRandomDictionary(
List<String> dict, StringBuilder buf, int width, int depth) {
char c = (char) random().nextInt(128);
assert width < Character.MIN_SURROGATE / 8 - 128; // avoid surrogate chars
int len = buf.length();
for (int i = 0; i < width; i++) {
buf.append(c);
if (depth > 0) {
createRandomDictionary(dict, buf, width, depth - 1);
} else {
dict.add(buf.toString());
}
c += random().nextInt(8);
buf.setLength(len);
}
return dict;
}
}
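Beyond locals, unused private members are detected as well, which is why never-called private helpers such as createRandomDictionary are deleted outright. Where code cannot simply be trimmed (for example generated sources), ecj honors a class-level @SuppressWarnings("unused") instead; a minimal sketch of that opt-out, with a hypothetical class and member names:

@SuppressWarnings("unused")
class GeneratedParserStub {
  // Never referenced, but tolerated because of the class-level suppression.
  private int scanPosition;

  private void fillToken() {
    // generated helper body omitted
  }
}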

View File

@ -20,7 +20,6 @@ import java.io.IOException;
import java.util.List;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.DoubleValues;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Rescorer;
@ -47,21 +46,6 @@ class ExpressionRescorer extends SortRescorer {
this.bindings = bindings;
}
private static DoubleValues scores(int doc, float score) {
return new DoubleValues() {
@Override
public double doubleValue() throws IOException {
return score;
}
@Override
public boolean advanceExact(int target) throws IOException {
assert doc == target;
return true;
}
};
}
@Override
public Explanation explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID)
throws IOException {

View File

@ -166,7 +166,7 @@ public final class JavascriptCompiler {
@SuppressWarnings({"unused", "null"})
private static void unusedTestCompile() throws IOException {
DoubleValues f = null;
double ret = f.doubleValue();
f.doubleValue();
}
/**

View File

@ -94,6 +94,7 @@ public class TestExpressionValueSource extends LuceneTestCase {
assertEquals(4, values.doubleValue(), 0);
}
@SuppressWarnings("unlikely-arg-type")
public void testDoubleValuesSourceEquals() throws Exception {
Expression expr = JavascriptCompiler.compile("sqrt(a) + ln(b)");

View File

@ -441,15 +441,7 @@ public class FacetsConfig {
System.arraycopy(field.assoc.bytes, field.assoc.offset, bytes, upto, field.assoc.length);
upto += field.assoc.length;
FacetsConfig.DimConfig ft = getDimConfig(field.dim);
// Drill down:
int start;
if (ft.requireDimensionDrillDown) {
start = 1;
} else {
start = 2;
}
for (int i = 1; i <= label.length; i++) {
doc.add(
new StringField(indexFieldName, pathToString(label.components, i), Field.Store.NO));

View File

@ -218,8 +218,7 @@ public class LongValueFacetCounts extends Facets {
}
private void countAllOneSegment(NumericDocValues values) throws IOException {
int doc;
while ((doc = values.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
totCount++;
increment(values.longValue());
}
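The countAllOneSegment change keeps the standard DocIdSetIterator loop but drops the `doc` local, since the body never needs the document id. A hedged sketch of both loop shapes (hypothetical counting logic):

import java.io.IOException;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.DocIdSetIterator;

class CountAllSketch {
  // When the doc id is needed, assigning inside the loop condition is the usual idiom.
  static long sumDocIdsAndValues(NumericDocValues values) throws IOException {
    long sum = 0;
    int doc;
    while ((doc = values.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
      sum += doc + values.longValue();
    }
    return sum;
  }

  // When only the values matter, the never-read 'doc' local is dropped,
  // matching the shape of the change above.
  static long countDocsWithValue(NumericDocValues values) throws IOException {
    long count = 0;
    while (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      count++;
    }
    return count;
  }
}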
@ -255,8 +254,7 @@ public class LongValueFacetCounts extends Facets {
if (singleValues != null) {
countAllOneSegment(singleValues);
} else {
int doc;
while ((doc = values.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
while (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
int limit = values.docValueCount();
totCount += limit;
for (int i = 0; i < limit; i++) {

View File

@ -31,9 +31,6 @@ import org.apache.lucene.util.IntsRef;
*/
public class TaxonomyFacetLabels {
/** Index field name provided to the constructor */
private final String indexFieldName;
/** {@code TaxonomyReader} provided to the constructor */
private final TaxonomyReader taxoReader;
@ -49,7 +46,6 @@ public class TaxonomyFacetLabels {
*/
public TaxonomyFacetLabels(TaxonomyReader taxoReader, String indexFieldName) throws IOException {
this.taxoReader = taxoReader;
this.indexFieldName = indexFieldName;
this.ordsReader = new DocValuesOrdinalsReader(indexFieldName);
}

Some files were not shown because too many files have changed in this diff.