Mirror of https://github.com/apache/lucene.git (synced 2025-02-07 02:28:49 +00:00)
LUCENE-9911: enable ecjLint unusedExceptionParameter (#70)
Fails the linter if an exception is swallowed (e.g., the exception variable is completely unused). If this is intentional for some reason, the exception can simply be annotated with @SuppressWarnings("unused").
commit 2971f311a2
parent 7f147fece0
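
Concretely, with org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=error (enabled in the settings diff below), ECJ rejects any catch block whose exception parameter is never read. A minimal sketch of the two escape hatches this commit relies on; the class and method names here are illustrative, not taken from the patch:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.UncheckedIOException;

    class UnusedExceptionParameterDemo {

      // Would now fail the lint: 'e' is declared but never used.
      static int swallowed(String s) {
        try {
          return Integer.parseInt(s);
        } catch (NumberFormatException e) {
          return -1;
        }
      }

      // Option 1: the swallowing is intentional, so say so explicitly.
      static int suppressed(String s) {
        try {
          return Integer.parseInt(s);
        } catch (@SuppressWarnings("unused") NumberFormatException e) {
          return -1;
        }
      }

      // Option 2 (used for the analyzers below): actually use the exception,
      // here by chaining it as the cause of an unchecked wrapper.
      static byte[] slurp(InputStream in) {
        try {
          return in.readAllBytes();
        } catch (IOException e) {
          throw new UncheckedIOException("unable to read stream", e);
        }
      }
    }

The analyzer changes below take the second route: the previously swallowed IOException becomes the cause of an UncheckedIOException, so the parameter is used and the original stack trace is preserved.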
@@ -86,8 +86,7 @@ org.eclipse.jdt.core.compiler.problem.pessimisticNullAnalysisForFreeTypeVariable
 org.eclipse.jdt.core.compiler.problem.possibleAccidentalBooleanAssignment=ignore
 org.eclipse.jdt.core.compiler.problem.potentialNullReference=ignore
 org.eclipse.jdt.core.compiler.problem.potentiallyUnclosedCloseable=ignore
-# TODO: generics-related warning that is normally enabled by default
-org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
+org.eclipse.jdt.core.compiler.problem.rawTypeReference=error
 org.eclipse.jdt.core.compiler.problem.redundantNullAnnotation=error
 org.eclipse.jdt.core.compiler.problem.redundantNullCheck=error
 org.eclipse.jdt.core.compiler.problem.redundantSpecificationOfTypeArguments=ignore
@@ -106,7 +105,7 @@ org.eclipse.jdt.core.compiler.problem.terminalDeprecation=error
 org.eclipse.jdt.core.compiler.problem.typeParameterHiding=error
 org.eclipse.jdt.core.compiler.problem.unavoidableGenericTypeProblems=enabled
 org.eclipse.jdt.core.compiler.problem.uncheckedTypeOperation=error
-# TODO: resource-related warning that is normally disabled by default
+# TODO: resource-related warning that is normally enabled by default
 # this analysis gets confused by some IOUtils method, maybe there is an improvement possible?
 org.eclipse.jdt.core.compiler.problem.unclosedCloseable=ignore
 org.eclipse.jdt.core.compiler.problem.undocumentedEmptyBlock=ignore
@@ -124,7 +123,7 @@ org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownException=ignore
 org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionExemptExceptionAndThrowable=enabled
 org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionIncludeDocCommentReference=enabled
 org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverriding=disabled
-org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=ignore
+org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=error
 org.eclipse.jdt.core.compiler.problem.unusedImport=error
 org.eclipse.jdt.core.compiler.problem.unusedLabel=error
 org.eclipse.jdt.core.compiler.problem.unusedLocal=error
@@ -134,7 +133,7 @@ org.eclipse.jdt.core.compiler.problem.unusedParameterIncludeDocCommentReference=
 org.eclipse.jdt.core.compiler.problem.unusedParameterWhenImplementingAbstract=disabled
 org.eclipse.jdt.core.compiler.problem.unusedParameterWhenOverridingConcrete=disabled
 org.eclipse.jdt.core.compiler.problem.unusedPrivateMember=error
-org.eclipse.jdt.core.compiler.problem.unusedTypeParameter=ignore
+org.eclipse.jdt.core.compiler.problem.unusedTypeParameter=error
 # TODO: normally enabled by default: warns of unnecessary SuppressedWarnings token
 # some SuppressWarnings are used for other tools
 org.eclipse.jdt.core.compiler.problem.unusedWarningToken=ignore
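
The remaining hunks update the affected call sites. Where an exception is deliberately ignored, the parameter is annotated and the project's code formatter then wraps the catch header over three lines, which accounts for most of the churn below. Where a fallback path previously discarded the first failure, the commit keeps it via Throwable.addSuppressed. A self-contained sketch of that second pattern, with hypothetical loadPrimary/loadFallback helpers standing in for the real dictionary-loading code:

    import java.io.IOException;

    class FallbackWithSuppressed {
      // If the fallback also fails, the original failure 'e' is no longer
      // swallowed: it travels along as a suppressed exception on the one thrown.
      static String load() {
        try {
          return loadPrimary();
        } catch (IOException e) {
          try {
            return loadFallback();
          } catch (IOException ioe) {
            RuntimeException ex = new RuntimeException(ioe);
            ex.addSuppressed(e);
            throw ex;
          }
        }
      }

      static String loadPrimary() throws IOException {
        throw new IOException("primary source unavailable");
      }

      static String loadFallback() throws IOException {
        return "fallback data";
      }
    }
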
@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.ar;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -77,7 +78,7 @@ public final class ArabicAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.bg;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -69,7 +70,7 @@ public final class BulgarianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.bn;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.StopFilter;
@@ -68,7 +69,7 @@ public final class BengaliAnalyzer extends StopwordAnalyzerBase {
         DEFAULT_STOP_SET =
             loadStopwordSet(false, BengaliAnalyzer.class, DEFAULT_STOPWORD_FILE, STOPWORDS_COMMENT);
       } catch (IOException ex) {
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.br;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -69,7 +70,7 @@ public final class BrazilianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.ca;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.util.Arrays;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -70,7 +71,7 @@ public final class CatalanAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -17,6 +17,7 @@
 package org.apache.lucene.analysis.cjk;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -61,7 +62,7 @@ public final class CJKAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.ckb;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -68,7 +69,7 @@ public final class SoraniAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.cz;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -65,7 +66,7 @@ public final class CzechAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.da;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -69,7 +70,7 @@ public final class DanishAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
      }
     }
   }

@@ -19,6 +19,7 @@ package org.apache.lucene.analysis.de;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -72,7 +73,7 @@ public final class GermanAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.el;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.StopFilter;
@@ -60,7 +61,7 @@ public final class GreekAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.es;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -68,7 +69,7 @@ public final class SpanishAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.et;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -60,7 +61,7 @@ public final class EstonianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.eu;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -63,7 +64,7 @@ public final class BasqueAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.fa;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -74,7 +75,7 @@ public final class PersianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.fi;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -69,7 +70,7 @@ public final class FinnishAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.fr;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import org.apache.lucene.analysis.Analyzer;
@@ -85,7 +86,7 @@ public final class FrenchAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.ga;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.util.Arrays;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -75,7 +76,7 @@ public final class IrishAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.hi;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.StopFilter;
@@ -70,7 +71,7 @@ public final class HindiAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.hu;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -69,7 +70,7 @@ public final class HungarianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -98,7 +98,9 @@ interface AffixCondition {
         return ALWAYS_FALSE;
       }
       return regexpCondition(kind, condition.substring(0, split), conditionChars - strip.length());
-    } catch (PatternSyntaxException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        PatternSyntaxException e) {
       return ALWAYS_FALSE;
     } catch (Throwable e) {
       throw new IllegalArgumentException("On line: " + line, e);

@@ -669,7 +669,9 @@ public class Dictionary {
       int numLines;
       try {
         numLines = Integer.parseInt(args[3]);
-      } catch (NumberFormatException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          NumberFormatException e) {
         return;
       }
       affixData = ArrayUtil.grow(affixData, currentAffix * 4 + numLines * 4);
@@ -1297,7 +1299,9 @@ public class Dictionary {
       try {
         int alias = Integer.parseInt(morphData.trim());
         morphData = morphAliases[alias - 1];
-      } catch (NumberFormatException ignored) {
+      } catch (
+          @SuppressWarnings("unused")
+          NumberFormatException ignored) {
       }
     }
     if (morphData.isBlank()) {

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.hy;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -64,7 +65,7 @@ public final class ArmenianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.id;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
 import org.apache.lucene.analysis.StopFilter;
@@ -59,7 +60,7 @@ public final class IndonesianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.it;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import org.apache.lucene.analysis.Analyzer;
@@ -78,7 +79,7 @@ public final class ItalianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.lt;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -64,7 +65,7 @@ public final class LithuanianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.lv;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -67,7 +68,7 @@ public final class LatvianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -53,7 +53,9 @@ public class DateRecognizerFilter extends FilteringTokenFilter {
       // We don't care about the date, just that the term can be parsed to one.
       dateFormat.parse(termAtt.toString());
       return true;
-    } catch (ParseException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        ParseException e) {
       // This term is not a date.
     }
 

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.nl;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArrayMap;
@@ -73,7 +74,7 @@ public final class DutchAnalyzer extends Analyzer {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
 
       DEFAULT_STEM_DICT = new CharArrayMap<>(4, false);

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.no;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -69,7 +70,7 @@ public final class NorwegianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.pt;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -68,7 +69,7 @@ public final class PortugueseAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.ro;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -67,7 +68,7 @@ public final class RomanianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.sr;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.*;
 import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
@@ -61,7 +62,7 @@ public class SerbianAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.sv;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
@@ -69,7 +70,7 @@ public final class SwedishAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.th;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.LowerCaseFilter;
@@ -62,7 +63,7 @@ public final class ThaiAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.tr;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.StopFilter;
@@ -65,7 +66,7 @@ public final class TurkishAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -192,7 +192,9 @@ public abstract class CharArrayIterator implements CharacterIterator {
       bi.setText("\udb40\udc53");
       bi.next();
       v = false;
-    } catch (Exception e) {
+    } catch (
+        @SuppressWarnings("unused")
+        Exception e) {
       v = true;
     }
     HAS_BUGGY_BREAKITERATORS = v;

@@ -85,7 +85,7 @@ public final class FilesystemResourceLoader implements ResourceLoader {
   public InputStream openResource(String resource) throws IOException {
     try {
       return Files.newInputStream(baseDirectory.resolve(resource));
-    } catch (FileNotFoundException | NoSuchFileException fnfe) {
+    } catch (@SuppressWarnings("unused") FileNotFoundException | NoSuchFileException fnfe) {
       return delegate.openResource(resource);
     }
   }

@@ -125,15 +125,12 @@ public class TestClassicAnalyzer extends BaseTokenStreamTestCase {
     assertAnalyzesTo(a, "Excite@Home", new String[] {"excite@home"});
   }
 
+  // should not throw NPE
   public void testLucene1140() throws Exception {
-    try {
-      ClassicAnalyzer analyzer = new ClassicAnalyzer();
-      assertAnalyzesTo(
-          analyzer, "www.nutch.org.", new String[] {"www.nutch.org"}, new String[] {"<HOST>"});
-      analyzer.close();
-    } catch (NullPointerException e) {
-      fail("Should not throw an NPE and it did");
-    }
+    ClassicAnalyzer analyzer = new ClassicAnalyzer();
+    assertAnalyzesTo(
+        analyzer, "www.nutch.org.", new String[] {"www.nutch.org"}, new String[] {"<HOST>"});
+    analyzer.close();
   }
 
   public void testDomainNames() throws Exception {

@@ -169,7 +169,9 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
           ((ResourceLoaderAware) instance).inform(loader);
         }
         assertSame(c, instance.create().getClass());
-      } catch (IllegalArgumentException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          IllegalArgumentException e) {
         // TODO: For now pass because some factories have not yet a default config that always
         // works
       }
@@ -193,7 +195,9 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
         if (KeywordTokenizer.class != createdClazz) {
           assertSame(c, createdClazz);
         }
-      } catch (IllegalArgumentException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          IllegalArgumentException e) {
         // TODO: For now pass because some factories have not yet a default config that always
         // works
       }
@@ -214,7 +218,9 @@ public class TestAllAnalyzersHaveFactories extends LuceneTestCase {
         if (StringReader.class != createdClazz) {
          assertSame(c, createdClazz);
         }
-      } catch (IllegalArgumentException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          IllegalArgumentException e) {
         // TODO: For now pass because some factories have not yet a default config that always
         // works
       }

@@ -128,7 +128,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
     try {
       ctor = factoryClazz.getConstructor(Map.class);
     } catch (Exception e) {
-      throw new RuntimeException("factory '" + factoryClazz + "' does not have a proper ctor!");
+      throw new RuntimeException("factory '" + factoryClazz + "' does not have a proper ctor!", e);
     }
 
     AbstractAnalysisFactory factory = null;
@@ -146,9 +146,13 @@ public class TestFactories extends BaseTokenStreamTestCase {
     if (factory instanceof ResourceLoaderAware) {
       try {
         ((ResourceLoaderAware) factory).inform(new StringMockResourceLoader(""));
-      } catch (IOException ignored) {
+      } catch (
+          @SuppressWarnings("unused")
+          IOException ignored) {
         // it's ok if the right files arent available or whatever to throw this
-      } catch (IllegalArgumentException ignored) {
+      } catch (
+          @SuppressWarnings("unused")
+          IllegalArgumentException ignored) {
         // is this ok? I guess so
       }
     }

@@ -187,7 +187,9 @@ public class TestPerformance extends LuceneTestCase {
           long start = System.nanoTime();
           try {
             speller.suggest(word);
-          } catch (SuggestionTimeoutException e) {
+          } catch (
+              @SuppressWarnings("unused")
+              SuggestionTimeoutException e) {
             System.out.println("Timeout happened for " + word + ", skipping");
             return false;
           }

@@ -1467,7 +1467,9 @@ public class TestSynonymGraphFilter extends BaseTokenStreamTestCase {
       // output token that also happens to be in the input:
       try {
         actual = Operations.determinize(actual, 50000);
-      } catch (TooComplexToDeterminizeException tctde) {
+      } catch (
+          @SuppressWarnings("unused")
+          TooComplexToDeterminizeException tctde) {
         // Unfortunately the syns can easily create difficult-to-determinize graphs:
         assertTrue(approxEquals(actual, expected));
         continue;
@@ -1475,7 +1477,9 @@ public class TestSynonymGraphFilter extends BaseTokenStreamTestCase {
 
       try {
         expected = Operations.determinize(expected, 50000);
-      } catch (TooComplexToDeterminizeException tctde) {
+      } catch (
+          @SuppressWarnings("unused")
+          TooComplexToDeterminizeException tctde) {
         // Unfortunately the syns can easily create difficult-to-determinize graphs:
         assertTrue(approxEquals(actual, expected));
         continue;

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.ja;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.util.HashSet;
 import java.util.Set;
 import org.apache.lucene.analysis.CharArraySet;
@@ -86,7 +87,7 @@ public class JapaneseAnalyzer extends StopwordAnalyzerBase {
         }
       } catch (IOException ex) {
         // default set should always be present as it is part of the distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword or stoptag set");
+        throw new UncheckedIOException("Unable to load default stopword or stoptag set", ex);
       }
     }
   }

@@ -251,7 +251,7 @@ public class JapaneseNumberFilter extends TokenFilter {
         return number;
       }
       return normalizedNumber.stripTrailingZeros().toPlainString();
-    } catch (NumberFormatException | ArithmeticException e) {
+    } catch (@SuppressWarnings("unused") NumberFormatException | ArithmeticException e) {
       // Return the source number in case of error, i.e. malformed input
       return number;
     }

@@ -125,7 +125,7 @@ public class TestFactories extends BaseTokenStreamTestCase {
     try {
       ctor = factoryClazz.getConstructor(Map.class);
     } catch (Exception e) {
-      throw new RuntimeException("factory '" + factoryClazz + "' does not have a proper ctor!");
+      throw new RuntimeException("factory '" + factoryClazz + "' does not have a proper ctor!", e);
     }
 
     AbstractAnalysisFactory factory = null;
@@ -143,9 +143,13 @@ public class TestFactories extends BaseTokenStreamTestCase {
     if (factory instanceof ResourceLoaderAware) {
      try {
         ((ResourceLoaderAware) factory).inform(new StringMockResourceLoader(""));
-      } catch (IOException ignored) {
+      } catch (
+          @SuppressWarnings("unused")
+          IOException ignored) {
         // it's ok if the right files arent available or whatever to throw this
-      } catch (IllegalArgumentException ignored) {
+      } catch (
+          @SuppressWarnings("unused")
+          IllegalArgumentException ignored) {
         // is this ok? I guess so
       }
     }

@@ -18,6 +18,7 @@ package org.apache.lucene.analysis.uk;
 
 import java.io.IOException;
 import java.io.Reader;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import morfologik.stemming.Dictionary;
 import org.apache.lucene.analysis.Analyzer;
@@ -73,7 +74,7 @@ public final class UkrainianMorfologikAnalyzer extends StopwordAnalyzerBase {
       } catch (IOException ex) {
         // default set should always be present as it is part of the
         // distribution (JAR)
-        throw new RuntimeException("Unable to load default stopword set");
+        throw new UncheckedIOException("Unable to load default stopword set", ex);
       }
     }
   }

@@ -237,7 +237,7 @@ public class KoreanNumberFilter extends TokenFilter {
         return number;
       }
       return normalizedNumber.stripTrailingZeros().toPlainString();
-    } catch (NumberFormatException | ArithmeticException e) {
+    } catch (@SuppressWarnings("unused") NumberFormatException | ArithmeticException e) {
       // Return the source number in case of error, i.e. malformed input
       return number;
     }

@@ -70,7 +70,9 @@ public final class PhoneticFilter extends TokenFilter {
     try {
       String v = encoder.encode(value).toString();
       if (v.length() > 0 && !value.equals(v)) phonetic = v;
-    } catch (Exception ignored) {
+    } catch (
+        @SuppressWarnings("unused")
+        Exception ignored) {
     } // just use the direct text
 
     if (phonetic == null) return true;

@@ -80,7 +80,9 @@ public class AnalyzerProfile {
     try (BufferedReader reader = Files.newBufferedReader(propFile, StandardCharsets.UTF_8)) {
       prop.load(reader);
       return prop.getProperty("analysis.data.dir", "");
-    } catch (IOException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        IOException e) {
       return "";
     }
   }

@@ -17,6 +17,7 @@
 package org.apache.lucene.analysis.cn.smart;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.nio.charset.StandardCharsets;
 import java.util.Set;
 import org.apache.lucene.analysis.Analyzer;
@@ -77,7 +78,7 @@ public final class SmartChineseAnalyzer extends Analyzer {
     } catch (IOException ex) {
       // default set should always be present as it is part of the
       // distribution (JAR)
-      throw new RuntimeException("Unable to load default stopword set");
+      throw new UncheckedIOException("Unable to load default stopword set", ex);
     }
   }
 

@@ -86,7 +86,9 @@ abstract class AbstractDictionary {
     try {
       String cchar = new String(buffer, "GB2312");
       return cchar;
-    } catch (UnsupportedEncodingException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        UnsupportedEncodingException e) {
       return "";
     }
   }

@@ -65,7 +65,9 @@ class BigramDictionary extends AbstractDictionary {
         try {
           singleInstance.load(dictRoot);
         } catch (IOException ioe) {
-          throw new RuntimeException(ioe);
+          RuntimeException ex = new RuntimeException(ioe);
+          ex.addSuppressed(e);
+          throw ex;
         }
       } catch (ClassNotFoundException e) {
         throw new RuntimeException(e);

@@ -78,7 +78,9 @@ class WordDictionary extends AbstractDictionary {
       singleInstance = new WordDictionary();
       try {
         singleInstance.load();
-      } catch (IOException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          IOException e) {
        String wordDictRoot = AnalyzerProfile.ANALYSIS_DATA_DIR;
        singleInstance.load(wordDictRoot);
      } catch (ClassNotFoundException e) {
@@ -165,7 +167,9 @@ class WordDictionary extends AbstractDictionary {
       output.writeObject(wordItem_charArrayTable);
       output.writeObject(wordItem_frequencyTable);
       // log.info("serialize core dict.");
-    } catch (Exception e) {
+    } catch (
+        @SuppressWarnings("unused")
+        Exception e) {
       // log.warn(e.getMessage());
     }
   }

@@ -136,7 +136,9 @@ public class Compile {
             trie.add(token, diff.exec(token, stem));
           }
         }
-      } catch (java.util.NoSuchElementException x) {
+      } catch (
+          @SuppressWarnings("unused")
+          java.util.NoSuchElementException x) {
         // no base token (stem) on a line
       }
     }

@@ -140,9 +140,13 @@ public class Diff {
         }
         pos--;
       }
-    } catch (StringIndexOutOfBoundsException x) {
+    } catch (
+        @SuppressWarnings("unused")
+        StringIndexOutOfBoundsException x) {
       // x.printStackTrace();
-    } catch (ArrayIndexOutOfBoundsException x) {
+    } catch (
+        @SuppressWarnings("unused")
+        ArrayIndexOutOfBoundsException x) {
       // x.printStackTrace();
     }
   }

@@ -71,7 +71,9 @@ public class DiffIt {
   static int get(int i, String s) {
     try {
       return Integer.parseInt(s.substring(i, i + 1));
-    } catch (Throwable x) {
+    } catch (
+        @SuppressWarnings("unused")
+        Exception x) {
       return 1;
     }
   }
@@ -111,7 +113,9 @@ public class DiffIt {
             System.out.println(stem + " " + diff.exec(token, stem));
           }
         }
-      } catch (java.util.NoSuchElementException x) {
+      } catch (
+          @SuppressWarnings("unused")
+          java.util.NoSuchElementException x) {
         // no base token (stem) on a line
       }
     }

@@ -124,7 +124,9 @@ public class MultiTrie2 extends MultiTrie {
           lastkey = key;
         }
       }
-    } catch (IndexOutOfBoundsException x) {
+    } catch (
+        @SuppressWarnings("unused")
+        IndexOutOfBoundsException x) {
     }
     return result;
   }
@@ -167,7 +169,9 @@ public class MultiTrie2 extends MultiTrie {
           lastkey = key;
         }
       }
-    } catch (IndexOutOfBoundsException x) {
+    } catch (
+        @SuppressWarnings("unused")
+        IndexOutOfBoundsException x) {
     }
     return result;
   }

@@ -150,7 +150,9 @@ public class TestCompile extends LuceneTestCase {
         Diff.apply(stm, cmd);
         assertEquals(stem.toLowerCase(Locale.ROOT), stm.toString().toLowerCase(Locale.ROOT));
       }
-    } catch (java.util.NoSuchElementException x) {
+    } catch (
+        @SuppressWarnings("unused")
+        java.util.NoSuchElementException x) {
       // no base token (stem) on a line
     }
   }

@@ -370,7 +370,9 @@ final class IntersectTermsEnum extends BaseTermsEnum {
   public BytesRef next() throws IOException {
     try {
       return _next();
-    } catch (NoMoreTermsException eoi) {
+    } catch (
+        @SuppressWarnings("unused")
+        NoMoreTermsException eoi) {
       // Provoke NPE if we are (illegally!) called again:
       currentFrame = null;
       return null;

@@ -362,7 +362,9 @@ public final class Lucene40BlockTreeTermsReader extends FieldsProducer {
     } else {
       try {
         return b.utf8ToString() + " " + b;
-      } catch (Throwable t) {
+      } catch (
+          @SuppressWarnings("unused")
+          Throwable t) {
         // If BytesRef isn't actually UTF8, or it's eg a
         // prefix of UTF8 that ends mid-unicode-char, we
         // fallback to hex:

@@ -91,9 +91,13 @@ interface BugfixDeflater_JDK8252739 {
       if (restoredLength != testData.length) {
         return true;
       }
-    } catch (DataFormatException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        DataFormatException e) {
       return true;
-    } catch (RuntimeException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        RuntimeException e) {
       return true;
     } finally {
       inflater.end();

@@ -508,7 +508,9 @@ public final class Lucene50CompressingStoredFieldsWriter extends StoredFieldsWri
     boolean v = true;
     try {
       v = Boolean.parseBoolean(System.getProperty(BULK_MERGE_ENABLED_SYSPROP, "true"));
-    } catch (SecurityException ignored) {
+    } catch (
+        @SuppressWarnings("unused")
+        SecurityException ignored) {
     }
     BULK_MERGE_ENABLED = v;
   }

@@ -791,7 +791,9 @@ public final class Lucene50CompressingTermVectorsWriter extends TermVectorsWrite
     boolean v = true;
     try {
       v = Boolean.parseBoolean(System.getProperty(BULK_MERGE_ENABLED_SYSPROP, "true"));
-    } catch (SecurityException ignored) {
+    } catch (
+        @SuppressWarnings("unused")
+        SecurityException ignored) {
     }
     BULK_MERGE_ENABLED = v;
   }

@@ -396,26 +396,19 @@ public class TestIndexedDISI extends LuceneTestCase {
 
     // Illegal values
     for (byte denseRankPower : new byte[] {-2, 0, 1, 6, 16}) {
-      try {
-        createAndOpenDISI(
-            denseRankPower, (byte) 8); // Illegal write, legal read (should not reach read)
-        fail(
-            "Trying to create an IndexedDISI data stream with denseRankPower-read "
-                + denseRankPower
-                + " and denseRankPower-write 8 should fail");
-      } catch (IllegalArgumentException e) {
-        // Expected
-      }
-      try {
-        createAndOpenDISI(
-            (byte) 8, denseRankPower); // Legal write, illegal read (should reach read)
-        fail(
-            "Trying to create an IndexedDISI data stream with denseRankPower-write 8 and denseRankPower-read "
-                + denseRankPower
-                + " should fail");
-      } catch (IllegalArgumentException e) {
-        // Expected
-      }
+      expectThrows(
+          IllegalArgumentException.class,
+          () -> {
+            createAndOpenDISI(
+                denseRankPower, (byte) 8); // Illegal write, legal read (should not reach read)
+          });
+
+      expectThrows(
+          IllegalArgumentException.class,
+          () -> {
+            createAndOpenDISI(
+                (byte) 8, denseRankPower); // Legal write, illegal read (should reach read)
+          });
     }
   }
 

@@ -82,7 +82,9 @@ public class SpatialFileQueryMaker extends AbstractQueryMaker {
           i--; // skip
         }
       }
-    } catch (NoMoreDataException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        NoMoreDataException e) {
       // all-done
     } finally {
       src.close();

@@ -279,12 +279,16 @@ public class AnalyzerFactoryTask extends PerfTask {
             createAnalysisPipelineComponent(stok, clazz);
             expectedArgType = ArgType.TOKENFILTER;
           } catch (IllegalArgumentException e2) {
-            throw new RuntimeException(
-                "Line #"
-                    + lineno(stok)
-                    + ": Can't find class '"
-                    + argName
-                    + "' as CharFilterFactory or TokenizerFactory");
+            RuntimeException ex =
+                new RuntimeException(
+                    "Line #"
+                        + lineno(stok)
+                        + ": Can't find class '"
+                        + argName
+                        + "' as CharFilterFactory or TokenizerFactory",
+                    e2);
+            ex.addSuppressed(e);
+            throw ex;
           }
         }
       } else { // expectedArgType = ArgType.TOKENFILTER
@@ -298,7 +302,8 @@ public class AnalyzerFactoryTask extends PerfTask {
                 + lineno(stok)
                 + ": Can't find class '"
                 + className
-                + "' as TokenFilterFactory");
+                + "' as TokenFilterFactory",
+            e);
       }
       createAnalysisPipelineComponent(stok, clazz);
     }
@@ -483,13 +488,17 @@
       // Second, retry lookup after prepending the Lucene analysis package prefix
       return Class.forName(LUCENE_ANALYSIS_PACKAGE_PREFIX + className).asSubclass(expectedType);
     } catch (ClassNotFoundException e1) {
-      throw new ClassNotFoundException(
-          "Can't find class '"
-              + className
-              + "' or '"
-              + LUCENE_ANALYSIS_PACKAGE_PREFIX
-              + className
-              + "'");
+      ClassNotFoundException ex =
+          new ClassNotFoundException(
+              "Can't find class '"
+                  + className
+                  + "' or '"
+                  + LUCENE_ANALYSIS_PACKAGE_PREFIX
+                  + className
+                  + "'",
+              e1);
+      ex.addSuppressed(e);
+      throw ex;
     }
   }
 }

@@ -46,7 +46,9 @@ public class NewAnalyzerTask extends PerfTask {
       // default one anymore
       Constructor<? extends Analyzer> cnstr = clazz.getConstructor(Version.class);
       return cnstr.newInstance(Version.LATEST);
-    } catch (NoSuchMethodException nsme) {
+    } catch (
+        @SuppressWarnings("unused")
+        NoSuchMethodException nsme) {
       // otherwise use default ctor
       return clazz.getConstructor().newInstance();
     }
@@ -80,7 +82,9 @@ public class NewAnalyzerTask extends PerfTask {
         String coreClassName = "org.apache.lucene.analysis.core." + analyzerName;
         analyzer = createAnalyzer(coreClassName);
         analyzerName = coreClassName;
-      } catch (ClassNotFoundException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          ClassNotFoundException e) {
         // If not a core analyzer, try the base analysis package
         analyzerName = "org.apache.lucene.analysis." + analyzerName;
         analyzer = createAnalyzer(analyzerName);

@@ -195,7 +195,9 @@ public class TaskSequence extends PerfTask {
             countsByTime[slot] += inc;
           }
           if (anyExhaustibleTasks) updateExhausted(task);
-        } catch (NoMoreDataException e) {
+        } catch (
+            @SuppressWarnings("unused")
+            NoMoreDataException e) {
           exhausted = true;
         }
       }
@@ -262,7 +264,9 @@ public class TaskSequence extends PerfTask {
           }
 
           if (anyExhaustibleTasks) updateExhausted(task);
-        } catch (NoMoreDataException e) {
+        } catch (
+            @SuppressWarnings("unused")
+            NoMoreDataException e) {
           exhausted = true;
         }
       }
@@ -305,7 +309,9 @@ public class TaskSequence extends PerfTask {
               updateExhausted(task);
             }
             count += n;
-          } catch (NoMoreDataException e) {
+          } catch (
+              @SuppressWarnings("unused")
+              NoMoreDataException e) {
             exhausted = true;
           } catch (Exception e) {
             throw new RuntimeException(e);

@@ -309,7 +309,9 @@ public class Algorithm implements AutoCloseable {
     for (String pkg : taskPackages) {
       try {
         return Class.forName(pkg + '.' + taskName + "Task");
-      } catch (ClassNotFoundException e) {
+      } catch (
+          @SuppressWarnings("unused")
+          ClassNotFoundException e) {
         // failed in this package, might succeed in the next one...
       }
     }

@@ -74,7 +74,9 @@ public class QualityQuery implements Comparable<QualityQuery> {
       int n = Integer.parseInt(queryID);
       int nOther = Integer.parseInt(other.queryID);
       return n - nOther;
-    } catch (NumberFormatException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        NumberFormatException e) {
       // fall back to string comparison
       return queryID.compareTo(other.queryID);
     }

@@ -96,7 +96,9 @@ public class ExtractWikipedia {
             doc.get(DocMaker.DATE_FIELD),
             doc.get(DocMaker.BODY_FIELD));
       }
-    } catch (NoMoreDataException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        NoMoreDataException e) {
       // continue
     }
     long finish = System.currentTimeMillis();

@@ -424,7 +424,9 @@ public class TestTrecContentSource extends LuceneTestCase {
           assertTrue("Should never get here!", false);
         }
       }
-    } catch (NoMoreDataException e) {
+    } catch (
+        @SuppressWarnings("unused")
+        NoMoreDataException e) {
       gotExpectedException = true;
     }
     assertTrue("Should have gotten NoMoreDataException!", gotExpectedException);

@@ -135,7 +135,9 @@ public class ConfusionMatrixGenerator {
             }
           }
         }
-      } catch (TimeoutException timeoutException) {
+      } catch (
+          @SuppressWarnings("unused")
+          TimeoutException timeoutException) {
         // add classification timeout
         time += 5000;
       } catch (ExecutionException | InterruptedException executionException) {

@@ -86,7 +86,9 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
     if (indexProperty != null) {
       try {
         index = Boolean.valueOf(indexProperty);
-      } catch (Exception e) {
+      } catch (
+          @SuppressWarnings("unused")
+          Exception e) {
         // ignore
       }
     }
@@ -95,7 +97,9 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
     if (splitProperty != null) {
       try {
         split = Boolean.valueOf(splitProperty);
-      } catch (Exception e) {
+      } catch (
+          @SuppressWarnings("unused")
+          Exception e) {
         // ignore
       }
     }
@@ -438,7 +442,9 @@ public final class Test20NewsgroupsClassification extends LuceneTestCase {
         }
       }
       return new NewsPost(body.toString(), subject, groupName);
-    } catch (Throwable e) {
+    } catch (
+        @SuppressWarnings("unused")
+        Throwable e) {
       return null;
     }
   }

@@ -155,7 +155,9 @@ public class TestDataSplitter extends LuceneTestCase {
   private static void closeQuietly(IndexReader reader) throws IOException {
     try {
       if (reader != null) reader.close();
-    } catch (Exception e) {
+    } catch (
+        @SuppressWarnings("unused")
+        Exception e) {
       // do nothing
     }
   }

@@ -242,7 +242,9 @@ public final class OrdsBlockTreeTermsReader extends FieldsProducer {
     } else {
       try {
         return b.utf8ToString() + " " + b;
-      } catch (Throwable t) {
+      } catch (
+          @SuppressWarnings("unused")
+          Throwable t) {
         // If BytesRef isn't actually UTF8, or it's eg a
         // prefix of UTF8 that ends mid-unicode-char, we
         // fallback to hex:

@@ -1932,7 +1932,9 @@ public final class DirectPostingsFormat extends PostingsFormat {
         upto++;
         try {
           return docID = docIDs[upto];
-        } catch (ArrayIndexOutOfBoundsException e) {
+        } catch (
+            @SuppressWarnings("unused")
+            ArrayIndexOutOfBoundsException e) {
         }
         return docID = NO_MORE_DOCS;
       }

@@ -71,7 +71,7 @@ public class SimpleTextCompoundFormat extends CompoundFormat {
       tablePos = df.parse(stripPrefix(scratch, TABLEPOS)).longValue();
     } catch (ParseException e) {
       throw new CorruptIndexException(
-          "can't parse CFS trailer, got: " + scratch.get().utf8ToString(), in);
+          "can't parse CFS trailer, got: " + scratch.get().utf8ToString(), in, e);
     }
 
     // seek to TOC and read it

@@ -77,7 +77,9 @@ public class SimpleTextStoredFieldsReader extends StoredFieldsReader {
       if (!success) {
         try {
           close();
-        } catch (Throwable t) {
+        } catch (
+            @SuppressWarnings("unused")
+            Throwable t) {
         } // ensure we throw our original exception
       }
     }

@@ -80,7 +80,9 @@ public class SimpleTextTermVectorsReader extends TermVectorsReader {
       if (!success) {
         try {
           close();
-        } catch (Throwable t) {
+        } catch (
+            @SuppressWarnings("unused")
+            Throwable t) {
         } // ensure we throw our original exception
       }
     }

@@ -111,7 +111,9 @@ public abstract class TokenStream extends AttributeSource implements Closeable {
               || Modifier.isFinal(clazz.getMethod("incrementToken").getModifiers())
           : "TokenStream implementation classes or at least their incrementToken() implementation must be final";
       return true;
-    } catch (NoSuchMethodException nsme) {
+    } catch (
+        @SuppressWarnings("unused")
+        NoSuchMethodException nsme) {
       return false;
     }
   }

@ -91,9 +91,13 @@ interface BugfixDeflater_JDK8252739 {
|
||||
if (restoredLength != testData.length) {
|
||||
return true;
|
||||
}
|
||||
} catch (DataFormatException e) {
|
||||
} catch (
|
||||
@SuppressWarnings("unused")
|
||||
DataFormatException e) {
|
||||
return true;
|
||||
} catch (RuntimeException e) {
|
||||
} catch (
|
||||
@SuppressWarnings("unused")
|
||||
RuntimeException e) {
|
||||
return true;
|
||||
} finally {
|
||||
inflater.end();
|
||||
|
@ -205,7 +205,9 @@ public final class Lucene90VectorWriter extends VectorWriter {
} else {
try {
maxConn = Integer.parseInt(maxConnStr);
} catch (NumberFormatException e) {
} catch (
@SuppressWarnings("unused")
NumberFormatException e) {
throw new NumberFormatException(
"Received non integer value for max-connections parameter of HnswGraphBuilder, value: "
+ maxConnStr);

@ -216,7 +218,9 @@ public final class Lucene90VectorWriter extends VectorWriter {
} else {
try {
beamWidth = Integer.parseInt(beamWidthStr);
} catch (NumberFormatException e) {
} catch (
@SuppressWarnings("unused")
NumberFormatException e) {
throw new NumberFormatException(
"Received non integer value for beam-width parameter of HnswGraphBuilder, value: "
+ beamWidthStr);
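These two hunks still need the annotation even though they rethrow: NumberFormatException has only a message constructor, so the caught exception cannot be chained through it and the parameter stays unused. A small sketch of that boundary case (class and parameter names are illustrative):

    // NumberFormatException cannot carry a cause via its constructor,
    // so the caught parameter is annotated rather than chained.
    class IntOption {
      static int parse(String name, String value) {
        try {
          return Integer.parseInt(value);
        } catch (
            @SuppressWarnings("unused")
            NumberFormatException e) {
          throw new NumberFormatException("non-integer value for " + name + ": " + value);
        }
      }
    }

An alternative that would consume the parameter is attaching it with initCause on the new exception, which is exactly what the DateTools hunk further down does.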
@ -370,7 +370,9 @@ final class IntersectTermsEnum extends BaseTermsEnum {
public BytesRef next() throws IOException {
try {
return _next();
} catch (NoMoreTermsException eoi) {
} catch (
@SuppressWarnings("unused")
NoMoreTermsException eoi) {
// Provoke NPE if we are (illegally!) called again:
currentFrame = null;
return null;

@ -310,7 +310,9 @@ public final class Lucene90BlockTreeTermsReader extends FieldsProducer {
} else {
try {
return b.utf8ToString() + " " + b;
} catch (Throwable t) {
} catch (
@SuppressWarnings("unused")
Throwable t) {
// If BytesRef isn't actually UTF8, or it's eg a
// prefix of UTF8 that ends mid-unicode-char, we
// fallback to hex:

@ -495,7 +495,9 @@ public final class Lucene90CompressingStoredFieldsWriter extends StoredFieldsWri
boolean v = true;
try {
v = Boolean.parseBoolean(System.getProperty(BULK_MERGE_ENABLED_SYSPROP, "true"));
} catch (SecurityException ignored) {
} catch (
@SuppressWarnings("unused")
SecurityException ignored) {
}
BULK_MERGE_ENABLED = v;
}

@ -797,7 +797,9 @@ public final class Lucene90CompressingTermVectorsWriter extends TermVectorsWrite
boolean v = true;
try {
v = Boolean.parseBoolean(System.getProperty(BULK_MERGE_ENABLED_SYSPROP, "true"));
} catch (SecurityException ignored) {
} catch (
@SuppressWarnings("unused")
SecurityException ignored) {
}
BULK_MERGE_ENABLED = v;
}

@ -116,7 +116,9 @@ public class DateTools {
try {
return TL_FORMATS.get()[dateString.length()].parse(dateString);
} catch (Exception e) {
throw new ParseException("Input is not a valid date string: " + dateString, 0);
ParseException ex = new ParseException("Input is not a valid date string: " + dateString, 0);
ex.initCause(e);
throw ex;
}
}
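The DateTools hunk shows the initCause route in the commit itself: ParseException likewise lacks a cause-accepting constructor, so the cause is attached after construction, which counts as using the parameter. A standalone sketch of the idiom (the date pattern and class name are illustrative):

    import java.text.ParseException;
    import java.text.SimpleDateFormat;
    import java.util.Date;

    class DateParser {
      static Date parseStrict(String dateString) throws ParseException {
        try {
          return new SimpleDateFormat("yyyyMMdd").parse(dateString);
        } catch (Exception e) {
          // ParseException(String, int) is the only constructor, so the cause
          // is wired in afterwards; the parameter is used, no annotation needed.
          ParseException ex =
              new ParseException("Input is not a valid date string: " + dateString, 0);
          ex.initCause(e);
          throw ex;
        }
      }
    }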
@ -714,7 +714,9 @@ abstract class SpatialQuery extends Query {
return rel;
}
});
} catch (CollectionTerminatedException e) {
} catch (
@SuppressWarnings("unused")
CollectionTerminatedException e) {
return true;
}
return false;

@ -342,7 +342,9 @@ class SimpleGeoJSONPolygonParser {
// we only handle doubles
try {
return Double.parseDouble(b.toString());
} catch (NumberFormatException nfe) {
} catch (
@SuppressWarnings("unused")
NumberFormatException nfe) {
upto = uptoStart;
throw newParseException("could not parse number as double");
}

@ -301,7 +301,9 @@ public class SimpleWKTShapeParser {
} else {
try {
return Double.parseDouble(stream.sval);
} catch (NumberFormatException e) {
} catch (
@SuppressWarnings("unused")
NumberFormatException e) {
throw new ParseException("invalid number found: " + stream.sval, stream.lineno());
}
}

@ -1334,7 +1334,9 @@ public final class CheckIndex implements Closeable {
long ord = -1;
try {
ord = termsEnum.ord();
} catch (UnsupportedOperationException uoe) {
} catch (
@SuppressWarnings("unused")
UnsupportedOperationException uoe) {
hasOrd = false;
}

@ -167,7 +167,9 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
if (value != null) {
coreCount = Integer.parseInt(value);
}
} catch (Throwable ignored) {
} catch (
@SuppressWarnings("unused")
Throwable ignored) {
}

maxThreadCount = Math.max(1, Math.min(4, coreCount / 2));

@ -466,7 +468,9 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
if (toSync != null) {
try {
toSync.join();
} catch (InterruptedException ie) {
} catch (
@SuppressWarnings("unused")
InterruptedException ie) {
// ignore this Exception, we will retry until all threads are dead
interrupted = true;
}

@ -649,7 +653,9 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
// Let CMS run new merges if necessary:
try {
merge(mergeSource, MergeTrigger.MERGE_FINISHED);
} catch (AlreadyClosedException ace) {
} catch (
@SuppressWarnings("unused")
AlreadyClosedException ace) {
// OK
} catch (IOException ioe) {
throw new UncheckedIOException(ioe);

@ -274,7 +274,7 @@ public abstract class DirectoryReader extends BaseCompositeReader<LeafReader> {
// IOException allowed to throw there, in case
// segments_N is corrupt
sis = SegmentInfos.readCommit(dir, fileName, 0);
} catch (FileNotFoundException | NoSuchFileException fnfe) {
} catch (@SuppressWarnings("unused") FileNotFoundException | NoSuchFileException fnfe) {
// LUCENE-948: on NFS (and maybe others), if
// you have writers switching back and forth
// between machines, it's very likely that the
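The DirectoryReader change is the one multi-catch in the set; the two alternatives share a single parameter, so a single annotation before the first type covers both. A minimal sketch under those assumptions (CommitProbe is hypothetical):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;

    class CommitProbe {
      // Returns null when the file has vanished, e.g. a writer race over NFS.
      static byte[] readCommitOrNull(Path file) throws IOException {
        try {
          return Files.readAllBytes(file);
        } catch (@SuppressWarnings("unused") FileNotFoundException | NoSuchFileException e) {
          // One parameter serves both alternatives, so one annotation suffices.
          return null;
        }
      }
    }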
@ -665,7 +665,9 @@ final class DocumentsWriterFlushControl implements Accountable, Closeable {
try {
documentsWriter.subtractFlushedNumDocs(dwpt.getNumDocsInRAM());
dwpt.abort();
} catch (Exception ex) {
} catch (
@SuppressWarnings("unused")
Exception ex) {
// that's fine we just abort everything here this is best effort
} finally {
doAfterFlush(dwpt);

@ -677,7 +679,9 @@ final class DocumentsWriterFlushControl implements Accountable, Closeable {
blockedFlush); // add the blockedFlushes for correct accounting in doAfterFlush
documentsWriter.subtractFlushedNumDocs(blockedFlush.getNumDocsInRAM());
blockedFlush.abort();
} catch (Exception ex) {
} catch (
@SuppressWarnings("unused")
Exception ex) {
// that's fine we just abort everything here this is best effort
} finally {
doAfterFlush(blockedFlush);

@ -284,7 +284,9 @@ final class IndexFileDeleter implements Closeable {
try {
maxSegmentGen =
Math.max(SegmentInfos.generationFromSegmentsFileName(fileName), maxSegmentGen);
} catch (NumberFormatException ignore) {
} catch (
@SuppressWarnings("unused")
NumberFormatException ignore) {
// trash file: we have to handle this since we allow anything starting with 'segments'
// here
}

@ -294,7 +296,9 @@ final class IndexFileDeleter implements Closeable {
Math.max(
SegmentInfos.generationFromSegmentsFileName(fileName.substring(8)),
maxSegmentGen);
} catch (NumberFormatException ignore) {
} catch (
@SuppressWarnings("unused")
NumberFormatException ignore) {
// trash file: we have to handle this since we allow anything starting with
// 'pending_segments' here
}

@ -317,7 +321,9 @@ final class IndexFileDeleter implements Closeable {

try {
curGen = Math.max(curGen, IndexFileNames.parseGeneration(fileName));
} catch (NumberFormatException ignore) {
} catch (
@SuppressWarnings("unused")
NumberFormatException ignore) {
// trash file: we have to handle this since codec regex is only so good
}
maxPerSegmentGen.put(segmentName, curGen);

@ -400,7 +406,9 @@ final class IndexFileDeleter implements Closeable {
try {
ensureOpen();
return false;
} catch (AlreadyClosedException ace) {
} catch (
@SuppressWarnings("unused")
AlreadyClosedException ace) {
return true;
}
}

@ -1049,7 +1049,8 @@ public class IndexWriter
throw new IllegalArgumentException(
"the provided reader is stale: its prior commit file \""
+ segmentInfos.getSegmentsFileName()
+ "\" is missing from index");
+ "\" is missing from index",
ioe);
}

if (reader.writer != null) {

@ -5628,7 +5629,9 @@ public class IndexWriter
Collection<String> files;
try {
files = info.files();
} catch (IllegalStateException ise) {
} catch (
@SuppressWarnings("unused")
IllegalStateException ise) {
// OK
files = null;
}

@ -1218,7 +1218,7 @@ final class IndexingChain implements Accountable {
Math.addExact(invertState.length, invertState.termFreqAttribute.getTermFrequency());
} catch (ArithmeticException ae) {
throw new IllegalArgumentException(
"too many tokens for field \"" + field.name() + "\"");
"too many tokens for field \"" + field.name() + "\"", ae);
}

// System.out.println(" term=" + invertState.termAttribute);
Some files were not shown because too many files have changed in this diff.