LUCENE-9856: Static analysis take 3: Remove redundant interfaces (#38)

Co-authored-by: Robert Muir <rmuir@apache.org>
Uwe Schindler 2021-03-24 18:26:12 +01:00 committed by GitHub
parent c23ea2f537
commit 3214e365e3
57 changed files with 87 additions and 78 deletions

@@ -85,7 +85,7 @@ org.eclipse.jdt.core.compiler.problem.rawTypeReference=ignore
 org.eclipse.jdt.core.compiler.problem.redundantNullAnnotation=error
 org.eclipse.jdt.core.compiler.problem.redundantNullCheck=ignore
 org.eclipse.jdt.core.compiler.problem.redundantSpecificationOfTypeArguments=ignore
-org.eclipse.jdt.core.compiler.problem.redundantSuperinterface=ignore
+org.eclipse.jdt.core.compiler.problem.redundantSuperinterface=error
 org.eclipse.jdt.core.compiler.problem.reportMethodCanBePotentiallyStatic=ignore
 org.eclipse.jdt.core.compiler.problem.reportMethodCanBeStatic=ignore
 org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=error

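As orientation for the hunks that follow: with the setting above raised from ignore to error, ECJ now rejects a class that re-declares an interface it already inherits. A minimal sketch (the types Tagged, Base, and Sub are hypothetical, invented for this illustration):

interface Tagged {}

class Base implements Tagged {}

// ECJ reports a "redundant superinterface" diagnostic here, now as a build error:
// Sub already inherits Tagged through Base, so re-declaring it is redundant.
class Sub extends Base implements Tagged {}

Most hunks in this commit simply delete such re-declarations; the few places where the direct declaration is semantically required (Lucene's attribute reflection) keep it and suppress the diagnostic instead.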
@@ -147,6 +147,9 @@ public final class ConcatenateGraphFilter extends TokenStream {
 super.reset();
 // we only capture this if we really need it to save the UTF-8 to UTF-16 conversion
 charTermAttribute = getAttribute(CharTermAttribute.class); // may return null
+// make sure the TermToBytesRefAttribute attribute is implemented by our class, not via
+// CharTermAttribute's
+assert getAttribute(TermToBytesRefAttribute.class) instanceof BytesRefBuilderTermAttributeImpl;
 wasReset = true;
 }
@@ -347,8 +350,9 @@ public final class ConcatenateGraphFilter extends TokenStream {
 *
 * @lucene.internal
 */
+@SuppressWarnings("unused") // do not warn/error on redundant interface
 public static final class BytesRefBuilderTermAttributeImpl extends AttributeImpl
-    implements BytesRefBuilderTermAttribute, TermToBytesRefAttribute {
+    implements BytesRefBuilderTermAttribute, TermToBytesRefAttribute /*required*/ {
 private final BytesRefBuilder bytes = new BytesRefBuilder();
 private transient CharsRefBuilder charsRef;

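A note on the /*required*/ marker: BytesRefBuilderTermAttribute itself extends TermToBytesRefAttribute (which is why the analyzer flags the second interface as redundant), but attribute registration records only the interfaces a class declares directly (see the AttributeImpl and AttributeSource javadoc changes later in this commit), so removing the declaration would change which implementation answers a TermToBytesRefAttribute lookup. A hedged sketch of the invariant the new assert in reset() protects; resolvesTo is a hypothetical helper, not part of this commit:

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;

class AttributeLookupCheck {
  // true when the stream's TermToBytesRefAttribute lookup resolves to the expected
  // implementation class, rather than to an impl contributed by another attribute
  // such as CharTermAttributeImpl
  static boolean resolvesTo(TokenStream stream, Class<?> expectedImpl) {
    return expectedImpl.isInstance(stream.getAttribute(TermToBytesRefAttribute.class));
  }
}

resolvesTo(stream, BytesRefBuilderTermAttributeImpl.class) is then the condition asserted in reset() above.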
@@ -31,7 +31,6 @@ import org.apache.lucene.analysis.CharArraySet;
 import org.apache.lucene.analysis.TokenFilterFactory;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.util.ResourceLoader;
-import org.apache.lucene.util.ResourceLoaderAware;
 /**
 * Factory for a {@link ProtectedTermFilter}
@@ -82,8 +81,7 @@ import org.apache.lucene.util.ResourceLoaderAware;
 * @since 7.4.0
 * @lucene.spi {@value #NAME}
 */
-public class ProtectedTermFilterFactory extends ConditionalTokenFilterFactory
-    implements ResourceLoaderAware {
+public class ProtectedTermFilterFactory extends ConditionalTokenFilterFactory {
 public static final String NAME = "protectedTerm";

@@ -23,7 +23,7 @@ import org.apache.lucene.util.BytesRef;
 *
 * @see org.apache.lucene.analysis.payloads.PayloadHelper#encodeFloat(float, byte[], int)
 */
-public class FloatEncoder extends AbstractEncoder implements PayloadEncoder {
+public class FloatEncoder extends AbstractEncoder {
 @Override
 public BytesRef encode(char[] buffer, int offset, int length) {

@@ -23,7 +23,7 @@ import java.nio.charset.StandardCharsets;
 import org.apache.lucene.util.BytesRef;
 /** Does nothing other than convert the char array to a byte array using the specified encoding. */
-public class IdentityEncoder extends AbstractEncoder implements PayloadEncoder {
+public class IdentityEncoder extends AbstractEncoder {
 protected Charset charset = StandardCharsets.UTF_8;
 public IdentityEncoder() {}

@@ -24,7 +24,7 @@ import org.apache.lucene.util.BytesRef;
 *
 * <p>See {@link org.apache.lucene.analysis.payloads.PayloadHelper#encodeInt(int, byte[], int)}.
 */
-public class IntegerEncoder extends AbstractEncoder implements PayloadEncoder {
+public class IntegerEncoder extends AbstractEncoder {
 @Override
 public BytesRef encode(char[] buffer, int offset, int length) {

@@ -25,7 +25,7 @@ import org.apache.lucene.util.AttributeReflector;
 *
 * @lucene.experimental
 */
-public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute, Cloneable {
+public class ScriptAttributeImpl extends AttributeImpl implements ScriptAttribute {
 private int code = UScript.COMMON;
 /** Initializes this attribute with <code>UScript.COMMON</code> */

@@ -21,7 +21,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Attribute for {@link Token#getBaseForm()}. */
-public class BaseFormAttributeImpl extends AttributeImpl implements BaseFormAttribute, Cloneable {
+public class BaseFormAttributeImpl extends AttributeImpl implements BaseFormAttribute {
 private Token token;
 @Override

@@ -22,8 +22,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Attribute for Kuromoji inflection data. */
-public class InflectionAttributeImpl extends AttributeImpl
-    implements InflectionAttribute, Cloneable {
+public class InflectionAttributeImpl extends AttributeImpl implements InflectionAttribute {
 private Token token;
 @Override

@@ -22,8 +22,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Attribute for {@link Token#getPartOfSpeech()}. */
-public class PartOfSpeechAttributeImpl extends AttributeImpl
-    implements PartOfSpeechAttribute, Cloneable {
+public class PartOfSpeechAttributeImpl extends AttributeImpl implements PartOfSpeechAttribute {
 private Token token;
 @Override

@@ -22,7 +22,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Attribute for Kuromoji reading data */
-public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute, Cloneable {
+public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute {
 private Token token;
 @Override

@@ -27,7 +27,7 @@ import org.apache.lucene.util.AttributeReflector;
 * @see MorphosyntacticTagsAttribute
 */
 public class MorphosyntacticTagsAttributeImpl extends AttributeImpl
-    implements MorphosyntacticTagsAttribute, Cloneable {
+    implements MorphosyntacticTagsAttribute {
 /** Initializes this attribute with no tags */
 public MorphosyntacticTagsAttributeImpl() {}

@@ -28,8 +28,7 @@ import org.apache.lucene.util.AttributeReflector;
 *
 * @lucene.experimental
 */
-public class PartOfSpeechAttributeImpl extends AttributeImpl
-    implements PartOfSpeechAttribute, Cloneable {
+public class PartOfSpeechAttributeImpl extends AttributeImpl implements PartOfSpeechAttribute {
 private Token token;
 @Override

@@ -25,7 +25,7 @@ import org.apache.lucene.util.AttributeReflector;
 *
 * @lucene.experimental
 */
-public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute, Cloneable {
+public class ReadingAttributeImpl extends AttributeImpl implements ReadingAttribute {
 private Token token;
 @Override

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.backward_codecs.lucene50.compressing;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
@@ -58,8 +57,7 @@ import org.apache.lucene.util.packed.PackedInts;
 *
 * @lucene.experimental
 */
-public final class Lucene50CompressingTermVectorsReader extends TermVectorsReader
-    implements Closeable {
+public final class Lucene50CompressingTermVectorsReader extends TermVectorsReader {
 // hard limit on the maximum number of documents per chunk
 static final int MAX_DOCUMENTS_PER_CHUNK = 128;

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.backward_codecs.lucene60;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -32,7 +31,7 @@ import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.bkd.BKDReader;
 /** Reads point values previously written with Lucene60PointsWriter */
-public class Lucene60PointsReader extends PointsReader implements Closeable {
+public class Lucene60PointsReader extends PointsReader {
 final IndexInput dataIn;
 final SegmentReadState readState;
 final Map<Integer, BKDReader> readers = new HashMap<>();

@@ -20,7 +20,6 @@ import static org.apache.lucene.backward_codecs.lucene70.Lucene70DocValuesFormat
 import static org.apache.lucene.backward_codecs.lucene70.Lucene70DocValuesFormat.NUMERIC_BLOCK_SHIFT;
 import static org.apache.lucene.backward_codecs.lucene70.Lucene70DocValuesFormat.NUMERIC_BLOCK_SIZE;
-import java.io.Closeable; // javadocs
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -54,7 +53,7 @@ import org.apache.lucene.util.packed.DirectMonotonicWriter;
 import org.apache.lucene.util.packed.DirectWriter;
 /** writer for {@link Lucene70DocValuesFormat} */
-final class Lucene70DocValuesConsumer extends DocValuesConsumer implements Closeable {
+final class Lucene70DocValuesConsumer extends DocValuesConsumer {
 IndexOutput data, meta;
 final int maxDoc;

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.backward_codecs.lucene70;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -49,7 +48,7 @@ import org.apache.lucene.util.packed.DirectMonotonicReader;
 import org.apache.lucene.util.packed.DirectReader;
 /** reader for {@link Lucene70DocValuesFormat} */
-final class Lucene70DocValuesProducer extends DocValuesProducer implements Closeable {
+final class Lucene70DocValuesProducer extends DocValuesProducer {
 private final Map<String, NumericEntry> numerics = new HashMap<>();
 private final Map<String, BinaryEntry> binaries = new HashMap<>();
 private final Map<String, SortedEntry> sorted = new HashMap<>();

@@ -62,7 +62,7 @@ import org.apache.lucene.util.packed.DirectMonotonicWriter;
 import org.apache.lucene.util.packed.DirectWriter;
 /** writer for {@link Lucene80DocValuesFormat} */
-final class Lucene80DocValuesConsumer extends DocValuesConsumer implements Closeable {
+final class Lucene80DocValuesConsumer extends DocValuesConsumer {
 final Lucene80DocValuesFormat.Mode mode;
 IndexOutput data, meta;

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.backward_codecs.lucene80;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -52,7 +51,7 @@ import org.apache.lucene.util.packed.DirectMonotonicReader;
 import org.apache.lucene.util.packed.DirectReader;
 /** reader for {@link Lucene80DocValuesFormat} */
-final class Lucene80DocValuesProducer extends DocValuesProducer implements Closeable {
+final class Lucene80DocValuesProducer extends DocValuesProducer {
 private final Map<String, NumericEntry> numerics = new HashMap<>();
 private final Map<String, BinaryEntry> binaries = new HashMap<>();
 private final Map<String, SortedEntry> sorted = new HashMap<>();

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.backward_codecs.lucene60;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -41,7 +40,7 @@ import org.apache.lucene.util.bkd.BKDReader;
 import org.apache.lucene.util.bkd.BKDWriter;
 /** Writes dimensional values */
-public class Lucene60PointsWriter extends PointsWriter implements Closeable {
+public class Lucene60PointsWriter extends PointsWriter {
 /** Output used to write the BKD tree data file */
 protected final IndexOutput dataOut;

@@ -36,7 +36,7 @@ import org.apache.lucene.search.spans.SpanTermQuery;
 * A QueryMaker that uses common and uncommon actual Wikipedia queries for searching the English
 * Wikipedia collection. 90 queries total.
 */
-public class EnwikiQueryMaker extends AbstractQueryMaker implements QueryMaker {
+public class EnwikiQueryMaker extends AbstractQueryMaker {
 // common and a few uncommon queries from wikipedia search logs
 private static String[] STANDARD_QUERIES = {

@@ -43,7 +43,7 @@ import org.apache.lucene.util.IOUtils;
 * <pre>file.query.maker.file=c:/myqueries.txt
 * file.query.maker.default.field=body</pre>
 */
-public class FileBasedQueryMaker extends AbstractQueryMaker implements QueryMaker {
+public class FileBasedQueryMaker extends AbstractQueryMaker {
 @Override
 protected Query[] prepareQueries() throws Exception {

@@ -34,7 +34,7 @@ import org.apache.lucene.search.spans.SpanTermQuery;
 * A QueryMaker that makes queries devised manually (by Grant Ingersoll) for searching in the
 * Reuters collection.
 */
-public class ReutersQueryMaker extends AbstractQueryMaker implements QueryMaker {
+public class ReutersQueryMaker extends AbstractQueryMaker {
 private static String[] STANDARD_QUERIES = {
 // Start with some short queries

@@ -30,7 +30,7 @@ import org.apache.lucene.search.TermQuery;
 * A QueryMaker that makes queries for a collection created using {@link
 * org.apache.lucene.benchmark.byTask.feeds.SingleDocSource}.
 */
-public class SimpleQueryMaker extends AbstractQueryMaker implements QueryMaker {
+public class SimpleQueryMaker extends AbstractQueryMaker {
 /**
 * Prepare the queries for this test. Extending classes can override this method for preparing

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.codecs.blockterms;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -52,7 +51,7 @@ import org.apache.lucene.util.IOUtils;
 *
 * @lucene.experimental
 */
-public class BlockTermsWriter extends FieldsConsumer implements Closeable {
+public class BlockTermsWriter extends FieldsConsumer {
 static final String CODEC_NAME = "BlockTermsWriter";

@@ -26,8 +26,9 @@ import org.apache.lucene.util.BytesRef;
 *
 * @lucene.internal
 */
+@SuppressWarnings("unused") // do not warn/error on redundant interface
 public class BytesTermAttributeImpl extends AttributeImpl
-    implements BytesTermAttribute, TermToBytesRefAttribute {
+    implements BytesTermAttribute, TermToBytesRefAttribute /*required*/ {
 private BytesRef bytes;
 /** Initialize this attribute with no bytes. */

@@ -26,7 +26,7 @@ import org.apache.lucene.util.BytesRefBuilder;
 /** Default implementation of {@link CharTermAttribute}. */
 public class CharTermAttributeImpl extends AttributeImpl
-    implements CharTermAttribute, TermToBytesRefAttribute, Cloneable {
+    implements CharTermAttribute, TermToBytesRefAttribute {
 private static int MIN_BUFFER_SIZE = 10;
 private char[] termBuffer = new char[ArrayUtil.oversize(MIN_BUFFER_SIZE, Character.BYTES)];

@@ -20,7 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Default implementation of {@link FlagsAttribute}. */
-public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute, Cloneable {
+public class FlagsAttributeImpl extends AttributeImpl implements FlagsAttribute {
 private int flags = 0;
 /** Initialize this attribute with no bits set */

@@ -20,7 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Default implementation of {@link OffsetAttribute}. */
-public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute, Cloneable {
+public class OffsetAttributeImpl extends AttributeImpl implements OffsetAttribute {
 private int startOffset;
 private int endOffset;

@@ -21,7 +21,7 @@ import org.apache.lucene.util.AttributeReflector;
 import org.apache.lucene.util.BytesRef;
 /** Default implementation of {@link PayloadAttribute}. */
-public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute, Cloneable {
+public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttribute {
 private BytesRef payload;
 /** Initialize this attribute with no payload. */

@@ -21,7 +21,7 @@ import org.apache.lucene.util.AttributeReflector;
 /** Default implementation of {@link PositionIncrementAttribute}. */
 public class PositionIncrementAttributeImpl extends AttributeImpl
-    implements PositionIncrementAttribute, Cloneable {
+    implements PositionIncrementAttribute {
 private int positionIncrement = 1;
 /** Initialize this attribute with position increment of 1 */

@@ -20,8 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Default implementation of {@link PositionLengthAttribute}. */
-public class PositionLengthAttributeImpl extends AttributeImpl
-    implements PositionLengthAttribute, Cloneable {
+public class PositionLengthAttributeImpl extends AttributeImpl implements PositionLengthAttribute {
 private int positionLength = 1;
 /** Initializes this attribute with position length of 1. */

@@ -20,8 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Default implementation of {@link TermFrequencyAttribute}. */
-public class TermFrequencyAttributeImpl extends AttributeImpl
-    implements TermFrequencyAttribute, Cloneable {
+public class TermFrequencyAttributeImpl extends AttributeImpl implements TermFrequencyAttribute {
 private int termFrequency = 1;
 /** Initialize this attribute with term frequency of 1 */

@@ -20,7 +20,7 @@ import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.AttributeReflector;
 /** Default implementation of {@link TypeAttribute}. */
-public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute, Cloneable {
+public class TypeAttributeImpl extends AttributeImpl implements TypeAttribute {
 private String type;
 /** Initialize this attribute with {@link TypeAttribute#DEFAULT_TYPE} */

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.codecs.lucene86;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -33,7 +32,7 @@ import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.bkd.BKDReader;
 /** Reads point values previously written with {@link Lucene86PointsWriter} */
-public class Lucene86PointsReader extends PointsReader implements Closeable {
+public class Lucene86PointsReader extends PointsReader {
 final IndexInput indexIn, dataIn;
 final SegmentReadState readState;
 final Map<Integer, BKDReader> readers = new HashMap<>();

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.codecs.lucene86;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -39,7 +38,7 @@ import org.apache.lucene.util.bkd.BKDReader;
 import org.apache.lucene.util.bkd.BKDWriter;
 /** Writes dimensional values */
-public class Lucene86PointsWriter extends PointsWriter implements Closeable {
+public class Lucene86PointsWriter extends PointsWriter {
 /** Outputs used to write the BKD tree data files. */
 protected final IndexOutput metaOut, indexOut, dataOut;

@@ -62,7 +62,7 @@ import org.apache.lucene.util.packed.DirectMonotonicWriter;
 import org.apache.lucene.util.packed.DirectWriter;
 /** writer for {@link Lucene90DocValuesFormat} */
-final class Lucene90DocValuesConsumer extends DocValuesConsumer implements Closeable {
+final class Lucene90DocValuesConsumer extends DocValuesConsumer {
 final Lucene90DocValuesFormat.Mode mode;
 IndexOutput data, meta;

@@ -16,7 +16,6 @@
 */
 package org.apache.lucene.codecs.lucene90;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
@@ -52,7 +51,7 @@ import org.apache.lucene.util.packed.DirectMonotonicReader;
 import org.apache.lucene.util.packed.DirectReader;
 /** reader for {@link Lucene90DocValuesFormat} */
-final class Lucene90DocValuesProducer extends DocValuesProducer implements Closeable {
+final class Lucene90DocValuesProducer extends DocValuesProducer {
 private final Map<String, NumericEntry> numerics = new HashMap<>();
 private final Map<String, BinaryEntry> binaries = new HashMap<>();
 private final Map<String, SortedEntry> sorted = new HashMap<>();

@@ -29,7 +29,6 @@ import static org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingT
 import static org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingTermVectorsWriter.VERSION_CURRENT;
 import static org.apache.lucene.codecs.lucene90.compressing.Lucene90CompressingTermVectorsWriter.VERSION_START;
-import java.io.Closeable;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.Collections;
@@ -71,8 +70,7 @@ import org.apache.lucene.util.packed.PackedInts;
 *
 * @lucene.experimental
 */
-public final class Lucene90CompressingTermVectorsReader extends TermVectorsReader
-    implements Closeable {
+public final class Lucene90CompressingTermVectorsReader extends TermVectorsReader {
 private final FieldInfos fieldInfos;
 final FieldsIndex indexReader;

@@ -37,7 +37,7 @@ import java.io.IOException;
 *
 * @see Directory
 */
-public abstract class IndexInput extends DataInput implements Cloneable, Closeable {
+public abstract class IndexInput extends DataInput implements Closeable {
 private final String resourceDescription;

@@ -21,6 +21,10 @@ package org.apache.lucene.util;
 *
 * <p>Attributes are used to add data in a dynamic, yet type-safe way to a source of usually
 * streamed objects, e. g. a {@link org.apache.lucene.analysis.TokenStream}.
+ *
+ * <p>All implementations must list all implemented {@link Attribute} interfaces in their {@code
+ * implements} clause. {@code AttributeSource} reflectively identifies all attributes and makes them
+ * available to consumers like {@code TokenStream}s.
 */
 public abstract class AttributeImpl implements Cloneable, Attribute {
 /**

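To make the documented contract concrete, here is a minimal custom attribute; ColorAttribute and ColorAttributeImpl are hypothetical, invented for this sketch and not part of Lucene:

import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;

/** Hypothetical attribute interface. */
interface ColorAttribute extends Attribute {
  void setColor(String color);

  String getColor();
}

/**
 * Names ColorAttribute directly in the implements clause; per the javadoc above,
 * the reflective scan in AttributeSource would not discover it otherwise.
 */
final class ColorAttributeImpl extends AttributeImpl implements ColorAttribute {
  private String color;

  @Override
  public void setColor(String color) {
    this.color = color;
  }

  @Override
  public String getColor() {
    return color;
  }

  // clear, copyTo and reflectWith are the abstract methods of AttributeImpl
  @Override
  public void clear() {
    color = null;
  }

  @Override
  public void copyTo(AttributeImpl target) {
    ((ColorAttribute) target).setColor(color);
  }

  @Override
  public void reflectWith(AttributeReflector reflector) {
    reflector.reflect(ColorAttribute.class, "color", color);
  }
}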
@@ -181,6 +181,9 @@ public class AttributeSource {
 * retrieve the wanted attributes using {@link #getAttribute} after adding with this method and
 * cast to your class. The recommended way to use custom implementations is using an {@link
 * AttributeFactory}.
+ *
+ * <p>This method will only add the Attribute interfaces directly implemented by the class and its
+ * super classes.
 */
 public final void addAttributeImpl(final AttributeImpl att) {
 final Class<? extends AttributeImpl> clazz = att.getClass();

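A simplified sketch of the rule this javadoc states, as an approximation for illustration rather than Lucene's actual registration code:

import java.util.LinkedHashSet;
import java.util.Set;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeImpl;

class AttributeInterfaceScan {
  // Collect the Attribute interfaces declared directly by the impl class and its
  // super classes. Superinterfaces of those interfaces are not walked, which is why
  // seemingly redundant declarations like TermToBytesRefAttribute must stay.
  static Set<Class<? extends Attribute>> attributeInterfaces(Class<? extends AttributeImpl> clazz) {
    Set<Class<? extends Attribute>> found = new LinkedHashSet<>();
    for (Class<?> c = clazz; c != null; c = c.getSuperclass()) {
      for (Class<?> intf : c.getInterfaces()) {
        if (intf != Attribute.class && Attribute.class.isAssignableFrom(intf)) {
          found.add(intf.asSubclass(Attribute.class));
        }
      }
    }
    return found;
  }
}

Under this rule, attributeInterfaces(CharTermAttributeImpl.class) contains TermToBytesRefAttribute only because the class lists it explicitly in its implements clause.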
@@ -28,7 +28,7 @@ import org.apache.lucene.search.DocIdSetIterator;
 *
 * @lucene.internal
 */
-public final class FixedBitSet extends BitSet implements Bits, Accountable {
+public final class FixedBitSet extends BitSet {
 private static final long BASE_RAM_BYTES_USED =
     RamUsageEstimator.shallowSizeOfInstance(FixedBitSet.class);

@@ -33,7 +33,7 @@ import org.apache.lucene.search.DocIdSetIterator;
 *
 * @lucene.internal
 */
-public class SparseFixedBitSet extends BitSet implements Bits, Accountable {
+public class SparseFixedBitSet extends BitSet {
 private static final long BASE_RAM_BYTES_USED =
     RamUsageEstimator.shallowSizeOfInstance(SparseFixedBitSet.class);

@@ -16,6 +16,7 @@
 */
 package org.apache.lucene.analysis.tokenattributes;
+import java.util.stream.Stream;
 import org.apache.lucene.util.AttributeImpl;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.LuceneTestCase;
@@ -44,4 +45,11 @@ public class TestBytesRefAttImpl extends LuceneTestCase {
 assertEquals("Copied instance's hashcode must be equal", att.hashCode(), copy.hashCode());
 return copy;
 }
+public void testLucene9856() {
+assertTrue(
+    "BytesTermAttributeImpl must explicitly declare to implement TermToBytesRefAttribute",
+    Stream.of(BytesTermAttributeImpl.class.getInterfaces())
+        .anyMatch(TermToBytesRefAttribute.class::equals));
+}
 }

@@ -46,8 +46,7 @@ import org.apache.lucene.util.DocIdSetBuilder;
 * instantiate one of the {@link Facets} subclasses to do the facet counting. Use the {@code search}
 * utility methods to perform an "ordinary" search but also collect into a {@link Collector}.
 */
-// redundant 'implements Collector' to workaround javadocs bugs
-public class FacetsCollector extends SimpleCollector implements Collector {
+public class FacetsCollector extends SimpleCollector {
 private LeafReaderContext context;
 private Scorable scorer;

@@ -34,7 +34,7 @@ import org.xml.sax.ErrorHandler;
 import org.xml.sax.SAXException;
 /** Assembles a QueryBuilder which uses only core Lucene Query objects */
-public class CoreParser implements QueryBuilder, SpanQueryBuilder {
+public class CoreParser implements SpanQueryBuilder {
 protected String defaultField;
 protected Analyzer analyzer;

@@ -24,8 +24,8 @@ import java.io.InputStream;
 *
 * @lucene.experimental
 */
-public class GeoCompositeMembershipShape extends GeoBaseCompositeMembershipShape<GeoMembershipShape>
-    implements GeoMembershipShape {
+public class GeoCompositeMembershipShape
+    extends GeoBaseCompositeMembershipShape<GeoMembershipShape> {
 /** Constructor. */
 public GeoCompositeMembershipShape(PlanetModel planetModel) {

@@ -21,4 +21,4 @@ package org.apache.lucene.spatial3d.geom;
 *
 * @lucene.experimental
 */
-public interface GeoMembershipShape extends GeoShape, GeoOutsideDistance, Membership {}
+public interface GeoMembershipShape extends GeoShape, GeoOutsideDistance {}

@@ -97,8 +97,7 @@ import org.apache.lucene.util.fst.Util.TopResults;
 *
 * @lucene.experimental
 */
-// redundant 'implements Accountable' to workaround javadocs bugs
-public class AnalyzingSuggester extends Lookup implements Accountable {
+public class AnalyzingSuggester extends Lookup {
 /**
 * FST&lt;Weight,Surface&gt;: input is the analyzed form, with a null byte between terms weights

@@ -100,8 +100,7 @@ import org.apache.lucene.util.fst.Util.TopResults;
 *
 * @lucene.experimental
 */
-// redundant 'implements Accountable' to workaround javadocs bugs
-public class FreeTextSuggester extends Lookup implements Accountable {
+public class FreeTextSuggester extends Lookup {
 /** Codec name used in the header for the saved model. */
 public static final String CODEC_NAME = "freetextsuggest";

@@ -68,7 +68,7 @@ import org.apache.lucene.util.fst.NoOutputs;
 * @see FSTCompletion
 * @lucene.experimental
 */
-public class FSTCompletionLookup extends Lookup implements Accountable {
+public class FSTCompletionLookup extends Lookup {
 /**
 * An invalid bucket count if we're creating an object of this class from an existing FST.
 *

@@ -57,8 +57,7 @@ import org.apache.lucene.util.fst.Util.TopResults;
 *
 * @lucene.experimental
 */
-// redundant 'implements Accountable' to workaround javadocs bugs
-public class WFSTCompletionLookup extends Lookup implements Accountable {
+public class WFSTCompletionLookup extends Lookup {
 /** FST<Long>, weights are encoded as costs: (Integer.MAX_VALUE-weight) */
 // NOTE: like FSTSuggester, this is really a WFSA, if you want to

@@ -25,7 +25,6 @@ import org.apache.lucene.search.suggest.Lookup;
 import org.apache.lucene.search.suggest.jaspell.JaspellTernarySearchTrie.TSTNode;
 import org.apache.lucene.store.DataInput;
 import org.apache.lucene.store.DataOutput;
-import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
@@ -37,7 +36,7 @@ import org.apache.lucene.util.CharsRefBuilder;
 *
 * @deprecated Migrate to one of the newer suggesters which are much more RAM efficient.
 */
 @Deprecated
-public class JaspellLookup extends Lookup implements Accountable {
+public class JaspellLookup extends Lookup {
 JaspellTernarySearchTrie trie = new JaspellTernarySearchTrie();
 private boolean usePrefix = true;
 private int editDistance = 2;

@@ -143,6 +143,18 @@ public abstract class BaseTokenStreamTestCase extends LuceneTestCase {
 if (output.length > 0) {
 assertTrue("has no CharTermAttribute", ts.hasAttribute(CharTermAttribute.class));
 termAtt = ts.getAttribute(CharTermAttribute.class);
+// every UTF-16 character-based TokenStream MUST provide a TermToBytesRefAttribute,
+// implemented by same instance like the CharTermAttribute:
+assertTrue("has no TermToBytesRefAttribute", ts.hasAttribute(TermToBytesRefAttribute.class));
+TermToBytesRefAttribute bytesAtt = ts.getAttribute(TermToBytesRefAttribute.class);
+// ConcatenateGraphFilter has some tricky logic violating this. We have an extra assert there:
+if (!Objects.equals(
+    bytesAtt.getClass().getSimpleName(), "BytesRefBuilderTermAttributeImpl")) {
+  assertSame(
+      "TermToBytesRefAttribute must be implemented by same instance", termAtt, bytesAtt);
+}
 }
 OffsetAttribute offsetAtt = null;

@@ -20,6 +20,7 @@ import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
+import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
 import org.apache.lucene.util.BytesRef;
 /** TokenStream from a canned list of binary (BytesRef-based) tokens. */
@@ -57,6 +58,7 @@
 public CannedBinaryTokenStream(BinaryToken... tokens) {
 super(Token.TOKEN_ATTRIBUTE_FACTORY);
 this.tokens = tokens;
+assert termAtt == getAttribute(TermToBytesRefAttribute.class);
 }
 @Override