mirror of https://github.com/apache/lucene.git
Remove deprecated code (#13286)
Co-authored-by: iamsanjay <sanjaydutt.india@yahoo.com>
This commit is contained in:
parent
13cf882677
commit
66b121f9b0
|
@ -98,6 +98,9 @@ API Changes
|
||||||
|
|
||||||
* GITHUB#13241: Remove Accountable interface on KnnVectorsReader. (Pulkit Gupta)
|
* GITHUB#13241: Remove Accountable interface on KnnVectorsReader. (Pulkit Gupta)
|
||||||
|
|
||||||
|
* GITHUB#13262: Removed deprecated constructors from DoubleField, FloatField, IntField, LongField, and LongPoint.
|
||||||
|
Additionally, deprecated methods have been removed from ByteBuffersIndexInput, BooleanQuery and others. Please refer
|
||||||
|
to MIGRATE.md for further details. (Sanjay Dutt)
|
||||||
|
|
||||||
New Features
|
New Features
|
||||||
---------------------
|
---------------------
|
||||||
|
|
|
@ -756,3 +756,14 @@ Additionally, `OrdinalsReader` (and sub-classes) are fully removed starting with
|
||||||
classes were `@Deprecated` starting with 9.0. Users are encouraged to rely on the default
|
classes were `@Deprecated` starting with 9.0. Users are encouraged to rely on the default
|
||||||
taxonomy facet encodings where possible. If custom formats are needed, users will need
|
taxonomy facet encodings where possible. If custom formats are needed, users will need
|
||||||
to manage the indexed data on their own and create new `Facet` implementations to use it.
|
to manage the indexed data on their own and create new `Facet` implementations to use it.
|
||||||
|
|
||||||
|
### Deprecated code removal (GITHUB#13262)
|
||||||
|
|
||||||
|
1. `IntField(String name, int value)`. Use `IntField(String, int, Field.Store)` with `Field.Store#NO` instead.
|
||||||
|
2. `DoubleField(String name, double value)`. Use `DoubleField(String, double, Field.Store)` with `Field.Store#NO` instead.
|
||||||
|
3. `FloatField(String name, float value)`. Use `FloatField(String, float, Field.Store)` with `Field.Store#NO` instead.
|
||||||
|
4. `LongField(String name, long value)`. Use `LongField(String, long, Field.Store)` with `Field.Store#NO` instead.
|
||||||
|
5. `LongPoint#newDistanceFeatureQuery(String field, float weight, long origin, long pivotDistance)`. Use `LongField#newDistanceFeatureQuery` instead.
|
||||||
|
6. `BooleanQuery#TooManyClauses`, `BooleanQuery#getMaxClauseCount()`, `BooleanQuery#setMaxClauseCount()`. Use `IndexSearcher#TooManyClauses`, `IndexSearcher#getMaxClauseCount()`, `IndexSearcher#setMaxClauseCount()` instead.
|
||||||
|
7. `ByteBuffersDataInput#size()`. Use `ByteBuffersDataInput#length()` instead.
|
||||||
|
8. `SortedSetDocValuesFacetField#label`. `FacetsConfig#pathToString(String[])` can be applied to path as a replacement if string path is desired.
|
|
@ -157,7 +157,7 @@ public abstract class CompressionMode {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
||||||
final int len = (int) buffersInput.size();
|
final int len = (int) buffersInput.length();
|
||||||
byte[] bytes = new byte[len];
|
byte[] bytes = new byte[len];
|
||||||
buffersInput.readBytes(bytes, 0, len);
|
buffersInput.readBytes(bytes, 0, len);
|
||||||
LZ4.compress(bytes, 0, len, out, ht);
|
LZ4.compress(bytes, 0, len, out, ht);
|
||||||
|
@ -179,7 +179,7 @@ public abstract class CompressionMode {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
||||||
final int len = (int) buffersInput.size();
|
final int len = (int) buffersInput.length();
|
||||||
byte[] bytes = new byte[len];
|
byte[] bytes = new byte[len];
|
||||||
buffersInput.readBytes(bytes, 0, len);
|
buffersInput.readBytes(bytes, 0, len);
|
||||||
LZ4.compress(bytes, 0, len, out, ht);
|
LZ4.compress(bytes, 0, len, out, ht);
|
||||||
|
@ -265,7 +265,7 @@ public abstract class CompressionMode {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
||||||
final int len = (int) buffersInput.size();
|
final int len = (int) buffersInput.length();
|
||||||
|
|
||||||
byte[] bytes = new byte[len];
|
byte[] bytes = new byte[len];
|
||||||
buffersInput.readBytes(bytes, 0, len);
|
buffersInput.readBytes(bytes, 0, len);
|
||||||
|
|
|
@ -202,7 +202,7 @@ public final class DeflateWithPresetDictCompressionMode extends CompressionMode
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
||||||
final int len = (int) (buffersInput.size() - buffersInput.position());
|
final int len = (int) (buffersInput.length() - buffersInput.position());
|
||||||
final int dictLength = len / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR);
|
final int dictLength = len / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR);
|
||||||
final int blockLength = (len - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS;
|
final int blockLength = (len - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS;
|
||||||
out.writeVInt(dictLength);
|
out.writeVInt(dictLength);
|
||||||
|
|
|
@ -171,7 +171,7 @@ public final class LZ4WithPresetDictCompressionMode extends CompressionMode {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
public void compress(ByteBuffersDataInput buffersInput, DataOutput out) throws IOException {
|
||||||
final int len = (int) (buffersInput.size() - buffersInput.position());
|
final int len = (int) (buffersInput.length() - buffersInput.position());
|
||||||
final int dictLength = Math.min(LZ4.MAX_DISTANCE, len / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR));
|
final int dictLength = Math.min(LZ4.MAX_DISTANCE, len / (NUM_SUB_BLOCKS * DICT_SIZE_FACTOR));
|
||||||
final int blockLength = (len - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS;
|
final int blockLength = (len - dictLength + NUM_SUB_BLOCKS - 1) / NUM_SUB_BLOCKS;
|
||||||
buffer = ArrayUtil.growNoCopy(buffer, dictLength + blockLength);
|
buffer = ArrayUtil.growNoCopy(buffer, dictLength + blockLength);
|
||||||
|
|
|
@ -253,7 +253,7 @@ public final class Lucene90CompressingStoredFieldsWriter extends StoredFieldsWri
|
||||||
// compress stored fields to fieldsStream.
|
// compress stored fields to fieldsStream.
|
||||||
if (sliced) {
|
if (sliced) {
|
||||||
// big chunk, slice it, using ByteBuffersDataInput ignore memory copy
|
// big chunk, slice it, using ByteBuffersDataInput ignore memory copy
|
||||||
final int capacity = (int) bytebuffers.size();
|
final int capacity = (int) bytebuffers.length();
|
||||||
for (int compressed = 0; compressed < capacity; compressed += chunkSize) {
|
for (int compressed = 0; compressed < capacity; compressed += chunkSize) {
|
||||||
int l = Math.min(chunkSize, capacity - compressed);
|
int l = Math.min(chunkSize, capacity - compressed);
|
||||||
ByteBuffersDataInput bbdi = bytebuffers.slice(compressed, l);
|
ByteBuffersDataInput bbdi = bytebuffers.slice(compressed, l);
|
||||||
|
|
|
@ -59,20 +59,6 @@ public final class DoubleField extends Field {
|
||||||
|
|
||||||
private final StoredValue storedValue;
|
private final StoredValue storedValue;
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new DoubleField, indexing the provided point and storing it as a DocValue
|
|
||||||
*
|
|
||||||
* @param name field name
|
|
||||||
* @param value the double value
|
|
||||||
* @throws IllegalArgumentException if the field name or value is null.
|
|
||||||
* @deprecated Use {@link #DoubleField(String, double, Field.Store)} with {@link Field.Store#NO}
|
|
||||||
* instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public DoubleField(String name, double value) {
|
|
||||||
this(name, value, Field.Store.NO);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new DoubleField, indexing the provided point, storing it as a DocValue, and
|
* Creates a new DoubleField, indexing the provided point, storing it as a DocValue, and
|
||||||
* optionally storing it as a stored field.
|
* optionally storing it as a stored field.
|
||||||
|
|
|
@ -59,20 +59,6 @@ public final class FloatField extends Field {
|
||||||
|
|
||||||
private final StoredValue storedValue;
|
private final StoredValue storedValue;
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new FloatField, indexing the provided point and storing it as a DocValue
|
|
||||||
*
|
|
||||||
* @param name field name
|
|
||||||
* @param value the float value
|
|
||||||
* @throws IllegalArgumentException if the field name or value is null.
|
|
||||||
* @deprecated Use {@link #FloatField(String, float, Field.Store)} with {@link Field.Store#NO}
|
|
||||||
* instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public FloatField(String name, float value) {
|
|
||||||
this(name, value, Field.Store.NO);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new FloatField, indexing the provided point, storing it as a DocValue, and optionally
|
* Creates a new FloatField, indexing the provided point, storing it as a DocValue, and optionally
|
||||||
* storing it as a stored field.
|
* storing it as a stored field.
|
||||||
|
|
|
@ -61,20 +61,6 @@ public final class IntField extends Field {
|
||||||
|
|
||||||
private final StoredValue storedValue;
|
private final StoredValue storedValue;
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new IntField, indexing the provided point and storing it as a DocValue.
|
|
||||||
*
|
|
||||||
* @param name field name
|
|
||||||
* @param value the int value
|
|
||||||
* @throws IllegalArgumentException if the field name or value is null.
|
|
||||||
* @deprecated Use {@link #IntField(String, int, Field.Store)} with {@link Field.Store#NO}
|
|
||||||
* instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public IntField(String name, int value) {
|
|
||||||
this(name, value, Field.Store.NO);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new IntField, indexing the provided point, storing it as a DocValue, and optionally
|
* Creates a new IntField, indexing the provided point, storing it as a DocValue, and optionally
|
||||||
* storing it as a stored field.
|
* storing it as a stored field.
|
||||||
|
|
|
@ -64,20 +64,6 @@ public final class LongField extends Field {
|
||||||
|
|
||||||
private final StoredValue storedValue;
|
private final StoredValue storedValue;
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new LongField, indexing the provided point and storing it as a DocValue
|
|
||||||
*
|
|
||||||
* @param name field name
|
|
||||||
* @param value the long value
|
|
||||||
* @throws IllegalArgumentException if the field name or value is null.
|
|
||||||
* @deprecated Use {@link #LongField(String, long, Field.Store)} with {@link Field.Store#NO}
|
|
||||||
* instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public LongField(String name, long value) {
|
|
||||||
this(name, value, Field.Store.NO);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new LongField, indexing the provided point, storing it as a DocValue, and optionally
|
* Creates a new LongField, indexing the provided point, storing it as a DocValue, and optionally
|
||||||
* storing it as a stored field.
|
* storing it as a stored field.
|
||||||
|
|
|
@ -19,8 +19,6 @@ package org.apache.lucene.document;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import org.apache.lucene.index.PointValues;
|
import org.apache.lucene.index.PointValues;
|
||||||
import org.apache.lucene.search.BooleanClause.Occur;
|
|
||||||
import org.apache.lucene.search.BooleanQuery;
|
|
||||||
import org.apache.lucene.search.PointInSetQuery;
|
import org.apache.lucene.search.PointInSetQuery;
|
||||||
import org.apache.lucene.search.PointRangeQuery;
|
import org.apache.lucene.search.PointRangeQuery;
|
||||||
import org.apache.lucene.search.Query;
|
import org.apache.lucene.search.Query;
|
||||||
|
@ -301,22 +299,4 @@ public final class LongPoint extends Field {
|
||||||
}
|
}
|
||||||
return newSetQuery(field, unboxed);
|
return newSetQuery(field, unboxed);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Given a field that indexes the same long values into a {@link LongPoint} and doc values (either
|
|
||||||
* {@link NumericDocValuesField} or {@link SortedNumericDocValuesField}), this returns a query
|
|
||||||
* that scores documents based on their distance to {@code origin}: {@code score = weight *
|
|
||||||
* pivotDistance / (pivotDistance + distance)}, ie. score is in the {@code [0, weight]} range, is
|
|
||||||
* equal to {@code weight} when the document's value is equal to {@code origin} and is equal to
|
|
||||||
* {@code weight/2} when the document's value is distant of {@code pivotDistance} from {@code
|
|
||||||
* origin}. In case of multi-valued fields, only the closest point to {@code origin} will be
|
|
||||||
* considered. This query is typically useful to boost results based on recency by adding this
|
|
||||||
* query to a {@link Occur#SHOULD} clause of a {@link BooleanQuery}. @Deprecated Use {@link
|
|
||||||
* LongField#newDistanceFeatureQuery}
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static Query newDistanceFeatureQuery(
|
|
||||||
String field, float weight, long origin, long pivotDistance) {
|
|
||||||
return LongField.newDistanceFeatureQuery(field, weight, origin, pivotDistance);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -115,7 +115,7 @@ public class PrefixCodedTerms implements Accountable {
|
||||||
|
|
||||||
private TermIterator(long delGen, ByteBuffersDataInput input) {
|
private TermIterator(long delGen, ByteBuffersDataInput input) {
|
||||||
this.input = input;
|
this.input = input;
|
||||||
end = input.size();
|
end = input.length();
|
||||||
this.delGen = delGen;
|
this.delGen = delGen;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -49,7 +49,7 @@ final class SortingStoredFieldsConsumer extends StoredFieldsConsumer {
|
||||||
@Override
|
@Override
|
||||||
public void compress(ByteBuffersDataInput buffersInput, DataOutput out)
|
public void compress(ByteBuffersDataInput buffersInput, DataOutput out)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
out.copyBytes(buffersInput, buffersInput.size());
|
out.copyBytes(buffersInput, buffersInput.length());
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,38 +39,6 @@ import org.apache.lucene.search.BooleanClause.Occur;
|
||||||
*/
|
*/
|
||||||
public class BooleanQuery extends Query implements Iterable<BooleanClause> {
|
public class BooleanQuery extends Query implements Iterable<BooleanClause> {
|
||||||
|
|
||||||
/**
|
|
||||||
* Thrown when an attempt is made to add more than {@link #getMaxClauseCount()} clauses. This
|
|
||||||
* typically happens if a PrefixQuery, FuzzyQuery, WildcardQuery, or TermRangeQuery is expanded to
|
|
||||||
* many terms during search.
|
|
||||||
*
|
|
||||||
* @deprecated use {@link IndexSearcher.TooManyClauses}
|
|
||||||
*/
|
|
||||||
@Deprecated // Remove in Lucene 10
|
|
||||||
public static class TooManyClauses extends IndexSearcher.TooManyClauses {}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Return the maximum number of clauses permitted, 1024 by default. Attempts to add more than the
|
|
||||||
* permitted number of clauses cause {@link TooManyClauses} to be thrown.
|
|
||||||
*
|
|
||||||
* @see IndexSearcher#setMaxClauseCount(int)
|
|
||||||
* @deprecated use {@link IndexSearcher#getMaxClauseCount()}
|
|
||||||
*/
|
|
||||||
@Deprecated // Remove in Lucene 10
|
|
||||||
public static int getMaxClauseCount() {
|
|
||||||
return IndexSearcher.getMaxClauseCount();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Set the maximum number of clauses permitted per BooleanQuery. Default value is 1024.
|
|
||||||
*
|
|
||||||
* @deprecated use {@link IndexSearcher#setMaxClauseCount(int)}
|
|
||||||
*/
|
|
||||||
@Deprecated // Remove in Lucene 10
|
|
||||||
public static void setMaxClauseCount(int maxClauseCount) {
|
|
||||||
IndexSearcher.setMaxClauseCount(maxClauseCount);
|
|
||||||
}
|
|
||||||
|
|
||||||
/** A builder for boolean queries. */
|
/** A builder for boolean queries. */
|
||||||
public static class Builder {
|
public static class Builder {
|
||||||
|
|
||||||
|
|
|
@ -83,16 +83,6 @@ public final class ByteBuffersDataInput extends DataInput
|
||||||
this.pos = offset;
|
this.pos = offset;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the total number of bytes in this stream.
|
|
||||||
*
|
|
||||||
* @deprecated Use {@link #length()} instead.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public long size() {
|
|
||||||
return length();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public long ramBytesUsed() {
|
public long ramBytesUsed() {
|
||||||
// Return a rough estimation for allocated blocks. Note that we do not make
|
// Return a rough estimation for allocated blocks. Note that we do not make
|
||||||
|
|
|
@ -53,7 +53,7 @@ public final class ByteBuffersIndexInput extends IndexInput implements RandomAcc
|
||||||
@Override
|
@Override
|
||||||
public long length() {
|
public long length() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
return in.size();
|
return in.length();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -215,7 +215,7 @@ public final class ByteBuffersIndexInput extends IndexInput implements RandomAcc
|
||||||
public IndexInput clone() {
|
public IndexInput clone() {
|
||||||
ensureOpen();
|
ensureOpen();
|
||||||
ByteBuffersIndexInput cloned =
|
ByteBuffersIndexInput cloned =
|
||||||
new ByteBuffersIndexInput(in.slice(0, in.size()), "(clone of) " + toString());
|
new ByteBuffersIndexInput(in.slice(0, in.length()), "(clone of) " + toString());
|
||||||
try {
|
try {
|
||||||
cloned.seek(getFilePointer());
|
cloned.seek(getFilePointer());
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
|
|
|
@ -37,7 +37,7 @@ public final class TestByteBuffersDataInput extends RandomizedTest {
|
||||||
public void testSanity() throws IOException {
|
public void testSanity() throws IOException {
|
||||||
ByteBuffersDataOutput out = new ByteBuffersDataOutput();
|
ByteBuffersDataOutput out = new ByteBuffersDataOutput();
|
||||||
ByteBuffersDataInput o1 = out.toDataInput();
|
ByteBuffersDataInput o1 = out.toDataInput();
|
||||||
assertEquals(0, o1.size());
|
assertEquals(0, o1.length());
|
||||||
LuceneTestCase.expectThrows(
|
LuceneTestCase.expectThrows(
|
||||||
EOFException.class,
|
EOFException.class,
|
||||||
() -> {
|
() -> {
|
||||||
|
@ -47,9 +47,9 @@ public final class TestByteBuffersDataInput extends RandomizedTest {
|
||||||
out.writeByte((byte) 1);
|
out.writeByte((byte) 1);
|
||||||
|
|
||||||
ByteBuffersDataInput o2 = out.toDataInput();
|
ByteBuffersDataInput o2 = out.toDataInput();
|
||||||
assertEquals(1, o2.size());
|
assertEquals(1, o2.length());
|
||||||
assertEquals(0, o2.position());
|
assertEquals(0, o2.position());
|
||||||
assertEquals(0, o1.size());
|
assertEquals(0, o1.length());
|
||||||
|
|
||||||
assertTrue(o2.ramBytesUsed() > 0);
|
assertTrue(o2.ramBytesUsed() > 0);
|
||||||
assertEquals(1, o2.readByte());
|
assertEquals(1, o2.readByte());
|
||||||
|
@ -106,7 +106,7 @@ public final class TestByteBuffersDataInput extends RandomizedTest {
|
||||||
dst.toDataInput().slice(prefix.length, dst.size() - prefix.length - suffix.length);
|
dst.toDataInput().slice(prefix.length, dst.size() - prefix.length - suffix.length);
|
||||||
|
|
||||||
assertEquals(0, src.position());
|
assertEquals(0, src.position());
|
||||||
assertEquals(dst.size() - prefix.length - suffix.length, src.size());
|
assertEquals(dst.size() - prefix.length - suffix.length, src.length());
|
||||||
for (IOConsumer<DataInput> c : reply) {
|
for (IOConsumer<DataInput> c : reply) {
|
||||||
c.accept(src);
|
c.accept(src);
|
||||||
}
|
}
|
||||||
|
@ -190,8 +190,8 @@ public final class TestByteBuffersDataInput extends RandomizedTest {
|
||||||
curr = skipTo + 1; // +1 for read byte
|
curr = skipTo + 1; // +1 for read byte
|
||||||
}
|
}
|
||||||
|
|
||||||
in.seek(in.size());
|
in.seek(in.length());
|
||||||
assertEquals(in.size(), in.position());
|
assertEquals(in.length(), in.position());
|
||||||
LuceneTestCase.expectThrows(
|
LuceneTestCase.expectThrows(
|
||||||
EOFException.class,
|
EOFException.class,
|
||||||
() -> {
|
() -> {
|
||||||
|
@ -203,18 +203,18 @@ public final class TestByteBuffersDataInput extends RandomizedTest {
|
||||||
@Test
|
@Test
|
||||||
public void testSlicingWindow() throws Exception {
|
public void testSlicingWindow() throws Exception {
|
||||||
ByteBuffersDataOutput dst = new ByteBuffersDataOutput();
|
ByteBuffersDataOutput dst = new ByteBuffersDataOutput();
|
||||||
assertEquals(0, dst.toDataInput().slice(0, 0).size());
|
assertEquals(0, dst.toDataInput().slice(0, 0).length());
|
||||||
|
|
||||||
dst.writeBytes(randomBytesOfLength(1024 * 8));
|
dst.writeBytes(randomBytesOfLength(1024 * 8));
|
||||||
ByteBuffersDataInput in = dst.toDataInput();
|
ByteBuffersDataInput in = dst.toDataInput();
|
||||||
for (int offset = 0, max = (int) dst.size(); offset < max; offset++) {
|
for (int offset = 0, max = (int) dst.size(); offset < max; offset++) {
|
||||||
assertEquals(0, in.slice(offset, 0).size());
|
assertEquals(0, in.slice(offset, 0).length());
|
||||||
assertEquals(1, in.slice(offset, 1).size());
|
assertEquals(1, in.slice(offset, 1).length());
|
||||||
|
|
||||||
int window = Math.min(max - offset, 1024);
|
int window = Math.min(max - offset, 1024);
|
||||||
assertEquals(window, in.slice(offset, window).size());
|
assertEquals(window, in.slice(offset, window).length());
|
||||||
}
|
}
|
||||||
assertEquals(0, in.slice((int) dst.size(), 0).size());
|
assertEquals(0, in.slice((int) dst.size(), 0).length());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
@ -265,17 +265,17 @@ public final class TestByteBuffersDataInput extends RandomizedTest {
|
||||||
buffers.get(0).position(shift);
|
buffers.get(0).position(shift);
|
||||||
|
|
||||||
ByteBuffersDataInput dst = new ByteBuffersDataInput(buffers);
|
ByteBuffersDataInput dst = new ByteBuffersDataInput(buffers);
|
||||||
assertEquals(simulatedLength, dst.size());
|
assertEquals(simulatedLength, dst.length());
|
||||||
|
|
||||||
final long max = dst.size();
|
final long max = dst.length();
|
||||||
long offset = 0;
|
long offset = 0;
|
||||||
for (; offset < max; offset += randomIntBetween(MB, 4 * MB)) {
|
for (; offset < max; offset += randomIntBetween(MB, 4 * MB)) {
|
||||||
assertEquals(0, dst.slice(offset, 0).size());
|
assertEquals(0, dst.slice(offset, 0).length());
|
||||||
assertEquals(1, dst.slice(offset, 1).size());
|
assertEquals(1, dst.slice(offset, 1).length());
|
||||||
|
|
||||||
long window = Math.min(max - offset, 1024);
|
long window = Math.min(max - offset, 1024);
|
||||||
ByteBuffersDataInput slice = dst.slice(offset, window);
|
ByteBuffersDataInput slice = dst.slice(offset, window);
|
||||||
assertEquals(window, slice.size());
|
assertEquals(window, slice.length());
|
||||||
|
|
||||||
// Sanity check of the content against original pages.
|
// Sanity check of the content against original pages.
|
||||||
for (int i = 0; i < window; i++) {
|
for (int i = 0; i < window; i++) {
|
||||||
|
|
|
@ -44,15 +44,6 @@ public class SortedSetDocValuesFacetField extends Field {
|
||||||
/** Path. */
|
/** Path. */
|
||||||
public final String[] path;
|
public final String[] path;
|
||||||
|
|
||||||
/**
|
|
||||||
* String form of path.
|
|
||||||
*
|
|
||||||
* @deprecated This field will be removed in a future version. {@link
|
|
||||||
* FacetsConfig#pathToString(String[])} can be applied to {@code path} as a replacement if
|
|
||||||
* string path is desired.
|
|
||||||
*/
|
|
||||||
@Deprecated public final String label;
|
|
||||||
|
|
||||||
/** Sole constructor. */
|
/** Sole constructor. */
|
||||||
public SortedSetDocValuesFacetField(String dim, String... path) {
|
public SortedSetDocValuesFacetField(String dim, String... path) {
|
||||||
super("dummy", TYPE);
|
super("dummy", TYPE);
|
||||||
|
@ -65,7 +56,6 @@ public class SortedSetDocValuesFacetField extends Field {
|
||||||
}
|
}
|
||||||
this.dim = dim;
|
this.dim = dim;
|
||||||
this.path = path;
|
this.path = path;
|
||||||
this.label = FacetsConfig.pathToString(path);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
@ -102,16 +102,6 @@ public class BlockJoinSelector {
|
||||||
return wrap(values, selection, parents, children);
|
return wrap(values, selection, parents, children);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Wraps the provided {@link SortedDocValues} in order to only select one value per parent among
|
|
||||||
* its {@code children} using the configured {@code selection} type.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static SortedDocValues wrap(
|
|
||||||
final SortedDocValues values, Type selection, BitSet parents, BitSet children) {
|
|
||||||
return wrap(values, selection, parents, toIter(children));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Wraps the provided {@link SortedDocValues} in order to only select one value per parent among
|
* Wraps the provided {@link SortedDocValues} in order to only select one value per parent among
|
||||||
* its {@code children} using the configured {@code selection} type.
|
* its {@code children} using the configured {@code selection} type.
|
||||||
|
@ -125,16 +115,6 @@ public class BlockJoinSelector {
|
||||||
return ToParentDocValues.wrap(values, selection, parents, children);
|
return ToParentDocValues.wrap(values, selection, parents, children);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Wraps the provided {@link SortedNumericDocValues} in order to only select one value per parent
|
|
||||||
* among its {@code children} using the configured {@code selection} type.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static NumericDocValues wrap(
|
|
||||||
SortedNumericDocValues sortedNumerics, Type selection, BitSet parents, BitSet children) {
|
|
||||||
return wrap(sortedNumerics, selection, parents, toIter(children));
|
|
||||||
}
|
|
||||||
|
|
||||||
/** creates an iterator for the given bitset */
|
/** creates an iterator for the given bitset */
|
||||||
protected static BitSetIterator toIter(BitSet children) {
|
protected static BitSetIterator toIter(BitSet children) {
|
||||||
return new BitSetIterator(children, 0);
|
return new BitSetIterator(children, 0);
|
||||||
|
@ -167,17 +147,6 @@ public class BlockJoinSelector {
|
||||||
return wrap(values, selection, parents, children);
|
return wrap(values, selection, parents, children);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Wraps the provided {@link NumericDocValues}, iterating over only child documents, in order to
|
|
||||||
* only select one value per parent among its {@code children} using the configured {@code
|
|
||||||
* selection} type.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static NumericDocValues wrap(
|
|
||||||
final NumericDocValues values, Type selection, BitSet parents, BitSet children) {
|
|
||||||
return wrap(values, selection, parents, toIter(children));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Wraps the provided {@link NumericDocValues}, iterating over only child documents, in order to
|
* Wraps the provided {@link NumericDocValues}, iterating over only child documents, in order to
|
||||||
* only select one value per parent among its {@code children} using the configured {@code
|
* only select one value per parent among its {@code children} using the configured {@code
|
||||||
|
|
|
@ -51,7 +51,7 @@ import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
|
||||||
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
|
import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute;
|
||||||
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
|
||||||
import org.apache.lucene.index.LeafReaderContext;
|
import org.apache.lucene.index.LeafReaderContext;
|
||||||
import org.apache.lucene.search.BooleanQuery;
|
import org.apache.lucene.search.IndexSearcher;
|
||||||
import org.apache.lucene.search.QueryVisitor;
|
import org.apache.lucene.search.QueryVisitor;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings;
|
import org.apache.lucene.util.graph.GraphTokenStreamFiniteStrings;
|
||||||
|
@ -201,11 +201,10 @@ final class IntervalBuilder {
|
||||||
private static List<IntervalsSource> analyzeGraph(TokenStream source) throws IOException {
|
private static List<IntervalsSource> analyzeGraph(TokenStream source) throws IOException {
|
||||||
source.reset();
|
source.reset();
|
||||||
GraphTokenStreamFiniteStrings graph = new GraphTokenStreamFiniteStrings(source);
|
GraphTokenStreamFiniteStrings graph = new GraphTokenStreamFiniteStrings(source);
|
||||||
|
|
||||||
List<IntervalsSource> clauses = new ArrayList<>();
|
List<IntervalsSource> clauses = new ArrayList<>();
|
||||||
int[] articulationPoints = graph.articulationPoints();
|
int[] articulationPoints = graph.articulationPoints();
|
||||||
int lastState = 0;
|
int lastState = 0;
|
||||||
int maxClauseCount = BooleanQuery.getMaxClauseCount();
|
int maxClauseCount = IndexSearcher.getMaxClauseCount();
|
||||||
for (int i = 0; i <= articulationPoints.length; i++) {
|
for (int i = 0; i <= articulationPoints.length; i++) {
|
||||||
int start = lastState;
|
int start = lastState;
|
||||||
int end = -1;
|
int end = -1;
|
||||||
|
@ -220,7 +219,7 @@ final class IntervalBuilder {
|
||||||
TokenStream ts = it.next();
|
TokenStream ts = it.next();
|
||||||
IntervalsSource phrase = combineSources(analyzeTerms(ts), 0, true);
|
IntervalsSource phrase = combineSources(analyzeTerms(ts), 0, true);
|
||||||
if (paths.size() >= maxClauseCount) {
|
if (paths.size() >= maxClauseCount) {
|
||||||
throw new BooleanQuery.TooManyClauses();
|
throw new IndexSearcher.TooManyClauses();
|
||||||
}
|
}
|
||||||
paths.add(phrase);
|
paths.add(phrase);
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue