mirror of https://github.com/apache/lucene.git
Merge remote-tracking branch 'origin/master'
commit 366f8d18fe
@@ -86,6 +86,8 @@ Optimizations
   (which is used by TermsQuery, multi-term queries and several point queries).
   (Adrien Grand, Jeff Wartes, David Smiley)

+* LUCENE-7299: Speed up BytesRefHash.sort(). (Adrien Grand)
+
 Bug Fixes

 * LUCENE-7127: Fix corner case bugs in GeoPointDistanceQuery. (Robert Muir)
@@ -136,6 +138,9 @@ Documentation

 Other

+* LUCENE-7295: TermAutomatonQuery.hashCode calculates Automaton.toDot().hash,
+  equivalence relationship replaced with object identity. (Dawid Weiss)
+
 * LUCENE-7277: Make Query.hashCode and Query.equals abstract. (Paul Elschot,
   Dawid Weiss)

@@ -153,6 +158,9 @@ Other
 * SOLR-9109/SOLR-9121: Allow specification of a custom Ivy settings file via system
   property "ivysettings.xml". (Misha Dmitriev, Christine Poerschke, Uwe Schindler, Steve Rowe)

+* LUCENE-7206: Improve the ToParentBlockJoinQuery's explain by including the explain
+  of the best matching child doc. (Ilya Kasnacheev, Jeff Evans via Martijn van Groningen)
+
 Build

 * LUCENE-7292: Use '-release' instead of '-source/-target' during
@@ -661,7 +661,7 @@ public class MultiDocValues {
     public final OrdinalMap mapping;

     /** Creates a new MultiSortedDocValues over <code>values</code> */
-    MultiSortedDocValues(SortedDocValues values[], int docStarts[], OrdinalMap mapping) throws IOException {
+    public MultiSortedDocValues(SortedDocValues values[], int docStarts[], OrdinalMap mapping) throws IOException {
       assert docStarts.length == values.length + 1;
       this.values = values;
       this.docStarts = docStarts;
@@ -17,7 +17,6 @@
 package org.apache.lucene.util;

 import java.util.Arrays;
-import java.util.Collection;
 import java.util.Comparator;

 /**
@@ -40,19 +39,6 @@ public final class ArrayUtil {
    */

-  /**
-   * Parses the string argument as if it was an int value and returns the
-   * result. Throws NumberFormatException if the string does not represent an
-   * int quantity.
-   *
-   * @param chars a string representation of an int quantity.
-   * @return int the value represented by the argument
-   * @throws NumberFormatException if the argument could not be parsed as an int quantity.
-   */
-  public static int parseInt(char[] chars) throws NumberFormatException {
-    return parseInt(chars, 0, chars.length, 10);
-  }
-
   /**
    * Parses a char array into an int.
    * @param chars the character array
@@ -225,17 +211,6 @@ public final class ArrayUtil {
     }
   }

-  public static int getShrinkSize(int currentSize, int targetSize, int bytesPerElement) {
-    final int newSize = oversize(targetSize, bytesPerElement);
-    // Only reallocate if we are "substantially" smaller.
-    // This saves us from "running hot" (constantly making a
-    // bit bigger then a bit smaller, over and over):
-    if (newSize < currentSize / 2)
-      return newSize;
-    else
-      return currentSize;
-  }
-
   public static <T> T[] grow(T[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
@@ -247,9 +222,7 @@ public final class ArrayUtil {
   public static short[] grow(short[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      short[] newArray = new short[oversize(minSize, Short.BYTES)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Short.BYTES));
     } else
       return array;
   }
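The same three-line allocate/copy/return body recurs in each primitive grow overload below, and each collapses into a single Arrays.copyOf call; ArrayUtil.oversize still decides how much head-room to reserve so repeated grows stay amortized O(1). A minimal hedged sketch of the resulting idiom (growLongs is a hypothetical stand-in for the real overloads):

    import java.util.Arrays;
    import org.apache.lucene.util.ArrayUtil;

    // Hedged sketch of the grow idiom adopted in this commit: Arrays.copyOf
    // allocates and copies in one step instead of new[] + System.arraycopy.
    static long[] growLongs(long[] array, int minSize) { // hypothetical helper
      if (array.length < minSize) {
        return Arrays.copyOf(array, ArrayUtil.oversize(minSize, Long.BYTES));
      }
      return array;
    }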
@@ -261,9 +234,7 @@ public final class ArrayUtil {
   public static float[] grow(float[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      float[] newArray = new float[oversize(minSize, Float.BYTES)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Float.BYTES));
     } else
       return array;
   }
@@ -275,9 +246,7 @@ public final class ArrayUtil {
   public static double[] grow(double[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      double[] newArray = new double[oversize(minSize, Double.BYTES)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Double.BYTES));
     } else
       return array;
   }
@@ -286,23 +255,10 @@ public final class ArrayUtil {
     return grow(array, 1 + array.length);
   }

-  public static short[] shrink(short[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, Short.BYTES);
-    if (newSize != array.length) {
-      short[] newArray = new short[newSize];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-
   public static int[] grow(int[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      int[] newArray = new int[oversize(minSize, Integer.BYTES)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Integer.BYTES));
     } else
       return array;
   }
@@ -311,23 +267,10 @@ public final class ArrayUtil {
     return grow(array, 1 + array.length);
   }

-  public static int[] shrink(int[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, Integer.BYTES);
-    if (newSize != array.length) {
-      int[] newArray = new int[newSize];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-
   public static long[] grow(long[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      long[] newArray = new long[oversize(minSize, Long.BYTES)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Long.BYTES));
     } else
       return array;
   }
@@ -336,23 +279,10 @@ public final class ArrayUtil {
     return grow(array, 1 + array.length);
   }

-  public static long[] shrink(long[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, Long.BYTES);
-    if (newSize != array.length) {
-      long[] newArray = new long[newSize];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-
   public static byte[] grow(byte[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      byte[] newArray = new byte[oversize(minSize, 1)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Byte.BYTES));
     } else
       return array;
   }
@@ -361,48 +291,10 @@ public final class ArrayUtil {
     return grow(array, 1 + array.length);
   }

-  public static byte[] shrink(byte[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, 1);
-    if (newSize != array.length) {
-      byte[] newArray = new byte[newSize];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-
-  public static boolean[] grow(boolean[] array, int minSize) {
-    assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
-    if (array.length < minSize) {
-      boolean[] newArray = new boolean[oversize(minSize, 1)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
-    } else
-      return array;
-  }
-
-  public static boolean[] grow(boolean[] array) {
-    return grow(array, 1 + array.length);
-  }
-
-  public static boolean[] shrink(boolean[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, 1);
-    if (newSize != array.length) {
-      boolean[] newArray = new boolean[newSize];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-
   public static char[] grow(char[] array, int minSize) {
     assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
     if (array.length < minSize) {
-      char[] newArray = new char[oversize(minSize, Character.BYTES)];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
+      return Arrays.copyOf(array, oversize(minSize, Character.BYTES));
     } else
       return array;
   }
@@ -411,71 +303,6 @@ public final class ArrayUtil {
     return grow(array, 1 + array.length);
   }

-  public static char[] shrink(char[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, Character.BYTES);
-    if (newSize != array.length) {
-      char[] newArray = new char[newSize];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-
-  public static int[][] grow(int[][] array, int minSize) {
-    assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
-    if (array.length < minSize) {
-      int[][] newArray = new int[oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF)][];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
-    } else {
-      return array;
-    }
-  }
-
-  public static int[][] grow(int[][] array) {
-    return grow(array, 1 + array.length);
-  }
-
-  public static int[][] shrink(int[][] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
-    if (newSize != array.length) {
-      int[][] newArray = new int[newSize][];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else {
-      return array;
-    }
-  }
-
-  public static float[][] grow(float[][] array, int minSize) {
-    assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
-    if (array.length < minSize) {
-      float[][] newArray = new float[oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF)][];
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
-    } else {
-      return array;
-    }
-  }
-
-  public static float[][] grow(float[][] array) {
-    return grow(array, 1 + array.length);
-  }
-
-  public static float[][] shrink(float[][] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
-    if (newSize != array.length) {
-      float[][] newArray = new float[newSize][];
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else {
-      return array;
-    }
-  }
-
   /**
    * Returns hash of chars in range start (inclusive) to
    * end (inclusive)
@@ -486,44 +313,6 @@ public final class ArrayUtil {
       code = code * 31 + array[i];
     return code;
   }

-  /**
-   * Returns hash of bytes in range start (inclusive) to
-   * end (inclusive)
-   */
-  public static int hashCode(byte[] array, int start, int end) {
-    int code = 0;
-    for (int i = end - 1; i >= start; i--)
-      code = code * 31 + array[i];
-    return code;
-  }
-
-  // Since Arrays.equals doesn't implement offsets for equals
-  /**
-   * See if two array slices are the same.
-   *
-   * @param left The left array to compare
-   * @param offsetLeft The offset into the array. Must be positive
-   * @param right The right array to compare
-   * @param offsetRight the offset into the right array. Must be positive
-   * @param length The length of the section of the array to compare
-   * @return true if the two arrays, starting at their respective offsets, are equal
-   *
-   * @see java.util.Arrays#equals(char[], char[])
-   */
-  public static boolean equals(char[] left, int offsetLeft, char[] right, int offsetRight, int length) {
-    if ((offsetLeft + length <= left.length) && (offsetRight + length <= right.length)) {
-      for (int i = 0; i < length; i++) {
-        if (left[offsetLeft + i] != right[offsetRight + i]) {
-          return false;
-        }
-      }
-      return true;
-    }
-    return false;
-  }
-
   // Since Arrays.equals doesn't implement offsets for equals
   /**
@@ -551,35 +340,6 @@ public final class ArrayUtil {
     return false;
   }

-  /* DISABLE THIS FOR NOW: This has performance problems until Java creates intrinsics for Class#getComponentType() and Array.newInstance()
-  public static <T> T[] grow(T[] array, int minSize) {
-    assert minSize >= 0: "size must be positive (got " + minSize + "): likely integer overflow?";
-    if (array.length < minSize) {
-      @SuppressWarnings("unchecked") final T[] newArray =
-        (T[]) Array.newInstance(array.getClass().getComponentType(), oversize(minSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF));
-      System.arraycopy(array, 0, newArray, 0, array.length);
-      return newArray;
-    } else
-      return array;
-  }
-
-  public static <T> T[] grow(T[] array) {
-    return grow(array, 1 + array.length);
-  }
-
-  public static <T> T[] shrink(T[] array, int targetSize) {
-    assert targetSize >= 0: "size must be positive (got " + targetSize + "): likely integer overflow?";
-    final int newSize = getShrinkSize(array.length, targetSize, RamUsageEstimator.NUM_BYTES_OBJECT_REF);
-    if (newSize != array.length) {
-      @SuppressWarnings("unchecked") final T[] newArray =
-        (T[]) Array.newInstance(array.getClass().getComponentType(), newSize);
-      System.arraycopy(array, 0, newArray, 0, newSize);
-      return newArray;
-    } else
-      return array;
-  }
-  */
-
   // Since Arrays.equals doesn't implement offsets for equals
   /**
    * See if two array slices are the same.
@@ -606,20 +366,6 @@ public final class ArrayUtil {
     return false;
   }

-  public static int[] toIntArray(Collection<Integer> ints) {
-
-    final int[] result = new int[ints.size()];
-    int upto = 0;
-    for(int v : ints) {
-      result[upto++] = v;
-    }
-
-    // paranoia:
-    assert upto == result.length;
-
-    return result;
-  }
-
   /** Swap values stored in slots <code>i</code> and <code>j</code> */
   public static <T> void swap(T[] arr, int i, int j) {
     final T tmp = arr[i];
@@ -158,40 +158,23 @@ public final class BytesRefHash {
    */
   public int[] sort() {
     final int[] compact = compact();
-    new IntroSorter() {
+    new StringMSBRadixSorter() {
+
+      BytesRef scratch = new BytesRef();
+
       @Override
       protected void swap(int i, int j) {
-        final int o = compact[i];
+        int tmp = compact[i];
         compact[i] = compact[j];
-        compact[j] = o;
+        compact[j] = tmp;
       }

       @Override
-      protected int compare(int i, int j) {
-        final int id1 = compact[i], id2 = compact[j];
-        assert bytesStart.length > id1 && bytesStart.length > id2;
-        pool.setBytesRef(scratch1, bytesStart[id1]);
-        pool.setBytesRef(scratch2, bytesStart[id2]);
-        return scratch1.compareTo(scratch2);
-      }
-
-      @Override
-      protected void setPivot(int i) {
-        final int id = compact[i];
-        assert bytesStart.length > id;
-        pool.setBytesRef(pivot, bytesStart[id]);
-      }
-
-      @Override
-      protected int comparePivot(int j) {
-        final int id = compact[j];
-        assert bytesStart.length > id;
-        pool.setBytesRef(scratch2, bytesStart[id]);
-        return pivot.compareTo(scratch2);
-      }
-
-      private final BytesRef pivot = new BytesRef(),
-        scratch1 = new BytesRef(), scratch2 = new BytesRef();
+      protected BytesRef get(int i) {
+        pool.setBytesRef(scratch, bytesStart[compact[i]]);
+        return scratch;
+      }
     }.sort(0, count);
     return compact;
   }
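After this change, BytesRefHash.sort() no longer runs whole-term comparisons in an IntroSorter: the anonymous subclass only supplies random access (get) and swap, and the StringMSBRadixSorter added below does the bucketing. A hedged usage sketch of the sorted-ids contract (assuming the no-argument sort() shown above):

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefHash;

    BytesRefHash hash = new BytesRefHash();
    for (String term : new String[] {"banana", "apple", "apple", "cherry"}) {
      hash.add(new BytesRef(term)); // adding a duplicate returns the existing id
    }
    int[] sortedIds = hash.sort();  // ids ordered by unsigned byte order of their terms
    BytesRef scratch = new BytesRef();
    for (int i = 0; i < hash.size(); i++) {
      System.out.println(hash.get(sortedIds[i], scratch).utf8ToString()); // apple, banana, cherry
    }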
@@ -0,0 +1,187 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.util;

import java.util.Arrays;

/** Radix sorter for variable-length strings. This class sorts based on the most
 *  significant byte first and falls back to {@link IntroSorter} when the size
 *  of the buckets to sort becomes small. It is <b>NOT</b> stable.
 *  Worst-case memory usage is about {@code 2.3 KB} */
abstract class StringMSBRadixSorter extends Sorter {

  // after that many levels of recursion we fall back to introsort anyway
  private static final int LEVEL_THRESHOLD = 8;
  // size of histograms: 256 + 1 to indicate that the string is finished
  private static final int HISTOGRAM_SIZE = 257;
  // buckets below this size will be sorted with introsort
  private static final int LENGTH_THRESHOLD = 100;

  // we store one histogram per recursion level
  private final int[][] histograms = new int[LEVEL_THRESHOLD][];
  private final int[] endOffsets = new int[HISTOGRAM_SIZE];

  /** Get a {@link BytesRef} for the given index. */
  protected abstract BytesRef get(int i);

  /** Store bytes for the given index into {@code dest}, without the first k bytes. */
  private void get(int i, int k, BytesRef dest) {
    BytesRef ref = get(i);
    assert ref.length >= k;
    dest.bytes = ref.bytes;
    dest.offset = ref.offset + k;
    dest.length = ref.length - k;
  }

  @Override
  protected final int compare(int i, int j) {
    throw new UnsupportedOperationException("unused: not a comparison-based sort");
  }

  @Override
  public void sort(int from, int to) {
    checkRange(from, to);
    sort(from, to, 0);
  }

  private void sort(int from, int to, int k) {
    if (to - from <= LENGTH_THRESHOLD || k >= LEVEL_THRESHOLD) {
      introSort(from, to, k);
    } else {
      radixSort(from, to, k);
    }
  }

  private void introSort(int from, int to, int k) {
    new IntroSorter() {
      @Override
      protected void swap(int i, int j) {
        StringMSBRadixSorter.this.swap(i, j);
      }

      @Override
      protected int compare(int i, int j) {
        get(i, k, scratch1);
        get(j, k, scratch2);
        return scratch1.compareTo(scratch2);
      }

      @Override
      protected void setPivot(int i) {
        get(i, k, pivot);
      }

      @Override
      protected int comparePivot(int j) {
        get(j, k, scratch2);
        return pivot.compareTo(scratch2);
      }

      private final BytesRef pivot = new BytesRef(),
        scratch1 = new BytesRef(), scratch2 = new BytesRef();
    }.sort(from, to);
  }

  private void radixSort(int from, int to, int k) {
    int[] histogram = histograms[k];
    if (histogram == null) {
      histogram = histograms[k] = new int[HISTOGRAM_SIZE];
    } else {
      Arrays.fill(histogram, 0);
    }

    buildHistogram(from, to, k, histogram);

    // short-circuit: if all keys have the same byte at offset k, then recurse directly
    for (int i = 0; i < HISTOGRAM_SIZE; ++i) {
      if (histogram[i] == to - from) {
        // everything is in the same bucket, recurse
        if (i > 0) {
          sort(from, to, k + 1);
        }
        return;
      } else if (histogram[i] != 0) {
        break;
      }
    }

    int[] startOffsets = histogram;
    int[] endOffsets = this.endOffsets;
    sumHistogram(histogram, endOffsets);
    reorder(from, to, startOffsets, endOffsets, k);
    endOffsets = startOffsets;

    // recurse on all but the first bucket since all keys are equals in this
    // bucket (we already compared all bytes)
    for (int prev = endOffsets[0], i = 1; i < HISTOGRAM_SIZE; ++i) {
      int h = endOffsets[i];
      final int bucketLen = h - prev;
      if (bucketLen > 1) {
        sort(from + prev, from + h, k + 1);
      }
      prev = h;
    }
  }

  /** Return a number for the k-th character between 0 and {@link #HISTOGRAM_SIZE}. */
  private int getBucket(int id, int k) {
    BytesRef ref = get(id);
    if (ref.length <= k) {
      return 0;
    }
    final int b = ref.bytes[ref.offset + k] & 0xff;
    return b + 1;
  }

  /** Build a histogram of the number of values per {@link #getBucket(int, int) bucket}. */
  private int[] buildHistogram(int from, int to, int k, int[] histogram) {
    for (int i = from; i < to; ++i) {
      histogram[getBucket(i, k)]++;
    }
    return histogram;
  }

  /** Accumulate values of the histogram so that it does not store counts but
   *  start offsets. {@code endOffsets} will store the end offsets. */
  private static void sumHistogram(int[] histogram, int[] endOffsets) {
    int accum = 0;
    for (int i = 0; i < HISTOGRAM_SIZE; ++i) {
      final int count = histogram[i];
      histogram[i] = accum;
      accum += count;
      endOffsets[i] = accum;
    }
  }

  /**
   * Reorder based on start/end offsets for each bucket. When this method
   * returns, startOffsets and endOffsets are equal.
   * @param startOffsets start offsets per bucket
   * @param endOffsets end offsets per bucket
   */
  private void reorder(int from, int to, int[] startOffsets, int[] endOffsets, int k) {
    // reorder in place, like the dutch flag problem
    for (int i = 0; i < HISTOGRAM_SIZE; ++i) {
      final int limit = endOffsets[i];
      for (int h1 = startOffsets[i]; h1 < limit; h1 = startOffsets[i]) {
        final int b = getBucket(from + h1, k);
        final int h2 = startOffsets[b]++;
        swap(from + h1, from + h2);
      }
    }
  }
}
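The sumHistogram step above is standard counting-sort bookkeeping: it converts per-bucket counts into start offsets in place while recording the running total as end offsets. A small worked example with toy counts:

    // Toy illustration of sumHistogram (4 buckets instead of 257).
    int[] histogram = {0, 3, 1, 2};  // counts per bucket
    int[] endOffsets = new int[histogram.length];
    int accum = 0;
    for (int i = 0; i < histogram.length; ++i) {
      int count = histogram[i];
      histogram[i] = accum;          // bucket i starts here
      accum += count;
      endOffsets[i] = accum;         // and ends here (exclusive)
    }
    // histogram  == {0, 0, 3, 4}   (start offsets)
    // endOffsets == {0, 3, 4, 6}   (end offsets)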
@@ -18,6 +18,7 @@ package org.apache.lucene.util.bkd;

 import java.io.Closeable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;

 import org.apache.lucene.util.ArrayUtil;
@@ -105,20 +106,6 @@ final class HeapPointWriter implements PointWriter {
     System.arraycopy(bytes, 0, blocks.get(block), blockIndex * packedBytesLength, packedBytesLength);
   }

-  private int[] growExact(int[] arr, int size) {
-    assert size > arr.length;
-    int[] newArr = new int[size];
-    System.arraycopy(arr, 0, newArr, 0, arr.length);
-    return newArr;
-  }
-
-  private long[] growExact(long[] arr, int size) {
-    assert size > arr.length;
-    long[] newArr = new long[size];
-    System.arraycopy(arr, 0, newArr, 0, arr.length);
-    return newArr;
-  }
-
   @Override
   public void append(byte[] packedValue, long ord, int docID) {
     assert closed == false;
@@ -126,12 +113,12 @@ final class HeapPointWriter implements PointWriter {
     if (docIDs.length == nextWrite) {
       int nextSize = Math.min(maxSize, ArrayUtil.oversize(nextWrite+1, Integer.BYTES));
       assert nextSize > nextWrite: "nextSize=" + nextSize + " vs nextWrite=" + nextWrite;
-      docIDs = growExact(docIDs, nextSize);
+      docIDs = Arrays.copyOf(docIDs, nextSize);
       if (singleValuePerDoc == false) {
         if (ordsLong != null) {
-          ordsLong = growExact(ordsLong, nextSize);
+          ordsLong = Arrays.copyOf(ordsLong, nextSize);
         } else {
-          ords = growExact(ords, nextSize);
+          ords = Arrays.copyOf(ords, nextSize);
         }
       }
     }
@@ -22,7 +22,6 @@ import java.io.IOException;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.IntsRef;
 import org.apache.lucene.util.IntsRefBuilder;
-import org.apache.lucene.util.RamUsageEstimator;
 import org.apache.lucene.util.fst.FST.INPUT_TYPE; // javadoc
 import org.apache.lucene.util.packed.PackedInts;

@@ -403,9 +402,7 @@ public class Builder<T> {
     final int prefixLenPlus1 = pos1+1;

     if (frontier.length < input.length+1) {
-      @SuppressWarnings({"rawtypes","unchecked"}) final UnCompiledNode<T>[] next =
-        new UnCompiledNode[ArrayUtil.oversize(input.length+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
-      System.arraycopy(frontier, 0, next, 0, frontier.length);
+      final UnCompiledNode<T>[] next = ArrayUtil.grow(frontier, input.length+1);
       for(int idx=frontier.length;idx<next.length;idx++) {
         next[idx] = new UnCompiledNode<>(this, idx);
       }
@@ -606,9 +603,7 @@ public class Builder<T> {
       assert label >= 0;
       assert numArcs == 0 || label > arcs[numArcs-1].label: "arc[-1].label=" + arcs[numArcs-1].label + " new label=" + label + " numArcs=" + numArcs;
       if (numArcs == arcs.length) {
-        @SuppressWarnings({"rawtypes","unchecked"}) final Arc<T>[] newArcs =
-          new Arc[ArrayUtil.oversize(numArcs+1, RamUsageEstimator.NUM_BYTES_OBJECT_REF)];
-        System.arraycopy(arcs, 0, newArcs, 0, arcs.length);
+        final Arc<T>[] newArcs = ArrayUtil.grow(arcs, numArcs+1);
         for(int arcIdx=numArcs;arcIdx<newArcs.length;arcIdx++) {
           newArcs[arcIdx] = new Arc<>();
         }
@@ -19,6 +19,7 @@ package org.apache.lucene.index;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
@@ -233,12 +234,14 @@ public class TestPerSegmentDeletes extends LuceneTestCase {
   }

   public static int[] toArray(PostingsEnum postingsEnum) throws IOException {
-    List<Integer> docs = new ArrayList<>();
+    int[] docs = new int[0];
+    int numDocs = 0;
     while (postingsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
       int docID = postingsEnum.docID();
-      docs.add(docID);
+      docs = ArrayUtil.grow(docs, numDocs + 1);
+      docs[numDocs + 1] = docID;
     }
-    return ArrayUtil.toIntArray(docs);
+    return Arrays.copyOf(docs, numDocs);
   }

   public class RangeMergePolicy extends MergePolicy {
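Note a likely off-by-one in the rewritten loop above: docs[numDocs + 1] = docID never increments numDocs (and skips slot 0), so Arrays.copyOf(docs, numDocs) would return an empty array; the analogous rewrite in AutomatonTestUtil further down uses the post-increment form. The presumably intended body is:

    // presumed intent (hedged): write at the current count, then advance it
    docs = ArrayUtil.grow(docs, numDocs + 1);
    docs[numDocs++] = docID;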
@@ -17,6 +17,7 @@
 package org.apache.lucene.util;

+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Random;
@@ -70,30 +71,37 @@ public class TestArrayUtil extends LuceneTestCase {
     }
   }

+  private static int parseInt(String s) {
+    int start = random().nextInt(5);
+    char[] chars = new char[s.length() + start + random().nextInt(4)];
+    s.getChars(0, s.length(), chars, start);
+    return ArrayUtil.parseInt(chars, start, s.length());
+  }
+
   public void testParseInt() throws Exception {
     expectThrows(NumberFormatException.class, () -> {
-      ArrayUtil.parseInt("".toCharArray());
+      parseInt("");
     });

     expectThrows(NumberFormatException.class, () -> {
-      ArrayUtil.parseInt("foo".toCharArray());
+      parseInt("foo");
     });

     expectThrows(NumberFormatException.class, () -> {
-      ArrayUtil.parseInt(String.valueOf(Long.MAX_VALUE).toCharArray());
+      parseInt(String.valueOf(Long.MAX_VALUE));
     });

     expectThrows(NumberFormatException.class, () -> {
-      ArrayUtil.parseInt("0.34".toCharArray());
+      parseInt("0.34");
     });

-    int test = ArrayUtil.parseInt("1".toCharArray());
+    int test = parseInt("1");
     assertTrue(test + " does not equal: " + 1, test == 1);
-    test = ArrayUtil.parseInt("-10000".toCharArray());
+    test = parseInt("-10000");
     assertTrue(test + " does not equal: " + -10000, test == -10000);
-    test = ArrayUtil.parseInt("1923".toCharArray());
+    test = parseInt("1923");
     assertTrue(test + " does not equal: " + 1923, test == 1923);
-    test = ArrayUtil.parseInt("-1".toCharArray());
+    test = parseInt("-1");
     assertTrue(test + " does not equal: " + -1, test == -1);
     test = ArrayUtil.parseInt("foo 1923 bar".toCharArray(), 4, 4);
     assertTrue(test + " does not equal: " + 1923, test == 1923);
@@ -102,8 +110,8 @@ public class TestArrayUtil extends LuceneTestCase {
   public void testSliceEquals() {
     String left = "this is equal";
     String right = left;
-    char[] leftChars = left.toCharArray();
-    char[] rightChars = right.toCharArray();
+    byte[] leftChars = left.getBytes(StandardCharsets.UTF_8);
+    byte[] rightChars = right.getBytes(StandardCharsets.UTF_8);
     assertTrue(left + " does not equal: " + right, ArrayUtil.equals(leftChars, 0, rightChars, 0, left.length()));

     assertFalse(left + " does not equal: " + right, ArrayUtil.equals(leftChars, 1, rightChars, 0, left.length()));
@@ -0,0 +1,97 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.util;

import java.util.Arrays;

public class TestStringMSBRadixSorter extends LuceneTestCase {

  private void test(BytesRef[] refs, int len) {
    BytesRef[] expected = Arrays.copyOf(refs, len);
    Arrays.sort(expected);

    new StringMSBRadixSorter() {

      @Override
      protected BytesRef get(int i) {
        return refs[i];
      }

      @Override
      protected void swap(int i, int j) {
        BytesRef tmp = refs[i];
        refs[i] = refs[j];
        refs[j] = tmp;
      }
    }.sort(0, len);
    BytesRef[] actual = Arrays.copyOf(refs, len);
    assertArrayEquals(expected, actual);
  }

  public void testEmpty() {
    test(new BytesRef[random().nextInt(5)], 0);
  }

  public void testOneValue() {
    BytesRef bytes = new BytesRef(TestUtil.randomSimpleString(random()));
    test(new BytesRef[] { bytes }, 1);
  }

  public void testTwoValues() {
    BytesRef bytes1 = new BytesRef(TestUtil.randomSimpleString(random()));
    BytesRef bytes2 = new BytesRef(TestUtil.randomSimpleString(random()));
    test(new BytesRef[] { bytes1, bytes2 }, 2);
  }

  private void testRandom(int commonPrefixLen, int maxLen) {
    byte[] commonPrefix = new byte[commonPrefixLen];
    random().nextBytes(commonPrefix);
    final int len = random().nextInt(100000);
    BytesRef[] bytes = new BytesRef[len + random().nextInt(50)];
    for (int i = 0; i < len; ++i) {
      byte[] b = new byte[commonPrefixLen + random().nextInt(maxLen)];
      random().nextBytes(b);
      System.arraycopy(commonPrefix, 0, b, 0, commonPrefixLen);
      bytes[i] = new BytesRef(b);
    }
    test(bytes, len);
  }

  public void testRandom() {
    for (int iter = 0; iter < 10; ++iter) {
      testRandom(0, 10);
    }
  }

  public void testRandomWithLotsOfDuplicates() {
    for (int iter = 0; iter < 10; ++iter) {
      testRandom(0, 2);
    }
  }

  public void testRandomWithSharedPrefix() {
    for (int iter = 0; iter < 10; ++iter) {
      testRandom(TestUtil.nextInt(random(), 1, 30), 10);
    }
  }

  public void testRandomWithSharedPrefixAndLotsOfDuplicates() {
    for (int iter = 0; iter < 10; ++iter) {
      testRandom(TestUtil.nextInt(random(), 1, 30), 2);
    }
  }
}
@@ -280,13 +280,13 @@ public class ToParentBlockJoinCollector implements Collector {
         // While rare, this could happen if join query had
         // null scorer on first segment(s) but then became
         // non-null on later segments
-        og.docs = ArrayUtil.grow(og.docs);
+        og.docs = ArrayUtil.grow(og.docs, numSubScorers);
       }
       if (og.counts.length < numSubScorers) {
         og.counts = ArrayUtil.grow(og.counts);
       }
       if (trackScores && og.scores.length < numSubScorers) {
-        og.scores = ArrayUtil.grow(og.scores);
+        og.scores = ArrayUtil.grow(og.scores, numSubScorers);
       }

       //System.out.println("\ncopyGroups parentDoc=" + og.doc);
@@ -17,8 +17,10 @@
 package org.apache.lucene.search.join;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;

@@ -184,7 +186,7 @@ public class ToParentBlockJoinQuery extends Query {
   public Explanation explain(LeafReaderContext context, int doc) throws IOException {
     BlockJoinScorer scorer = (BlockJoinScorer) scorer(context);
     if (scorer != null && scorer.iterator().advance(doc) == doc) {
-      return scorer.explain(context.docBase);
+      return scorer.explain(context, childWeight);
     }
     return Explanation.noMatch("Not a match");
   }
@@ -436,10 +438,24 @@ public class ToParentBlockJoinQuery extends Query {
       return parentFreq;
     }

-    public Explanation explain(int docBase) throws IOException {
-      int start = docBase + prevParentDoc + 1; // +1 b/c prevParentDoc is previous parent doc
-      int end = docBase + parentDoc - 1; // -1 b/c parentDoc is parent doc
-      return Explanation.match(score(), String.format(Locale.ROOT, "Score based on child doc range from %d to %d", start, end)
+    public Explanation explain(LeafReaderContext context, Weight childWeight) throws IOException {
+      int start = context.docBase + prevParentDoc + 1; // +1 b/c prevParentDoc is previous parent doc
+      int end = context.docBase + parentDoc - 1; // -1 b/c parentDoc is parent doc
+
+      Explanation bestChild = null;
+      int matches = 0;
+      for (int childDoc = start; childDoc <= end; childDoc++) {
+        Explanation child = childWeight.explain(context, childDoc - context.docBase);
+        if (child.isMatch()) {
+          matches++;
+          if (bestChild == null || child.getValue() > bestChild.getValue()) {
+            bestChild = child;
+          }
+        }
+      }
+
+      return Explanation.match(score(), String.format(Locale.ROOT,
+          "Score based on %d child docs in range from %d to %d, best match:", matches, start, end), bestChild
       );
     }
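To see the improved explanation end to end, a hedged sketch against the block-join API of this era (index contents, the searcher and the field names are assumptions):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.*;
    import org.apache.lucene.search.join.*;

    Query childQuery = new TermQuery(new Term("color", "red"));  // assumed child field
    BitSetProducer parents =
        new QueryBitSetProducer(new TermQuery(new Term("docType", "parent")));
    Query join = new ToParentBlockJoinQuery(childQuery, parents, ScoreMode.Max);
    TopDocs hits = searcher.search(join, 10);                    // searcher assumed
    Explanation e = searcher.explain(join, hits.scoreDocs[0].doc);
    // the description now reports the matched child count and doc range, and
    // nests the best-matching child's own explanation as a detail
    System.out.println(e);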
@@ -24,6 +24,8 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;

 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
@@ -50,6 +52,7 @@ import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.BoostQuery;
+import org.apache.lucene.search.CheckHits;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.FieldDoc;
@@ -231,6 +234,8 @@ public class TestBlockJoin extends LuceneTestCase {

     ToParentBlockJoinCollector c = new ToParentBlockJoinCollector(Sort.RELEVANCE, 1, true, true);

+    CheckHits.checkHitCollector(random(), fullQuery.build(), "country", s, new int[] {2});
+
     s.search(fullQuery.build(), c);

     TopGroups<Integer> results = c.getTopGroups(childJoinQuery, null, 0, 10, 0, true);
@@ -869,7 +874,16 @@ public class TestBlockJoin extends LuceneTestCase {
         //System.out.println("  hit docID=" + hit.doc + " childId=" + childId + " parentId=" + document.get("parentID"));
         assertTrue(explanation.isMatch());
         assertEquals(hit.score, explanation.getValue(), 0.0f);
-        assertEquals(String.format(Locale.ROOT, "Score based on child doc range from %d to %d", hit.doc - 1 - childId, hit.doc - 1), explanation.getDescription());
+        Matcher m = Pattern.compile("Score based on ([0-9]+) child docs in range from ([0-9]+) to ([0-9]+), best match:").matcher(explanation.getDescription());
+        assertTrue("Block Join description not matches", m.matches());
+        assertTrue("Matched children not positive", Integer.parseInt(m.group(1)) > 0);
+        assertEquals("Wrong child range start", hit.doc - 1 - childId, Integer.parseInt(m.group(2)));
+        assertEquals("Wrong child range end", hit.doc - 1, Integer.parseInt(m.group(3)));
+        Explanation childWeightExplanation = explanation.getDetails()[0];
+        if ("sum of:".equals(childWeightExplanation.getDescription())) {
+          childWeightExplanation = childWeightExplanation.getDetails()[0];
+        }
+        assertTrue("Wrong child weight description", childWeightExplanation.getDescription().startsWith("weight(child"));
       }
     }
@@ -34,6 +34,7 @@ import org.apache.lucene.search.Weight;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.DocIdSetBuilder;
 import org.apache.lucene.util.StringHelper;

 /**
@@ -103,19 +104,21 @@ abstract class LatLonPointBoxQuery extends Query {
     return new ConstantScoreWeight(this) {

       private DocIdSetIterator buildMatchingIterator(LeafReader reader, PointValues values) throws IOException {
-        MatchingPoints result = new MatchingPoints(reader, field);
+        DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field);

         values.intersect(field,
             new IntersectVisitor() {

+              DocIdSetBuilder.BulkAdder adder;
+
               @Override
               public void grow(int count) {
-                result.grow(count);
+                adder = result.grow(count);
               }

               @Override
               public void visit(int docID) {
-                result.add(docID);
+                adder.add(docID);
               }

               @Override
@@ -133,7 +136,7 @@ abstract class LatLonPointBoxQuery extends Query {
               }

               // Doc is in-bounds
-              result.add(docID);
+              adder.add(docID);
             }

             @Override
@@ -160,7 +163,7 @@ abstract class LatLonPointBoxQuery extends Query {
               }
             }
           });
-        return result.iterator();
+        return result.build().iterator();
       }

       @Override
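The same three-part rewrite (builder construction, a BulkAdder captured in grow(), build().iterator() at the end) is applied to the distance and polygon queries below. DocIdSetBuilder.grow(count) reserves room for up to count more doc IDs and returns a BulkAdder whose add() skips per-call capacity checks. A minimal hedged sketch outside of a query:

    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.DocIdSetBuilder;

    DocIdSetBuilder builder = new DocIdSetBuilder(maxDoc);  // maxDoc assumed from the reader
    DocIdSetBuilder.BulkAdder adder = builder.grow(3);      // reserve room for 3 docs
    adder.add(42);
    adder.add(7);
    adder.add(13);                                          // insertion order does not matter
    DocIdSetIterator it = builder.build().iterator();       // sorted, deduplicated iteration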
@@ -32,6 +32,7 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
+import org.apache.lucene.util.DocIdSetBuilder;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.SloppyMath;
 import org.apache.lucene.util.StringHelper;
@@ -117,18 +118,21 @@ final class LatLonPointDistanceQuery extends Query {
         LatLonPoint.checkCompatible(fieldInfo);

         // matching docids
-        MatchingPoints result = new MatchingPoints(reader, field);
+        DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field);

         values.intersect(field,
             new IntersectVisitor() {

+              DocIdSetBuilder.BulkAdder adder;
+
               @Override
               public void grow(int count) {
-                result.grow(count);
+                adder = result.grow(count);
               }

               @Override
               public void visit(int docID) {
-                result.add(docID);
+                adder.add(docID);
               }

               @Override
@@ -152,7 +156,7 @@ final class LatLonPointDistanceQuery extends Query {

                 // its a match only if its sortKey <= our sortKey
                 if (SloppyMath.haversinSortKey(latitude, longitude, docLatitude, docLongitude) <= sortKey) {
-                  result.add(docID);
+                  adder.add(docID);
                 }
               }

@@ -206,7 +210,7 @@ final class LatLonPointDistanceQuery extends Query {
             }
           });

-        return new ConstantScoreScorer(this, score(), result.iterator());
+        return new ConstantScoreScorer(this, score(), result.build().iterator());
       }
     };
   }
@@ -32,6 +32,7 @@ import org.apache.lucene.index.PointValues;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.util.DocIdSetBuilder;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.StringHelper;
 import org.apache.lucene.geo.Polygon;
@@ -110,25 +111,28 @@ final class LatLonPointInPolygonQuery extends Query {
         LatLonPoint.checkCompatible(fieldInfo);

         // matching docids
-        MatchingPoints result = new MatchingPoints(reader, field);
+        DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field);

         values.intersect(field,
             new IntersectVisitor() {

+              DocIdSetBuilder.BulkAdder adder;
+
               @Override
               public void grow(int count) {
-                result.grow(count);
+                adder = result.grow(count);
               }

               @Override
               public void visit(int docID) {
-                result.add(docID);
+                adder.add(docID);
               }

               @Override
               public void visit(int docID, byte[] packedValue) {
                 if (tree.contains(decodeLatitude(packedValue, 0),
                     decodeLongitude(packedValue, Integer.BYTES))) {
-                  result.add(docID);
+                  adder.add(docID);
                 }
               }

@@ -151,7 +155,7 @@ final class LatLonPointInPolygonQuery extends Query {
             }
           });

-        return new ConstantScoreScorer(this, score(), result.iterator());
+        return new ConstantScoreScorer(this, score(), result.build().iterator());
       }
     };
   }
@@ -1,102 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.document;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.SparseFixedBitSet;

/**
 * Accumulates matching hits for points.
 * <p>
 * Add matches with ({@link #add(int)}) and call {@link #iterator()} for
 * an iterator over the results.
 * <p>
 * <b>NOTE:</b> it is required that you implement the optional {@code grow()}
 * method in your IntersectVisitor, this is used for cost computation.
 * <p>
 * This implementation currently optimizes bitset structure (sparse vs dense)
 * and {@link DocIdSetIterator#cost()} (cardinality) based on index statistics.
 * This API may change as point values evolves.
 *
 * @lucene.experimental
 */
final class MatchingPoints {
  /** bitset we collect into */
  private final BitSet bits;
  /** number of documents containing a value for the points field */
  private final int docCount;
  /** number of values indexed for the points field */
  private final long numPoints;
  /** number of documents in the index segment */
  private final int maxDoc;
  /** counter of hits seen */
  private long counter;

  /**
   * Creates a new accumulator.
   * @param reader reader to collect point matches from
   * @param field field name.
   */
  public MatchingPoints(LeafReader reader, String field) {
    maxDoc = reader.maxDoc();
    PointValues values = reader.getPointValues();
    if (values == null) {
      throw new IllegalStateException("the query is missing null checks");
    }
    docCount = values.getDocCount(field);
    numPoints = values.size(field);
    // heuristic: if the field is really sparse, use a sparse impl
    if (docCount >= 0 && docCount * 100L < maxDoc) {
      bits = new SparseFixedBitSet(maxDoc);
    } else {
      bits = new FixedBitSet(maxDoc);
    }
  }

  /**
   * Record a matching docid.
   * <p>
   * NOTE: doc IDs do not need to be provided in any order.
   */
  public void add(int doc) {
    bits.set(doc);
  }

  /**
   * Grows cardinality counter by the given amount.
   */
  public void grow(int amount) {
    counter += amount;
  }

  /**
   * Returns an iterator over the recorded matches.
   */
  public DocIdSetIterator iterator() {
    // ensure caller implements the grow() api
    assert counter > 0 || bits.cardinality() == 0 : "the IntersectVisitor is missing grow()";

    // if single-valued (docCount == numPoints), then we know 1 point == 1 doc
    // otherwise we approximate based on field stats
    return new BitSetIterator(bits, (long) (counter * (docCount / (double) numPoints)));
  }
}
@@ -245,19 +245,18 @@ public class TermAutomatonQuery extends Query {
   }

   private boolean equalsTo(TermAutomatonQuery other) {
-    // NOTE: not quite correct, because if terms were added in different
-    // order in each query but the language is the same, we return false:
     return checkFinished(this) &&
       checkFinished(other) &&
-      termToID.equals(other.termToID) &&
-      Operations.sameLanguage(det, other.det);
+      other == this;
   }

   @Override
   public int hashCode() {
     checkFinished(this);
-    // TODO: LUCENE-7295: Automaton.toDot() is very costly!
-    return classHash() ^ termToID.hashCode() + det.toDot().hashCode();
+    // LUCENE-7295: this used to be very awkward toDot() call; it is safer to assume
+    // that no two instances are equivalent instead (until somebody finds a better way to check
+    // on automaton equivalence quickly).
+    return System.identityHashCode(this);
   }

   /** Returns the dot (graphviz) representation of this automaton.
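Identity-based equals/hashCode sidesteps the cost of automaton-equivalence checks, at the price that two queries accepting the same language no longer compare equal, so caches keyed on the query will not share entries between them. A hedged illustration (buildQuery is a hypothetical helper constructing two identical queries):

    TermAutomatonQuery q1 = buildQuery(); // hypothetical helper
    TermAutomatonQuery q2 = buildQuery(); // built with identical terms and states
    assert q1.equals(q1);                 // still reflexive
    assert !q1.equals(q2);                // identity semantics: distinct instances differ
    assert q1.hashCode() == System.identityHashCode(q1);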
@@ -328,7 +327,6 @@ public class TermAutomatonQuery extends Query {
 }

 final class TermAutomatonWeight extends Weight {
-  private final IndexSearcher searcher;
   final Automaton automaton;
   private final Map<Integer,TermContext> termStates;
   private final Similarity.SimWeight stats;
@@ -337,7 +335,6 @@ public class TermAutomatonQuery extends Query {
   public TermAutomatonWeight(Automaton automaton, IndexSearcher searcher, Map<Integer,TermContext> termStates) throws IOException {
     super(TermAutomatonQuery.this);
     this.automaton = automaton;
-    this.searcher = searcher;
     this.termStates = termStates;
     this.similarity = searcher.getSimilarity(true);
     List<TermStatistics> allTermStats = new ArrayList<>();
@@ -346,6 +346,10 @@ public class CheckHits {
     if (expl.getDescription().endsWith("computed from:")) {
       return; // something more complicated.
     }
+    String descr = expl.getDescription().toLowerCase(Locale.ROOT);
+    if (descr.startsWith("score based on ") && descr.contains("child docs in range")) {
+      Assert.assertTrue("Child doc explanations are missing", detail.length > 0);
+    }
     if (detail.length > 0) {
       if (detail.length==1) {
         // simple containment, unless it's a freq of: (which lets a query explain how the freq is calculated),
@@ -357,7 +361,6 @@ public class CheckHits {
       // - end with one of: "product of:", "sum of:", "max of:", or
       // - have "max plus <x> times others" (where <x> is float).
       float x = 0;
-      String descr = expl.getDescription().toLowerCase(Locale.ROOT);
       boolean productOf = descr.endsWith("product of:");
       boolean sumOf = descr.endsWith("sum of:");
       boolean maxOf = descr.endsWith("max of:");
@@ -17,6 +17,7 @@
 package org.apache.lucene.util.automaton;

 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -205,8 +206,8 @@ public class AutomatonTestUtil {
   }

   public int[] getRandomAcceptedString(Random r) {
-
-    final List<Integer> soFar = new ArrayList<>();
+    int[] codePoints = new int[0];
+    int codepointCount = 0;

     int s = 0;

@@ -248,11 +249,12 @@ public class AutomatonTestUtil {
       } else {
         t = transitions[s][r.nextInt(transitions[s].length)];
       }
-      soFar.add(getRandomCodePoint(r, t.min, t.max));
+      codePoints = ArrayUtil.grow(codePoints, codepointCount + 1);
+      codePoints[codepointCount++] = getRandomCodePoint(r, t.min, t.max);
       s = t.dest;
     }

-    return ArrayUtil.toIntArray(soFar);
+    return Arrays.copyOf(codePoints, codepointCount);
   }
 }
@@ -325,6 +325,10 @@ Other Changes

 * SOLR-9131: Fix "start solr" text in cluster.vm Velocity template (janhoy)

+* SOLR-9159: New cloud based concurrent atomic update test (hossman)
+
+* SOLR-9119: several static methods in ValueSourceParser have been made private (hossman)
+
 ================== 6.0.1 ==================
 (No Changes)

@@ -36,12 +36,12 @@ import org.apache.lucene.search.DocValuesRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TermRangeQuery;
-import org.apache.lucene.uninverting.UninvertingReader.Type;
 import org.apache.lucene.util.BytesRef;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrException.ErrorCode;
-import org.apache.solr.common.SolrException;
 import org.apache.solr.response.TextResponseWriter;
 import org.apache.solr.search.QParser;
+import org.apache.solr.uninverting.UninvertingReader.Type;

 import com.ibm.icu.text.Collator;
 import com.ibm.icu.text.RuleBasedCollator;
@@ -26,16 +26,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;

-import com.carrotsearch.hppc.IntHashSet;
-import com.carrotsearch.hppc.IntObjectHashMap;
-import com.carrotsearch.hppc.LongHashSet;
-import com.carrotsearch.hppc.LongObjectHashMap;
-import com.carrotsearch.hppc.LongObjectMap;
-import com.carrotsearch.hppc.cursors.IntObjectCursor;
-import com.carrotsearch.hppc.cursors.LongCursor;
-import com.carrotsearch.hppc.cursors.LongObjectCursor;
-import com.carrotsearch.hppc.cursors.ObjectCursor;
-
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
@@ -53,7 +43,6 @@ import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.search.Query;
-import org.apache.solr.search.QueryWrapperFilter;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Sort;
@@ -61,7 +50,6 @@ import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.search.TopScoreDocCollector;
-import org.apache.lucene.uninverting.UninvertingReader;
 import org.apache.lucene.util.BitSetIterator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
@@ -87,12 +75,24 @@ import org.apache.solr.search.DocIterator;
 import org.apache.solr.search.DocList;
 import org.apache.solr.search.DocSlice;
 import org.apache.solr.search.QParser;
+import org.apache.solr.search.QueryWrapperFilter;
 import org.apache.solr.search.SolrConstantScoreQuery;
 import org.apache.solr.search.SolrIndexSearcher;
 import org.apache.solr.search.SortSpecParsing;
+import org.apache.solr.uninverting.UninvertingReader;
 import org.apache.solr.util.plugin.PluginInfoInitialized;
 import org.apache.solr.util.plugin.SolrCoreAware;

+import com.carrotsearch.hppc.IntHashSet;
+import com.carrotsearch.hppc.IntObjectHashMap;
+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.LongObjectHashMap;
+import com.carrotsearch.hppc.LongObjectMap;
+import com.carrotsearch.hppc.cursors.IntObjectCursor;
+import com.carrotsearch.hppc.cursors.LongCursor;
+import com.carrotsearch.hppc.cursors.LongObjectCursor;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+
 /**
  * The ExpandComponent is designed to work with the CollapsingPostFilter.
  * The CollapsingPostFilter collapses a result set on a field.
@@ -14,16 +14,36 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.index;

package org.apache.solr.index;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader.CoreClosedListener;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiDocValues.MultiSortedDocValues;
import org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues;
import org.apache.lucene.index.MultiDocValues.OrdinalMap;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Bits;

@@ -33,13 +33,13 @@ import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.SegmentReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.search.Sort;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PackedLongValues;
import org.apache.solr.index.SlowCompositeReaderWrapper;

// TODO: remove this and add indexSort specification directly to solrconfig.xml? But for BWC, also accept SortingMergePolicy specification?

@@ -28,6 +28,7 @@ import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;

import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexableField;

@@ -41,22 +42,16 @@ import org.apache.lucene.spatial.SpatialStrategy;
import org.apache.lucene.spatial.query.SpatialArgs;
import org.apache.lucene.spatial.query.SpatialArgsParser;
import org.apache.lucene.spatial.query.SpatialOperation;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SpatialOptions;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.solr.util.DistanceUnits;
import org.apache.solr.util.MapListener;
import org.apache.solr.util.SpatialUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.context.SpatialContextFactory;
import org.locationtech.spatial4j.distance.DistanceUtils;

@@ -66,6 +61,12 @@ import org.locationtech.spatial4j.io.SupportedFormats;
import org.locationtech.spatial4j.shape.Point;
import org.locationtech.spatial4j.shape.Rectangle;
import org.locationtech.spatial4j.shape.Shape;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.Throwables;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

/**
 * Abstract base class for Solr FieldTypes based on a Lucene 4 {@link SpatialStrategy}.

@@ -23,10 +23,10 @@ import java.nio.ByteBuffer;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.util.Base64;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -22,15 +22,14 @@ import java.util.Map;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.BoolDocValues;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;

@@ -40,6 +39,7 @@ import org.apache.solr.analysis.SolrAnalyzer;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.OrdFieldSource;
import org.apache.solr.uninverting.UninvertingReader.Type;
/**
 *
 */

@@ -40,12 +40,12 @@ import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;

/**
 * Field for collated sort keys.

@@ -44,7 +44,7 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldValueQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.response.TextResponseWriter;

@@ -16,12 +16,6 @@
 */
package org.apache.solr.schema;

import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import java.io.IOException;
import java.io.InputStream;
import java.lang.invoke.MethodHandles;

@@ -31,6 +25,12 @@ import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.LegacyIntField;

@@ -45,7 +45,6 @@ import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRef;

@@ -55,6 +54,7 @@ import org.apache.solr.common.EnumFieldValue;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;

@@ -16,17 +16,17 @@
 */
package org.apache.solr.schema;

import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.FileFloatSource;

import java.io.IOException;
import java.util.Map;
import org.apache.solr.uninverting.UninvertingReader.Type;

/** Get values from an external file instead of the index.
 *

@@ -49,7 +49,6 @@ import org.apache.lucene.search.SortedNumericSelector;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRef;

@@ -66,6 +65,7 @@ import org.apache.solr.query.SolrRangeQuery;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.Sorting;
import org.apache.solr.uninverting.UninvertingReader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -18,23 +18,23 @@ package org.apache.solr.schema;

import java.io.IOException;

import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.distance.DistanceUtils;
import org.locationtech.spatial4j.io.GeohashUtils;
import org.locationtech.spatial4j.shape.Point;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.LiteralValueSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SolrConstantScoreQuery;
import org.apache.solr.search.SpatialOptions;
import org.apache.solr.search.function.ValueSourceRangeFilter;
import org.apache.solr.search.function.distance.GeohashHaversineFunction;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.solr.util.SpatialUtils;
import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.distance.DistanceUtils;
import org.locationtech.spatial4j.io.GeohashUtils;
import org.locationtech.spatial4j.shape.Point;

/**
 * This is a class that represents a <a

@@ -51,7 +51,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.solr.uninverting.UninvertingReader;
import org.apache.lucene.util.Version;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

@@ -22,8 +22,8 @@ import java.util.Map;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.VectorValueSource;

@@ -37,7 +37,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.Weight;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.DelegatingCollector;

@@ -45,8 +44,8 @@ import org.apache.solr.search.ExtendedQueryBase;
import org.apache.solr.search.PostFilter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SpatialOptions;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.solr.util.SpatialUtils;

import org.locationtech.spatial4j.context.SpatialContext;
import org.locationtech.spatial4j.distance.DistanceUtils;
import org.locationtech.spatial4j.shape.Point;

@@ -21,7 +21,6 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.locationtech.spatial4j.distance.DistanceUtils;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;

@@ -30,13 +29,14 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.MapSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SpatialOptions;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.locationtech.spatial4j.distance.DistanceUtils;

/**
 * A point type that indexes a point in an n-dimensional space as separate fields and supports range queries.

@@ -33,14 +33,14 @@ import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.AttributeSource.State;
import org.apache.lucene.util.AttributeSource;
import org.apache.solr.analysis.SolrAnalyzer;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.Sorting;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -20,16 +20,16 @@ import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.*;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;

/**
 * Utility Field used for random sorting. It should not be passed a value.

@@ -27,10 +27,10 @@ import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;

public class StrField extends PrimitiveFieldType {

@@ -16,14 +16,16 @@
 */
package org.apache.solr.schema;

import java.io.IOException;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.SortedSetFieldSource;
import org.apache.lucene.search.*;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.QueryBuilder;
import org.apache.solr.common.SolrException;

@@ -31,9 +33,7 @@ import org.apache.solr.query.SolrRangeQuery;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.apache.solr.search.Sorting;

import java.util.Map;
import java.io.IOException;
import org.apache.solr.uninverting.UninvertingReader.Type;

/** <code>TextField</code> is the basic type for configurable text analysis.
 * Analyzers for field types using this implementation should be defined in the schema.

@@ -26,8 +26,8 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;

import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;

@@ -47,7 +47,6 @@ import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRef;

@@ -61,6 +60,7 @@ import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.FunctionRangeQuery;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.ValueSourceRangeFilter;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.solr.util.DateMathParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -25,15 +25,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;

import com.carrotsearch.hppc.FloatArrayList;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.IntLongHashMap;
import com.carrotsearch.hppc.cursors.IntIntCursor;
import com.carrotsearch.hppc.cursors.IntLongCursor;

import org.apache.commons.lang.StringUtils;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;

@@ -49,13 +41,12 @@ import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;

@@ -65,8 +56,8 @@ import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.handler.component.QueryElevationComponent;
import org.apache.solr.handler.component.ResponseBuilder;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;

@@ -75,6 +66,14 @@ import org.apache.solr.schema.StrField;
import org.apache.solr.schema.TrieFloatField;
import org.apache.solr.schema.TrieIntField;
import org.apache.solr.schema.TrieLongField;
import org.apache.solr.uninverting.UninvertingReader;

import com.carrotsearch.hppc.FloatArrayList;
import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.IntLongHashMap;
import com.carrotsearch.hppc.cursors.IntIntCursor;
import com.carrotsearch.hppc.cursors.IntLongCursor;

/**

@@ -20,16 +20,16 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.solr.uninverting.UninvertingReader;

/**
 * Lucene 5.0 removes "accidental" insanity, so you must explicitly

@@ -18,13 +18,12 @@ package org.apache.solr.search;

import java.net.URL;

import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;

import org.apache.solr.core.JmxMonitoredMap.JmxAugmentedSolrInfoMBean;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrInfoMBean;
import org.apache.solr.core.JmxMonitoredMap.JmxAugmentedSolrInfoMBean;
import org.apache.solr.uninverting.UninvertingReader;

/**
 * A SolrInfoMBean that provides introspection of the Solr FieldCache

@@ -53,7 +53,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiPostingsEnum;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.StoredFieldVisitor;

@@ -94,7 +93,6 @@ import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;

@@ -109,6 +107,7 @@ import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.SolrConfig;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrInfoMBean;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.request.SolrRequestInfo;

@@ -122,6 +121,7 @@ import org.apache.solr.schema.TrieFloatField;
import org.apache.solr.schema.TrieIntField;
import org.apache.solr.search.facet.UnInvertedField;
import org.apache.solr.search.stats.StatsSource;
import org.apache.solr.uninverting.UninvertingReader;
import org.apache.solr.update.IndexFingerprint;
import org.apache.solr.update.SolrIndexConfig;
import org.slf4j.Logger;

@@ -91,25 +91,29 @@ public abstract class ValueSourceParser implements NamedListInitializedPlugin {
   */
  public abstract ValueSource parse(FunctionQParser fp) throws SyntaxError;

  /* standard functions */
  public static Map<String, ValueSourceParser> standardValueSourceParsers = new HashMap<>();
  /** standard functions supported by default, filled in static class initialization */
  private static final Map<String, ValueSourceParser> standardVSParsers = new HashMap<>();

  /** standard functions supported by default */
  public static final Map<String, ValueSourceParser> standardValueSourceParsers
      = Collections.unmodifiableMap(standardVSParsers);

  /** Adds a new parser for the name and returns any existing one that was overridden.
   * This is not thread safe.
   */
  public static ValueSourceParser addParser(String name, ValueSourceParser p) {
    return standardValueSourceParsers.put(name, p);
  private static ValueSourceParser addParser(String name, ValueSourceParser p) {
    return standardVSParsers.put(name, p);
  }

  /** Adds a new parser for the name and returns any existing one that was overridden.
   * This is not thread safe.
   */
  public static ValueSourceParser addParser(NamedParser p) {
    return standardValueSourceParsers.put(p.name(), p);
  private static ValueSourceParser addParser(NamedParser p) {
    return standardVSParsers.put(p.name(), p);
  }

  private static void alias(String source, String dest) {
    standardValueSourceParsers.put(dest, standardValueSourceParsers.get(source));
    standardVSParsers.put(dest, standardVSParsers.get(source));
  }

  static {

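The rewrite above hides the mutable parser map behind a read-only view: registration happens only through the now-private addParser overloads during static initialization, while callers keep the public standardValueSourceParsers name. A minimal sketch of that pattern (the names below are illustrative, not the Solr API):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    final class ParserRegistry {
      // mutated only by register(), and only from the static initializer
      private static final Map<String, Runnable> parsers = new HashMap<>();

      // callers get a live but read-only view; put() through it throws
      // UnsupportedOperationException
      public static final Map<String, Runnable> standardParsers =
          Collections.unmodifiableMap(parsers);

      private static void register(String name, Runnable p) {
        parsers.put(name, p);
      }

      static {
        register("noop", () -> {});
      }
    }

Collections.unmodifiableMap wraps rather than copies, so entries added to the private map during class initialization remain visible through the public view.
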
@@ -27,17 +27,16 @@ import java.util.concurrent.atomic.AtomicLong;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.uninverting.DocTermOrds;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.lucene.util.FixedBitSet;
import org.apache.solr.common.SolrException;
import org.apache.solr.core.SolrCore;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.TrieField;
import org.apache.solr.search.BitDocSet;

@@ -45,6 +44,7 @@ import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrCache;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.uninverting.DocTermOrds;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -20,13 +20,12 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;

@@ -34,6 +33,7 @@ import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueInt;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.Insanity;
import org.apache.solr.search.SolrIndexSearcher;

@@ -20,18 +20,18 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.Insanity;
import org.apache.solr.search.SolrIndexSearcher;

@@ -25,7 +25,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.join.JoinUtil;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.Aliases;

@@ -45,6 +44,7 @@ import org.apache.solr.search.QParser;
import org.apache.solr.search.QParserPlugin;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SyntaxError;
import org.apache.solr.uninverting.UninvertingReader;
import org.apache.solr.util.RefCounted;

/**

@@ -14,19 +14,20 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.codecs.PostingsFormat; // javadocs
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

@@ -276,7 +277,7 @@ public class DocTermOrds implements Accountable {
      throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
    }
    //System.out.println("DTO uninvert field=" + field + " prefix=" + termPrefix);
    final long startTime = System.currentTimeMillis();
    final long startTime = System.nanoTime();
    prefix = termPrefix == null ? null : BytesRef.deepCopyOf(termPrefix);

    final int maxDoc = reader.maxDoc();

@@ -444,7 +445,7 @@ public class DocTermOrds implements Accountable {

    numTermsInField = termNum;

    long midPoint = System.currentTimeMillis();
    long midPoint = System.nanoTime();

    if (termInstances == 0) {
      // we didn't invert anything

@@ -533,10 +534,10 @@ public class DocTermOrds implements Accountable {
    }
    indexedTermsArray = indexedTerms.toArray(new BytesRef[indexedTerms.size()]);

    long endTime = System.currentTimeMillis();
    long endTime = System.nanoTime();

    total_time = (int)(endTime-startTime);
    phase1_time = (int)(midPoint-startTime);
    total_time = (int) TimeUnit.MILLISECONDS.convert(endTime-startTime, TimeUnit.NANOSECONDS);
    phase1_time = (int) TimeUnit.MILLISECONDS.convert(midPoint-startTime, TimeUnit.NANOSECONDS);
  }

  /** Number of bytes to represent an unsigned int as a vint. */

|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.lucene.uninverting;
|
||||
package org.apache.solr.uninverting;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
|
@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.io.PrintStream;

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.util.ArrayList;
import java.util.Collection;

@@ -27,8 +27,8 @@ import java.util.Set;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.uninverting.FieldCache.CacheEntry;
import org.apache.lucene.util.MapOfSets;
import org.apache.solr.uninverting.FieldCache.CacheEntry;

/**
 * Provides methods for sanity checking that entries in the FieldCache

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.ArrayList;

@@ -37,8 +37,8 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.uninverting.FieldCache.CacheEntry;
import org.apache.lucene.util.Bits;
import org.apache.solr.uninverting.FieldCache.CacheEntry;

/**
 * A FilterReader that exposes <i>indexed</i> values as if they also had

@@ -18,4 +18,4 @@
/**
 * Support for creating docvalues on-the-fly from the inverted index at runtime.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

@@ -29,8 +29,9 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.uninverting.UninvertingReader;
import org.apache.solr.uninverting.UninvertingReader;

/**
 * Allows access to uninverted docvalues by delete-by-queries.

@@ -24,7 +24,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Terms;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;

@@ -34,6 +33,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.SuppressForbidden;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.SolrIndexSearcher;

@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements.  See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License.  You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-->
<schema name="minimal-atomic-stress" version="1.6">
  <!-- minimal fields needed to stress test atomic updates
       See: TestStressCloudBlindAtomicUpdates
  -->
  <uniqueKey>id</uniqueKey>
  <field name="id" type="string" indexed="true" stored="true" docValues="true"/>
  <field name="_version_" type="long" indexed="false" stored="false" docValues="true" />

  <!-- atomic updates should work on all of these permutations -->
  <field name="long_dv" type="long" indexed="false" stored="false" docValues="true" />
  <field name="long_dv_stored" type="long" indexed="false" stored="true" docValues="true" />
  <field name="long_dv_stored_idx" type="long" indexed="true" stored="true" docValues="true" />
  <field name="long_dv_idx" type="long" indexed="true" stored="false" docValues="true" />
  <field name="long_stored_idx" type="long" indexed="true" stored="true" docValues="false" />

  <fieldType name="string" class="solr.StrField" multiValued="false" indexed="false" stored="false" docValues="false" />
  <fieldType name="long" class="solr.TrieLongField" multiValued="false" indexed="false" stored="false" docValues="false"/>

  <!-- unused, but play nice with existing solrconfig so we don't have to create a new one just for this test -->
  <dynamicField name="*" type="string" indexed="true" stored="true" />
</schema>

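The schema above declares every indexed/stored/docValues permutation of a long field so the stress test that follows can hammer each one with atomic increments. A hedged SolrJ sketch of the single operation that test repeats (the helper name and the assumption that the client's default collection is already set are mine, not part of the patch):

    import java.util.Collections;
    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.request.UpdateRequest;
    import org.apache.solr.common.SolrInputDocument;

    class AtomicIncSketch {
      // Sends {"id": docId, "long_dv": {"inc": delta}}; Solr applies the
      // increment atomically on the shard leader, so concurrent writers
      // never clobber each other's deltas.
      static void inc(SolrClient client, String docId, long delta) throws Exception {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", docId);
        doc.addField("long_dv", Collections.singletonMap("inc", delta));
        new UpdateRequest().add(doc).process(client);
      }
    }
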
@ -0,0 +1,483 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.solr.cloud;
|
||||
|
||||
import java.io.File;
|
||||
import java.lang.invoke.MethodHandles;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.cloud.SolrCloudTestCase;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.client.solrj.response.UpdateResponse;
|
||||
import org.apache.solr.client.solrj.request.schema.SchemaRequest.Field;
|
||||
import org.apache.solr.client.solrj.request.schema.SchemaRequest.FieldType;
|
||||
import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldResponse;
|
||||
import org.apache.solr.client.solrj.response.schema.SchemaResponse.FieldTypeResponse;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
import org.apache.solr.common.SolrDocumentList;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.SolrInputField;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.util.ExecutorUtil;
|
||||
import org.apache.solr.common.util.SimpleOrderedMap;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.util.DefaultSolrThreadFactory;
|
||||
import org.apache.solr.util.TestInjection;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
* Stress test of Atomic Updates in a MinCloud Cluster.
|
||||
*
|
||||
* Focus of test is parallel threads hammering updates on diff docs using random clients/nodes,
|
||||
* Optimistic Concurrency is not used here because of SOLR-8733, instead we just throw lots of
|
||||
* "inc" operations at a numeric field and check that the math works out at the end.
|
||||
*/
|
||||
@Slow
|
||||
@SuppressSSL(bugUrl="SSL overhead seems to cause OutOfMemory when stress testing")
|
||||
public class TestStressCloudBlindAtomicUpdates extends SolrCloudTestCase {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private static final String DEBUG_LABEL = MethodHandles.lookup().lookupClass().getName();
|
||||
private static final String COLLECTION_NAME = "test_col";
|
||||
|
||||
/** A basic client for operations at the cloud level, default collection will be set */
|
||||
private static CloudSolrClient CLOUD_CLIENT;
|
||||
/** One client per node */
|
||||
private static ArrayList<HttpSolrClient> CLIENTS = new ArrayList<>(5);
|
||||
|
||||
/** Service to execute all parallel work
|
||||
* @see #NUM_THREADS
|
||||
*/
|
||||
private static ExecutorService EXEC_SERVICE;
|
||||
|
||||
/** num parallel threads in use by {@link #EXEC_SERVICE} */
|
||||
private static int NUM_THREADS;
|
||||
|
||||
/**
|
||||
* Used as an increment and multiplier when deciding how many docs should be in
|
||||
* the test index. 1 means every doc in the index is a candidate for updates, bigger numbers mean a
|
||||
* larger index is used (so tested docs are more likeely to be spread out in multiple segments)
|
||||
*/
|
||||
private static int DOC_ID_INCR;
|
||||
|
||||
@BeforeClass
|
||||
private static void createMiniSolrCloudCluster() throws Exception {
|
||||
// NOTE: numDocsToCheck uses atLeast, so nightly & multiplier are alreayd a factor in index size
|
||||
// no need to redundently factor them in here as well
|
||||
DOC_ID_INCR = TestUtil.nextInt(random(), 1, 7);
|
||||
|
||||
NUM_THREADS = atLeast(3);
|
||||
EXEC_SERVICE = ExecutorUtil.newMDCAwareFixedThreadPool
|
||||
(NUM_THREADS, new DefaultSolrThreadFactory(DEBUG_LABEL));
|
||||
|
||||
// at least 2, but don't go crazy on nightly/test.multiplier with "atLeast()"
|
||||
final int numShards = TEST_NIGHTLY ? 5 : 2;
|
||||
final int repFactor = 2;
|
||||
final int numNodes = numShards * repFactor;
|
||||
|
||||
final String configName = DEBUG_LABEL + "_config-set";
|
||||
final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");
|
||||
|
||||
configureCluster(numNodes).addConfig(configName, configDir).configure();
|
||||
|
||||
Map<String, String> collectionProperties = new HashMap<>();
|
||||
collectionProperties.put("config", "solrconfig-tlog.xml");
|
||||
collectionProperties.put("schema", "schema-minimal-atomic-stress.xml");
|
||||
|
||||
assertNotNull(cluster.createCollection(COLLECTION_NAME, numShards, repFactor,
|
||||
configName, null, null, collectionProperties));
|
||||
|
||||
CLOUD_CLIENT = cluster.getSolrClient();
|
||||
CLOUD_CLIENT.setDefaultCollection(COLLECTION_NAME);
|
||||
|
||||
waitForRecoveriesToFinish(CLOUD_CLIENT);
|
||||
|
||||
for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
|
||||
CLIENTS.add(getHttpSolrClient(jetty.getBaseUrl() + "/" + COLLECTION_NAME + "/"));
|
||||
}
|
||||
|
||||
// sanity check no one broke the assumptions we make about our schema
|
||||
checkExpectedSchemaType( map("name","long",
|
||||
"class","solr.TrieLongField",
|
||||
"multiValued",Boolean.FALSE,
|
||||
"indexed",Boolean.FALSE,
|
||||
"stored",Boolean.FALSE,
|
||||
"docValues",Boolean.FALSE) );
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
private static void afterClass() throws Exception {
|
||||
TestInjection.reset();
|
||||
ExecutorUtil.shutdownAndAwaitTermination(EXEC_SERVICE);
|
||||
EXEC_SERVICE = null;
|
||||
CLOUD_CLIENT.close(); CLOUD_CLIENT = null;
|
||||
for (HttpSolrClient client : CLIENTS) {
|
||||
client.close();
|
||||
}
|
||||
CLIENTS = null;
|
||||
}
|
||||
|
||||
@Before
|
||||
private void clearCloudCollection() throws Exception {
|
||||
assertEquals(0, CLOUD_CLIENT.deleteByQuery("*:*").getStatus());
|
||||
assertEquals(0, CLOUD_CLIENT.optimize().getStatus());
|
||||
|
||||
TestInjection.reset();
|
||||
|
||||
final int injectionPercentage = (int)Math.ceil(atLeast(1) / 2);
|
||||
final String testInjection = usually() ? "false:0" : ("true:" + injectionPercentage);
|
||||
log.info("TestInjection: fail replica, update pause, tlog pauses: " + testInjection);
|
||||
TestInjection.failReplicaRequests = testInjection;
|
||||
TestInjection.updateLogReplayRandomPause = testInjection;
|
||||
TestInjection.updateRandomPause = testInjection;
|
||||
}
|
||||
|
||||
|
||||
public void test_dv() throws Exception {
|
||||
String field = "long_dv";
|
||||
checkExpectedSchemaField(map("name", field,
|
||||
"type","long",
|
||||
"stored",Boolean.FALSE,
|
||||
"indexed",Boolean.FALSE,
|
||||
"docValues",Boolean.TRUE));
|
||||
|
||||
checkField(field);
|
||||
}
|
||||
public void test_dv_stored() throws Exception {
|
||||
String field = "long_dv_stored";
|
||||
checkExpectedSchemaField(map("name", field,
|
||||
"type","long",
|
||||
"stored",Boolean.TRUE,
|
||||
"indexed",Boolean.FALSE,
|
||||
"docValues",Boolean.TRUE));
|
||||
|
||||
checkField(field);
|
||||
|
||||
}
|
||||
public void test_dv_stored_idx() throws Exception {
|
||||
String field = "long_dv_stored_idx";
|
||||
checkExpectedSchemaField(map("name", field,
|
||||
"type","long",
|
||||
"stored",Boolean.TRUE,
|
||||
"indexed",Boolean.TRUE,
|
||||
"docValues",Boolean.TRUE));
|
||||
|
||||
checkField(field);
|
||||
}
|
||||
public void test_dv_idx() throws Exception {
|
||||
String field = "long_dv_idx";
|
||||
checkExpectedSchemaField(map("name", field,
|
||||
"type","long",
|
||||
"stored",Boolean.FALSE,
|
||||
"indexed",Boolean.TRUE,
|
||||
"docValues",Boolean.TRUE));
|
||||
|
||||
checkField(field);
|
||||
}
|
||||
public void test_stored_idx() throws Exception {
|
||||
String field = "long_stored_idx";
|
||||
checkExpectedSchemaField(map("name", field,
|
||||
"type","long",
|
||||
"stored",Boolean.TRUE,
|
||||
"indexed",Boolean.TRUE,
|
||||
"docValues",Boolean.FALSE));
|
||||
|
||||
checkField(field);
|
||||
}
|
||||
  public void checkField(final String numericFieldName) throws Exception {

    final CountDownLatch abortLatch = new CountDownLatch(1);

    final int numDocsToCheck = atLeast(37);
    final int numDocsInIndex = (numDocsToCheck * DOC_ID_INCR);
    final AtomicLong[] expected = new AtomicLong[numDocsToCheck];

    log.info("Testing " + numericFieldName + ": numDocsToCheck=" + numDocsToCheck + ", numDocsInIndex=" + numDocsInIndex + ", incr=" + DOC_ID_INCR);

    // seed the index & keep track of what docs exist and with what values
    for (int id = 0; id < numDocsInIndex; id++) {
      // NOTE: the field we're mutating is a long, but we seed with a random int, and we
      // will inc/dec by random smaller ints (at most +/-1000 per update), to ensure we
      // never over/under flow: an int seed leaves the full remaining range of a long as headroom
      final int initValue = random().nextInt();
      SolrInputDocument doc = doc(f("id",""+id), f(numericFieldName, initValue));
      UpdateResponse rsp = update(doc).process(CLOUD_CLIENT);
      assertEquals(doc.toString() + " => " + rsp.toString(), 0, rsp.getStatus());
      if (0 == id % DOC_ID_INCR) {
        expected[(int)(id / DOC_ID_INCR)] = new AtomicLong(initValue);
      }
    }
    assertNotNull("Sanity Check no off-by-one in expected init: ", expected[expected.length-1]);

    // sanity check index contents
    assertEquals(0, CLOUD_CLIENT.commit().getStatus());
    assertEquals(numDocsInIndex,
                 CLOUD_CLIENT.query(params("q", "*:*")).getResults().getNumFound());

    // spin up parallel workers to hammer updates
    List<Future<Worker>> results = new ArrayList<Future<Worker>>(NUM_THREADS);
    for (int workerId = 0; workerId < NUM_THREADS; workerId++) {
      Worker worker = new Worker(workerId, expected, abortLatch, new Random(random().nextLong()),
                                 numericFieldName);
      // ask for the Worker to be returned in the Future so we can inspect it
      results.add(EXEC_SERVICE.submit(worker, worker));
    }
    // check the results of all our workers
    for (Future<Worker> r : results) {
      try {
        Worker w = r.get();
        if (! w.getFinishedOk() ) {
          // quick and dirty sanity check if any workers didn't succeed, but didn't throw an exception either
          abortLatch.countDown();
          log.error("worker={} didn't finish ok, but didn't throw exception?", w.workerId);
        }
      } catch (ExecutionException ee) {
        Throwable rootCause = ee.getCause();
        if (rootCause instanceof Error) {
          // low level error, or test assertion failure - either way don't leave it wrapped
          log.error("Worker exec Error, throwing root cause", ee);
          throw (Error) rootCause;
        } else {
          log.error("Worker ExecutionException, re-throwing", ee);
          throw ee;
        }
      }
    }

    assertEquals("Abort latch has changed, why didn't we get an exception from a worker?",
                 1L, abortLatch.getCount());

    TestInjection.reset();
    waitForRecoveriesToFinish(CLOUD_CLIENT);

    // check all the final index contents match our expectations
    int incorrectDocs = 0;
    for (int id = 0; id < numDocsInIndex; id += DOC_ID_INCR) {
      assert 0 == id % DOC_ID_INCR : "WTF? " + id;

      final long expect = expected[(int)(id / DOC_ID_INCR)].longValue();

      final String docId = "" + id;

      // sometimes include an fq on the expected value to ensure the updated values
      // are "visible" for searching
      final SolrParams p = (0 != TestUtil.nextInt(random(), 0,15))
        ? params() : params("fq",numericFieldName + ":" + expect);
      SolrDocument doc = getRandClient(random()).getById(docId, p);

      final boolean foundWithFilter = (null != doc);
      if (! foundWithFilter) {
        // try again w/o fq to see what it does have
        doc = getRandClient(random()).getById(docId);
      }

      Long actual = (null == doc) ? null : (Long) doc.getFirstValue(numericFieldName);
      if (actual == null || expect != actual.longValue() || ! foundWithFilter) {
        log.error("docId={}, foundWithFilter={}, expected={}, actual={}",
                  docId, foundWithFilter, expect, actual);
        incorrectDocs++;
      }

    }
    assertEquals("Some docs had errors -- check logs", 0, incorrectDocs);
  }

  public static final class Worker implements Runnable {
    public final int workerId;
    final AtomicLong[] expected;
    final CountDownLatch abortLatch;
    final Random rand;
    final String updateField;
    final int numDocsToUpdate;
    boolean ok = false; // set to true only on successful completion
    public Worker(int workerId, AtomicLong[] expected, CountDownLatch abortLatch, Random rand,
                  String updateField) {
      this.workerId = workerId;
      this.expected = expected;
      this.abortLatch = abortLatch;
      this.rand = rand;
      this.updateField = updateField;
      this.numDocsToUpdate = atLeast(rand, 25);
    }
    public boolean getFinishedOk() {
      return ok;
    }
    private void doRandomAtomicUpdate(int docId) throws Exception {
      assert 0 == docId % DOC_ID_INCR : "WTF? " + docId;

      final int delta = TestUtil.nextInt(rand, -1000, 1000);
      log.info("worker={}, docId={}, delta={}", workerId, docId, delta);

      SolrClient client = getRandClient(rand);
      SolrInputDocument doc = doc(f("id",""+docId),
                                  f(updateField,Collections.singletonMap("inc",delta)));
      UpdateResponse rsp = update(doc).process(client);
      assertEquals(doc + " => " + rsp, 0, rsp.getStatus());

      AtomicLong counter = expected[(int)(docId / DOC_ID_INCR)];
      assertNotNull("null counter for " + docId + "/" + DOC_ID_INCR, counter);
      counter.getAndAdd(delta);
    }

    public void run() {
      final String origThreadName = Thread.currentThread().getName();
      try {
        Thread.currentThread().setName(origThreadName + "-w" + workerId);
        final int maxDocMultiplier = expected.length-1;
        for (int docIter = 0; docIter < numDocsToUpdate; docIter++) {

          final int docId = DOC_ID_INCR * TestUtil.nextInt(rand, 0, maxDocMultiplier);

          // tweak our thread name to keep track of what we're up to
          Thread.currentThread().setName(origThreadName + "-w" + workerId + "-d" + docId);

          // no matter how random the doc selection may be per thread, ensure
          // every doc that is selected by *a* thread gets at least a couple rapid fire updates
          final int itersPerDoc = atLeast(rand, 2);

          for (int updateIter = 0; updateIter < itersPerDoc; updateIter++) {
            if (0 == abortLatch.getCount()) {
              return;
            }
            doRandomAtomicUpdate(docId);
          }
          if (rand.nextBoolean()) { Thread.yield(); }
        }

      } catch (Error err) {
        log.error(Thread.currentThread().getName(), err);
        abortLatch.countDown();
        throw err;
      } catch (Exception ex) {
        log.error(Thread.currentThread().getName(), ex);
        abortLatch.countDown();
        throw new RuntimeException(ex.getMessage(), ex);
      } finally {
        Thread.currentThread().setName(origThreadName);
      }
      ok = true;
    }
  }

  public static UpdateRequest update(SolrInputDocument... docs) {
    return update(null, docs);
  }
  public static UpdateRequest update(SolrParams params, SolrInputDocument... docs) {
    UpdateRequest r = new UpdateRequest();
    if (null != params) {
      r.setParams(new ModifiableSolrParams(params));
    }
    r.add(Arrays.asList(docs));
    return r;
  }
|
||||
|
||||
public static SolrInputDocument doc(SolrInputField... fields) {
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
for (SolrInputField f : fields) {
|
||||
doc.put(f.getName(), f);
|
||||
}
|
||||
return doc;
|
||||
}

  public static SolrInputField f(String fieldName, Object... values) {
    SolrInputField f = new SolrInputField(fieldName);
    // TODO: soooooooooo stupid (but currently necessary because atomic updates freak out
    // if the Map with the "inc" operation is inside of a collection - even if it's the only "value") ...
    if (1 == values.length) {
      f.setValue(values[0], 1.0F);
    } else {
      f.setValue(values, 1.0F);
    }
    return f;
  }
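
  // For example: f("long_dv", Collections.singletonMap("inc", 5)) stores the bare Map as
  // the field value (so the atomic-update parser sees the "inc" op), while
  // f("long_dv", 5, 6) leaves the Object[] in place as a multi-valued field.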

  public static SolrClient getRandClient(Random rand) {
    int numClients = CLIENTS.size();
    int idx = TestUtil.nextInt(rand, 0, numClients);
    return (idx == numClients) ? CLOUD_CLIENT : CLIENTS.get(idx);
  }
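
  // NOTE: TestUtil.nextInt is inclusive of both bounds, so the extra slot at
  // idx == numClients gives CLOUD_CLIENT the same selection odds as each individual client.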

  public static void waitForRecoveriesToFinish(CloudSolrClient client) throws Exception {
    assert null != client.getDefaultCollection();
    AbstractDistribZkTestBase.waitForRecoveriesToFinish(client.getDefaultCollection(),
                                                        client.getZkStateReader(),
                                                        true, true, 330);
  }

  /**
   * Use the schema API to verify that the specified expected Field exists with those exact attributes.
   * @see #CLOUD_CLIENT
   */
  public static void checkExpectedSchemaField(Map<String,Object> expected) throws Exception {
    String fieldName = (String) expected.get("name");
    assertNotNull("expected contains no name: " + expected, fieldName);
    FieldResponse rsp = new Field(fieldName).process(CLOUD_CLIENT);
    assertNotNull("Field Null Response: " + fieldName, rsp);
    assertEquals("Field Status: " + fieldName + " => " + rsp.toString(), 0, rsp.getStatus());
    assertEquals("Field: " + fieldName, expected, rsp.getField());
  }

  /**
   * Use the schema API to verify that the specified expected FieldType exists with those exact attributes.
   * @see #CLOUD_CLIENT
   */
  public static void checkExpectedSchemaType(Map<String,Object> expected) throws Exception {
    String typeName = (String) expected.get("name");
    assertNotNull("expected contains no type: " + expected, typeName);
    FieldTypeResponse rsp = new FieldType(typeName).process(CLOUD_CLIENT);
    assertNotNull("FieldType Null Response: " + typeName, rsp);
    assertEquals("FieldType Status: " + typeName + " => " + rsp.toString(), 0, rsp.getStatus());
    assertEquals("FieldType: " + typeName, expected, rsp.getFieldType().getAttributes());
  }
}

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.index;
package org.apache.solr.index;

import java.io.IOException;
import java.util.ArrayList;

@@ -23,6 +23,10 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;

@@ -25,12 +25,12 @@ import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.uninverting.DocTermOrds;
import org.apache.lucene.util.BytesRef;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.FacetParams;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.uninverting.DocTermOrds;
import org.apache.solr.util.RefCounted;
import org.junit.After;
import org.junit.BeforeClass;

@@ -42,13 +42,12 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortField.Type;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.uninverting.UninvertingReader;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;

@@ -56,6 +55,7 @@ import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.uninverting.UninvertingReader;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -14,13 +14,13 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
@@ -31,28 +31,28 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.index.SlowCompositeReaderWrapper;

// TODO:
//   - test w/ del docs

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

@@ -31,11 +31,11 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;

@@ -44,16 +44,15 @@ import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LogDocMergePolicy;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;

@@ -65,6 +64,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;

@@ -14,26 +14,26 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.FieldCacheSanityChecker.Insanity;
import org.apache.lucene.uninverting.FieldCacheSanityChecker.InsanityType;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.uninverting.FieldCacheSanityChecker.Insanity;
import org.apache.solr.uninverting.FieldCacheSanityChecker.InsanityType;

public class TestFieldCacheSanityChecker extends LuceneTestCase {

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.Collections;

@@ -51,7 +51,7 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.ArrayList;

@@ -50,12 +50,12 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.uninverting.UninvertingReader.Type;

/** random sorting tests with uninversion */
public class TestFieldCacheSortRandom extends LuceneTestCase {

@@ -14,9 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;

import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
package org.apache.solr.uninverting;

import java.util.ArrayList;
import java.util.Collections;

@@ -31,26 +29,28 @@ import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.index.SlowCompositeReaderWrapper;

import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;

public class TestFieldCacheVsDocValues extends LuceneTestCase {

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.ArrayList;

@@ -23,17 +23,18 @@ import java.util.List;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SortedDocValues;

@@ -183,7 +184,7 @@ public class TestFieldCacheWithThreads extends LuceneTestCase {

    final LeafReader sr = getOnlyLeafReader(r);

    final long END_TIME = System.currentTimeMillis() + (TEST_NIGHTLY ? 30 : 1);
    final long END_TIME = System.nanoTime() + TimeUnit.NANOSECONDS.convert((TEST_NIGHTLY ? 30 : 1), TimeUnit.SECONDS);

    final int NUM_THREADS = TestUtil.nextInt(random(), 1, 10);
    Thread[] threads = new Thread[NUM_THREADS];

@@ -201,7 +202,7 @@ public class TestFieldCacheWithThreads extends LuceneTestCase {
          } catch (IOException ioe) {
            throw new RuntimeException(ioe);
          }
          while(System.currentTimeMillis() < END_TIME) {
          while(System.nanoTime() < END_TIME) {
            final SortedDocValues source;
            source = stringDVDirect;

@@ -14,12 +14,11 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;

import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

@@ -27,9 +26,9 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;

@@ -37,14 +36,13 @@ import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;

@@ -54,6 +52,7 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.util.HashMap;
import java.util.Map;

@@ -33,9 +33,9 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.util.HashMap;
import java.util.Map;

@@ -33,9 +33,9 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

@@ -14,42 +14,42 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.uninverting;
package org.apache.solr.uninverting;

import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.Collections;

import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.uninverting.UninvertingReader.Type;

public class TestUninvertingReader extends LuceneTestCase {

@@ -22,25 +22,24 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.SolrInputField;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.core.SolrCore;
import org.apache.solr.index.SlowCompositeReaderWrapper;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.ResultContext;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.DocList;
import org.apache.solr.response.SolrQueryResponse;
import org.apache.solr.schema.CopyField;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.SolrQueryResponse;

import org.apache.solr.search.DocList;
import org.apache.solr.search.SolrIndexSearcher;
import org.junit.BeforeClass;
import org.junit.Test;