mirror of https://github.com/apache/lucene.git
LUCENE-5666: current state to branch
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/lucene5666@1593790 13f79535-47bb-0310-9956-ffa450edef68
parent 7ddf827761
commit 9df7b5c7cf
BenchmarkTestCase.java

@@ -31,6 +31,7 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 
 /** Base class for all Benchmark unit tests. */
+@SuppressSysoutChecks(bugUrl = "very noisy")
 public abstract class BenchmarkTestCase extends LuceneTestCase {
 
   private static File WORKDIR;
TestPerfTasksLogic.java

@@ -52,17 +52,12 @@ import org.apache.lucene.index.LogMergePolicy;
 import org.apache.lucene.index.MultiFields;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.index.SerialMergeScheduler;
 import org.apache.lucene.index.SlowCompositeReaderWrapper;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.TestUtil;
 import org.apache.lucene.util.TestUtil;
+import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 
 /**
  * Test very simply that perf tasks - simple algorithms - are doing what they should.

@@ -328,7 +323,7 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
         "content.source.forever=true",
         "directory=RAMDirectory",
         "doc.reuse.fields=false",
-        "doc.stored=false",
+        "doc.stored=true",
         "doc.tokenized=false",
         "doc.index.props=true",
         "# ----- alg ",

@@ -344,11 +339,11 @@ public class TestPerfTasksLogic extends BenchmarkTestCase {
     Benchmark benchmark = execBenchmark(algLines);
     DirectoryReader r = DirectoryReader.open(benchmark.getRunData().getDirectory());
     SortedDocValues idx = FieldCache.DEFAULT.getTermsIndex(SlowCompositeReaderWrapper.wrap(r), "country");
 
     final int maxDoc = r.maxDoc();
     assertEquals(1000, maxDoc);
     for(int i=0;i<1000;i++) {
-      assertTrue("doc " + i + " has null country", idx.getOrd(i) != -1);
+      assertNotNull("doc " + i + " has null country", r.document(i).getField("country"));
     }
     r.close();
   }
TestPerfTasksParse.java

@@ -42,6 +42,7 @@ import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
 import conf.ConfLoader;
 
 /** Test very simply that perf tasks are parses as expected. */
+@SuppressSysoutChecks(bugUrl = "very noisy")
 public class TestPerfTasksParse extends LuceneTestCase {
 
   static final String NEW_LINE = System.getProperty("line.separator");
org/apache/lucene/document/DoubleDocValuesField.java

@@ -18,14 +18,13 @@ package org.apache.lucene.document;
  */
 
 import org.apache.lucene.index.AtomicReader; // javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
 
 /**
  * Syntactic sugar for encoding doubles as NumericDocValues
  * via {@link Double#doubleToRawLongBits(double)}.
  * <p>
  * Per-document double values can be retrieved via
- * {@link FieldCache#getDoubles(AtomicReader, String, boolean)}.
+ * {@link AtomicReader#getNumericDocValues(String)}.
  * <p>
  * <b>NOTE</b>: In most all cases this will be rather inefficient,
  * requiring eight bytes per document. Consider encoding double
org/apache/lucene/document/DoubleField.java

@@ -18,8 +18,8 @@ package org.apache.lucene.document;
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;

@@ -57,7 +57,7 @@ import org.apache.lucene.util.NumericUtils;
  * NumericRangeFilter}. To sort according to a
  * <code>DoubleField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>DoubleField</code>
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>DoubleField</code> to
  * the same document more than once. Range querying and
org/apache/lucene/document/FloatDocValuesField.java

@@ -18,14 +18,13 @@ package org.apache.lucene.document;
  */
 
 import org.apache.lucene.index.AtomicReader; // javadocs
-import org.apache.lucene.search.FieldCache; // javadocs
 
 /**
  * Syntactic sugar for encoding floats as NumericDocValues
  * via {@link Float#floatToRawIntBits(float)}.
  * <p>
  * Per-document floating point values can be retrieved via
- * {@link FieldCache#getFloats(AtomicReader, String, boolean)}.
+ * {@link AtomicReader#getNumericDocValues(String)}.
  * <p>
  * <b>NOTE</b>: In most all cases this will be rather inefficient,
  * requiring four bytes per document. Consider encoding floating
org/apache/lucene/document/FloatField.java

@@ -18,8 +18,8 @@ package org.apache.lucene.document;
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;

@@ -57,7 +57,7 @@ import org.apache.lucene.util.NumericUtils;
  * NumericRangeFilter}. To sort according to a
  * <code>FloatField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>FloatField</code>
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>FloatField</code> to
  * the same document more than once. Range querying and
org/apache/lucene/document/IntField.java

@@ -18,8 +18,8 @@ package org.apache.lucene.document;
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;

@@ -57,7 +57,7 @@ import org.apache.lucene.util.NumericUtils;
  * NumericRangeFilter}. To sort according to a
  * <code>IntField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#INT}. <code>IntField</code>
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>IntField</code> to
  * the same document more than once. Range querying and
org/apache/lucene/document/LongField.java

@@ -18,8 +18,8 @@ package org.apache.lucene.document;
  */
 
 import org.apache.lucene.analysis.NumericTokenStream; // javadocs
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.FieldInfo.IndexOptions;
-import org.apache.lucene.search.FieldCache; // javadocs
 import org.apache.lucene.search.NumericRangeFilter; // javadocs
 import org.apache.lucene.search.NumericRangeQuery; // javadocs
 import org.apache.lucene.util.NumericUtils;

@@ -67,7 +67,7 @@ import org.apache.lucene.util.NumericUtils;
  * NumericRangeFilter}. To sort according to a
  * <code>LongField</code>, use the normal numeric sort types, eg
  * {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LongField</code>
- * values can also be loaded directly from {@link FieldCache}.</p>
+ * values can also be loaded directly from {@link AtomicReader#getNumericDocValues}.</p>
  *
  * <p>You may add the same field name as an <code>LongField</code> to
  * the same document more than once. Range querying and
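
The six document-class hunks above all make the same javadoc swap: numeric field values are now read per segment via AtomicReader#getNumericDocValues rather than through FieldCache. A minimal sketch of what that pairing looks like at indexing time (illustrative only, against the 4.x APIs this branch targets; the field name and value are made up):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleDocValuesField;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.Field;

class IndexingSketch {
  static Document priceDoc() {
    Document doc = new Document();
    // Indexed trie terms: answers NumericRangeQuery/NumericRangeFilter.
    doc.add(new DoubleField("price", 19.95, Field.Store.NO));
    // Column-stride doc values: answers sorting and per-document reads,
    // stored as raw bits via Double.doubleToRawLongBits.
    doc.add(new DoubleDocValuesField("price", 19.95));
    return doc;
  }
}
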
org/apache/lucene/index/DocValues.java

@@ -17,6 +17,8 @@ package org.apache.lucene.index;
  * limitations under the License.
  */
 
+import java.io.IOException;
+
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 

@@ -159,4 +161,72 @@ public final class DocValues {
       }
     };
   }
+
+  // some helpers, for transition from fieldcache apis.
+  // as opposed to the AtomicReader apis (which must be strict for consistency), these are lenient
+
+  /**
+   * Returns NumericDocValues for the reader, or {@link #EMPTY_NUMERIC} if it has none.
+   */
+  public static NumericDocValues getNumeric(AtomicReader in, String field) throws IOException {
+    NumericDocValues dv = in.getNumericDocValues(field);
+    if (dv == null) {
+      return EMPTY_NUMERIC;
+    } else {
+      return dv;
+    }
+  }
+
+  /**
+   * Returns BinaryDocValues for the reader, or {@link #EMPTY_BINARY} if it has none.
+   */
+  public static BinaryDocValues getBinary(AtomicReader in, String field) throws IOException {
+    BinaryDocValues dv = in.getBinaryDocValues(field);
+    if (dv == null) {
+      dv = in.getSortedDocValues(field);
+      if (dv == null) {
+        return EMPTY_BINARY;
+      }
+    }
+    return dv;
+  }
+
+  /**
+   * Returns SortedDocValues for the reader, or {@link #EMPTY_SORTED} if it has none.
+   */
+  public static SortedDocValues getSorted(AtomicReader in, String field) throws IOException {
+    SortedDocValues dv = in.getSortedDocValues(field);
+    if (dv == null) {
+      return EMPTY_SORTED;
+    } else {
+      return dv;
+    }
+  }
+
+  /**
+   * Returns SortedSetDocValues for the reader, or {@link #EMPTY_SORTED_SET} if it has none.
+   */
+  public static SortedSetDocValues getSortedSet(AtomicReader in, String field) throws IOException {
+    SortedSetDocValues dv = in.getSortedSetDocValues(field);
+    if (dv == null) {
+      SortedDocValues sorted = in.getSortedDocValues(field);
+      if (sorted == null) {
+        return EMPTY_SORTED_SET;
+      }
+      return singleton(sorted);
+    }
+    return dv;
+  }
+
+  /**
+   * Returns Bits for the reader, or {@link Bits} matching nothing if it has none.
+   */
+  public static Bits getDocsWithField(AtomicReader in, String field) throws IOException {
+    Bits dv = in.getDocsWithField(field);
+    if (dv == null) {
+      return new Bits.MatchNoBits(in.maxDoc());
+    } else {
+      return dv;
+    }
+  }
 }
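
The new DocValues helpers above are deliberately lenient: a missing field yields an empty instance instead of null, as the transition comment notes. A sketch of a caller migrating from FieldCache.DEFAULT.getTermsIndex to the helper (hypothetical method; "country" is an example field):

import java.io.IOException;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.BytesRef;

class LenientLookupSketch {
  // Returns the country term for a document, or null if the doc has none.
  static BytesRef countryOf(AtomicReader reader, int docID) throws IOException {
    // Never null: a reader without this field gets EMPTY_SORTED, which
    // reports ord -1 (missing) for every document.
    SortedDocValues dv = DocValues.getSorted(reader, "country");
    int ord = dv.getOrd(docID);
    if (ord == -1) {
      return null;
    }
    BytesRef term = new BytesRef();
    dv.lookupOrd(ord, term);
    return term;
  }
}
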
org/apache/lucene/index/FilterAtomicReader.java

@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.Iterator;
 
 import org.apache.lucene.search.CachingWrapperFilter;
-import org.apache.lucene.search.FieldCache;
 import org.apache.lucene.util.AttributeSource;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;

@@ -38,8 +37,8 @@ import org.apache.lucene.util.BytesRef;
  * to override {@link #numDocs()} as well and vice-versa.
  * <p><b>NOTE</b>: If this {@link FilterAtomicReader} does not change the
  * content the contained reader, you could consider overriding
- * {@link #getCoreCacheKey()} so that {@link FieldCache} and
- * {@link CachingWrapperFilter} share the same entries for this atomic reader
+ * {@link #getCoreCacheKey()} so that
+ * {@link CachingWrapperFilter} shares the same entries for this atomic reader
  * and the wrapped one. {@link #getCombinedCoreAndDeletesKey()} could be
 * overridden as well if the {@link #getLiveDocs() live docs} are not changed
  * either.
org/apache/lucene/index/SegmentReader.java

@@ -34,7 +34,7 @@ import org.apache.lucene.codecs.FieldInfosFormat;
 import org.apache.lucene.codecs.StoredFieldsReader;
 import org.apache.lucene.codecs.TermVectorsReader;
 import org.apache.lucene.index.FieldInfo.DocValuesType;
-import org.apache.lucene.search.FieldCache;
+import org.apache.lucene.search.CachingWrapperFilter;
 import org.apache.lucene.store.CompoundFileDirectory;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IOContext;

@@ -525,7 +525,7 @@ public final class SegmentReader extends AtomicReader {
    * sharing the same core are closed. At this point it
    * is safe for apps to evict this reader from any caches
    * keyed on {@link #getCoreCacheKey}. This is the same
-   * interface that {@link FieldCache} uses, internally,
+   * interface that {@link CachingWrapperFilter} uses, internally,
    * to evict entries.</p>
    *
    * @lucene.experimental
org/apache/lucene/search/DocTermOrdsRangeFilter.java

@@ -18,13 +18,15 @@ package org.apache.lucene.search;
 
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 
 /**
- * A range filter built on top of a cached multi-valued term field (in {@link FieldCache}).
+ * A range filter built on top of a cached multi-valued term field (from {@link AtomicReader#getSortedSetDocValues}).
  *
  * <p>Like {@link FieldCacheRangeFilter}, this is just a specialized range query versus
  * using a TermRangeQuery with {@link DocTermOrdsRewriteMethod}: it will only do

@@ -51,7 +53,7 @@ public abstract class DocTermOrdsRangeFilter extends Filter {
   public abstract DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException;
 
   /**
-   * Creates a BytesRef range filter using {@link FieldCache#getTermsIndex}. This works with all
+   * Creates a BytesRef range filter using {@link AtomicReader#getSortedSetDocValues}. This works with all
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */

@@ -59,7 +61,7 @@ public abstract class DocTermOrdsRangeFilter extends Filter {
     return new DocTermOrdsRangeFilter(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final SortedSetDocValues docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
+        final SortedSetDocValues docTermOrds = DocValues.getSortedSet(context.reader(), field);
         final long lowerPoint = lowerVal == null ? -1 : docTermOrds.lookupTerm(lowerVal);
         final long upperPoint = upperVal == null ? -1 : docTermOrds.lookupTerm(upperVal);
 
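
For reference, the rewritten factory is used the same way as before; only the ordinals now come from the reader's sorted-set doc values instead of FieldCache uninversion. A hedged usage sketch (the field name and bounds are examples, not from the patch):

import org.apache.lucene.search.DocTermOrdsRangeFilter;
import org.apache.lucene.search.Filter;
import org.apache.lucene.util.BytesRef;

class RangeFilterSketch {
  // Matches docs whose multi-valued "tags" field has a term in [apple, mango].
  static Filter tagRange() {
    return DocTermOrdsRangeFilter.newBytesRefRange(
        "tags", new BytesRef("apple"), new BytesRef("mango"), true, true);
  }
}
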
org/apache/lucene/search/DocTermOrdsRewriteMethod.java

@@ -20,6 +20,7 @@ package org.apache.lucene.search;
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.index.Terms;

@@ -83,7 +84,7 @@ public final class DocTermOrdsRewriteMethod extends MultiTermQuery.RewriteMethod
    */
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-    final SortedSetDocValues docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), query.field);
+    final SortedSetDocValues docTermOrds = DocValues.getSortedSet(context.reader(), query.field);
     // Cannot use FixedBitSet because we require long index (ord):
     final LongBitSet termSet = new LongBitSet(docTermOrds.getValueCount());
     TermsEnum termsEnum = query.getTermsEnum(new Terms() {
org/apache/lucene/search/FieldCacheRangeFilter.java

@@ -24,13 +24,16 @@ import org.apache.lucene.document.IntField; // for javadocs
 import org.apache.lucene.document.LongField; // for javadocs
 import org.apache.lucene.index.AtomicReader; // for javadocs
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.NumericUtils;
 
 /**
- * A range filter built on top of a cached single term field (in {@link FieldCache}).
+ * A range filter built on top of an uninverted single term field
+ * (from {@link AtomicReader#getNumericDocValues(String)}).
  *
  * <p>{@code FieldCacheRangeFilter} builds a single cache for the field the first time it is used.
  * Each subsequent {@code FieldCacheRangeFilter} on the same field then reuses this cache,

@@ -47,9 +50,10 @@ import org.apache.lucene.util.NumericUtils;
  * LongField} or {@link DoubleField}. But
  * it has the problem that it only works with exact one value/document (see below).
  *
- * <p>As with all {@link FieldCache} based functionality, {@code FieldCacheRangeFilter} is only valid for
+ * <p>As with all {@link AtomicReader#getNumericDocValues} based functionality,
+ * {@code FieldCacheRangeFilter} is only valid for
  * fields which exact one term for each document (except for {@link #newStringRange}
- * where 0 terms are also allowed). Due to a restriction of {@link FieldCache}, for numeric ranges
+ * where 0 terms are also allowed). Due to historical reasons, for numeric ranges
  * all terms that do not have a numeric value, 0 is assumed.
  *
  * <p>Thus it works on dates, prices and other single value fields but will not work on

@@ -57,20 +61,19 @@ import org.apache.lucene.util.NumericUtils;
  * there is only a single term.
  *
  * <p>This class does not have an constructor, use one of the static factory methods available,
- * that create a correct instance for different data types supported by {@link FieldCache}.
+ * that create a correct instance for different data types.
  */
+// nocommit: rename this class
 // TODO: use docsWithField to handle empty properly
 public abstract class FieldCacheRangeFilter<T> extends Filter {
   final String field;
-  final FieldCache.Parser parser;
   final T lowerVal;
   final T upperVal;
   final boolean includeLower;
   final boolean includeUpper;
 
-  private FieldCacheRangeFilter(String field, FieldCache.Parser parser, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
+  private FieldCacheRangeFilter(String field, T lowerVal, T upperVal, boolean includeLower, boolean includeUpper) {
     this.field = field;
-    this.parser = parser;
     this.lowerVal = lowerVal;
     this.upperVal = upperVal;
     this.includeLower = includeLower;

@@ -82,15 +85,15 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
   public abstract DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException;
 
   /**
-   * Creates a string range filter using {@link FieldCache#getTermsIndex}. This works with all
+   * Creates a string range filter using {@link AtomicReader#getSortedDocValues(String)}. This works with all
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   public static FieldCacheRangeFilter<String> newStringRange(String field, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<String>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
+    return new FieldCacheRangeFilter<String>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final SortedDocValues fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
+        final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
         final int lowerPoint = lowerVal == null ? -1 : fcsi.lookupTerm(new BytesRef(lowerVal));
         final int upperPoint = upperVal == null ? -1 : fcsi.lookupTerm(new BytesRef(upperVal));
 

@@ -138,16 +141,16 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
   }
 
   /**
-   * Creates a BytesRef range filter using {@link FieldCache#getTermsIndex}. This works with all
+   * Creates a BytesRef range filter using {@link AtomicReader#getSortedDocValues(String)}. This works with all
    * fields containing zero or one term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   // TODO: bogus that newStringRange doesnt share this code... generics hell
   public static FieldCacheRangeFilter<BytesRef> newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<BytesRef>(field, null, lowerVal, upperVal, includeLower, includeUpper) {
+    return new FieldCacheRangeFilter<BytesRef>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-        final SortedDocValues fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
+        final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
         final int lowerPoint = lowerVal == null ? -1 : fcsi.lookupTerm(lowerVal);
         final int upperPoint = upperVal == null ? -1 : fcsi.lookupTerm(upperVal);
 

@@ -195,21 +198,12 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
   }
 
   /**
-   * Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getSortedDocValues(String)}. This works with all
    * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   public static FieldCacheRangeFilter<Integer> newIntRange(String field, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
-    return newIntRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getInts(AtomicReader,String,FieldCache.IntParser,boolean)}. This works with all
-   * int fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Integer> newIntRange(String field, FieldCache.IntParser parser, Integer lowerVal, Integer upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Integer>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+    return new FieldCacheRangeFilter<Integer>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         final int inclusiveLowerPoint, inclusiveUpperPoint;
@@ -233,11 +227,11 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
 
-        final FieldCache.Ints values = FieldCache.DEFAULT.getInts(context.reader(), field, (FieldCache.IntParser) parser, false);
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
         return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
-            final int value = values.get(doc);
+            final int value = (int) values.get(doc);
             return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
           }
         };

@@ -246,21 +240,12 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
   }
 
   /**
-   * Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   public static FieldCacheRangeFilter<Long> newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return newLongRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getLongs(AtomicReader,String,FieldCache.LongParser,boolean)}. This works with all
-   * long fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Long> newLongRange(String field, FieldCache.LongParser parser, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Long>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+    return new FieldCacheRangeFilter<Long>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         final long inclusiveLowerPoint, inclusiveUpperPoint;

@@ -284,7 +269,7 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
 
-        final FieldCache.Longs values = FieldCache.DEFAULT.getLongs(context.reader(), field, (FieldCache.LongParser) parser, false);
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
         return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {

@@ -297,21 +282,12 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
   }
 
   /**
-   * Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   public static FieldCacheRangeFilter<Float> newFloatRange(String field, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
-    return newFloatRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getFloats(AtomicReader,String,FieldCache.FloatParser,boolean)}. This works with all
-   * float fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Float> newFloatRange(String field, FieldCache.FloatParser parser, Float lowerVal, Float upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Float>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+    return new FieldCacheRangeFilter<Float>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         // we transform the floating point numbers to sortable integers
@@ -339,11 +315,11 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
 
-        final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), field, (FieldCache.FloatParser) parser, false);
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
        return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
-            final float value = values.get(doc);
+            final float value = Float.intBitsToFloat((int)values.get(doc));
             return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
           }
         };

@@ -352,21 +328,12 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
   }
 
   /**
-   * Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,boolean)}. This works with all
+   * Creates a numeric range filter using {@link AtomicReader#getNumericDocValues(String)}. This works with all
    * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
    * of the values to <code>null</code>.
    */
   public static FieldCacheRangeFilter<Double> newDoubleRange(String field, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
-    return newDoubleRange(field, null, lowerVal, upperVal, includeLower, includeUpper);
-  }
-
-  /**
-   * Creates a numeric range filter using {@link FieldCache#getDoubles(AtomicReader,String,FieldCache.DoubleParser,boolean)}. This works with all
-   * double fields containing exactly one numeric term in the field. The range can be half-open by setting one
-   * of the values to <code>null</code>.
-   */
-  public static FieldCacheRangeFilter<Double> newDoubleRange(String field, FieldCache.DoubleParser parser, Double lowerVal, Double upperVal, boolean includeLower, boolean includeUpper) {
-    return new FieldCacheRangeFilter<Double>(field, parser, lowerVal, upperVal, includeLower, includeUpper) {
+    return new FieldCacheRangeFilter<Double>(field, lowerVal, upperVal, includeLower, includeUpper) {
       @Override
       public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
         // we transform the floating point numbers to sortable integers

@@ -394,12 +361,12 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
         if (inclusiveLowerPoint > inclusiveUpperPoint)
           return null;
 
-        final FieldCache.Doubles values = FieldCache.DEFAULT.getDoubles(context.reader(), field, (FieldCache.DoubleParser) parser, false);
+        final NumericDocValues values = DocValues.getNumeric(context.reader(), field);
         // ignore deleted docs if range doesn't contain 0
         return new FieldCacheDocIdSet(context.reader().maxDoc(), acceptDocs) {
           @Override
           protected boolean matchDoc(int doc) {
-            final double value = values.get(doc);
+            final double value = Double.longBitsToDouble(values.get(doc));
             return value >= inclusiveLowerPoint && value <= inclusiveUpperPoint;
           }
         };

@@ -431,7 +398,6 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
     ) { return false; }
     if (this.lowerVal != null ? !this.lowerVal.equals(other.lowerVal) : other.lowerVal != null) return false;
     if (this.upperVal != null ? !this.upperVal.equals(other.upperVal) : other.upperVal != null) return false;
-    if (this.parser != null ? !this.parser.equals(other.parser) : other.parser != null) return false;
     return true;
   }
 

@@ -441,7 +407,6 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
     h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204;
     h = (h << 1) | (h >>> 31); // rotate to distinguish lower from upper
     h ^= (upperVal != null) ? upperVal.hashCode() : -1674416163;
-    h ^= (parser != null) ? parser.hashCode() : -1572457324;
     h ^= (includeLower ? 1549299360 : -365038026) ^ (includeUpper ? 1721088258 : 1948649653);
     return h;
   }

@@ -460,7 +425,4 @@ public abstract class FieldCacheRangeFilter<T> extends Filter {
 
   /** Returns the upper value of this range filter */
   public T getUpperVal() { return upperVal; }
-
-  /** Returns the current numeric parser ({@code null} for {@code T} is {@code String}} */
-  public FieldCache.Parser getParser() { return parser; }
 }
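
The float and double branches above work because numeric doc values store the raw IEEE-754 bit pattern, and the bit conversions are exact inverses. A small self-check of that round trip (not part of the patch):

class BitRoundTripSketch {
  public static void main(String[] args) {
    // Encode at index time (what DoubleDocValuesField does) ...
    long bits = Double.doubleToRawLongBits(19.95);
    // ... decode at search time (what the rewritten matchDoc does).
    assert Double.longBitsToDouble(bits) == 19.95;

    int fbits = Float.floatToRawIntBits(3.5f);
    assert Float.intBitsToFloat(fbits) == 3.5f;
  }
}
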
org/apache/lucene/search/FieldCacheRewriteMethod.java

@@ -20,6 +20,7 @@ package org.apache.lucene.search;
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedDocValues;
 import org.apache.lucene.index.Terms;

@@ -28,11 +29,12 @@ import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.LongBitSet;
 
 /**
- * Rewrites MultiTermQueries into a filter, using the FieldCache for term enumeration.
+ * Rewrites MultiTermQueries into a filter, using DocValues for term enumeration.
  * <p>
 * This can be used to perform these queries against an unindexed docvalues field.
  * @lucene.experimental
  */
+// nocommit: rename this class
 public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod {
 
   @Override

@@ -83,7 +85,7 @@ public final class FieldCacheRewriteMethod extends MultiTermQuery.RewriteMethod
    */
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, final Bits acceptDocs) throws IOException {
-    final SortedDocValues fcsi = FieldCache.DEFAULT.getTermsIndex(context.reader(), query.field);
+    final SortedDocValues fcsi = DocValues.getSorted(context.reader(), query.field);
     // Cannot use FixedBitSet because we require long index (ord):
     final LongBitSet termSet = new LongBitSet(fcsi.getValueCount());
     TermsEnum termsEnum = query.getTermsEnum(new Terms() {
org/apache/lucene/search/FieldCacheTermsFilter.java

@@ -20,6 +20,7 @@ package org.apache.lucene.search;
 import java.io.IOException;
 
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.DocsEnum; // javadoc @link
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.SortedDocValues;

@@ -41,17 +42,6 @@ import org.apache.lucene.util.FixedBitSet;
  * also have different performance characteristics, as
  * described below.
  *
- * <p/>
- *
- * The first invocation of this filter on a given field will
- * be slower, since a {@link SortedDocValues} must be
- * created. Subsequent invocations using the same field
- * will re-use this cache. However, as with all
- * functionality based on {@link FieldCache}, persistent RAM
- * is consumed to hold the cache, and is not freed until the
- * {@link IndexReader} is closed. In contrast, TermsFilter
- * has no persistent RAM consumption.
- *
  *
  * <p/>
  *

@@ -113,13 +103,9 @@ public class FieldCacheTermsFilter extends Filter {
       this.terms[i] = new BytesRef(terms[i]);
   }
 
-  public FieldCache getFieldCache() {
-    return FieldCache.DEFAULT;
-  }
-
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
-    final SortedDocValues fcsi = getFieldCache().getTermsIndex(context.reader(), field);
+    final SortedDocValues fcsi = DocValues.getSorted(context.reader(), field);
     final FixedBitSet bits = new FixedBitSet(fcsi.getValueCount());
     for (int i=0;i<terms.length;i++) {
       int ord = fcsi.lookupTerm(terms[i]);
org/apache/lucene/search/FieldComparator.java

@@ -19,13 +19,12 @@ package org.apache.lucene.search;
 
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
 import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DocValues;
+import org.apache.lucene.index.NumericDocValues;
 import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.search.FieldCache.DoubleParser;
-import org.apache.lucene.search.FieldCache.FloatParser;
-import org.apache.lucene.search.FieldCache.IntParser;
-import org.apache.lucene.search.FieldCache.LongParser;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
 

@@ -82,7 +81,7 @@ import org.apache.lucene.util.BytesRef;
  *       when the search is switching to the next segment.
  *       You may need to update internal state of the
  *       comparator, for example retrieving new values from
- *       the {@link FieldCache}.
+ *       DocValues.
  *
  *  <li> {@link #value} Return the sort value stored in
  *       the specified slot. This is only called at the end

@@ -236,7 +235,7 @@ public abstract class FieldComparator<T> {
     @Override
     public FieldComparator<T> setNextReader(AtomicReaderContext context) throws IOException {
       if (missingValue != null) {
-        docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
+        docsWithField = DocValues.getDocsWithField(context.reader(), field);
         // optimization to remove unneeded checks on the bit interface:
         if (docsWithField instanceof Bits.MatchAllBits) {
           docsWithField = null;

@@ -249,18 +248,16 @@ public abstract class FieldComparator<T> {
   }
 
   /** Parses field's values as double (using {@link
-   *  FieldCache#getDoubles} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues} and sorts by ascending value */
   public static final class DoubleComparator extends NumericComparator<Double> {
     private final double[] values;
-    private final DoubleParser parser;
-    private FieldCache.Doubles currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private double bottom;
     private double topValue;
 
-    DoubleComparator(int numHits, String field, FieldCache.Parser parser, Double missingValue) {
+    DoubleComparator(int numHits, String field, Double missingValue) {
       super(field, missingValue);
       values = new double[numHits];
-      this.parser = (DoubleParser) parser;
     }
 
     @Override

@@ -270,7 +267,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public int compareBottom(int doc) {
-      double v2 = currentReaderValues.get(doc);
+      double v2 = Double.longBitsToDouble(currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {

@@ -282,7 +279,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public void copy(int slot, int doc) {
-      double v2 = currentReaderValues.get(doc);
+      double v2 = Double.longBitsToDouble(currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {

@@ -294,9 +291,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public FieldComparator<Double> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getDoubles(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
 

@@ -317,7 +312,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public int compareTop(int doc) {
-      double docValue = currentReaderValues.get(doc);
+      double docValue = Double.longBitsToDouble(currentReaderValues.get(doc));
       // Test for docValue == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {
@@ -328,18 +323,16 @@ public abstract class FieldComparator<T> {
   }
 
   /** Parses field's values as float (using {@link
-   *  FieldCache#getFloats} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues(String)} and sorts by ascending value */
   public static final class FloatComparator extends NumericComparator<Float> {
     private final float[] values;
-    private final FloatParser parser;
-    private FieldCache.Floats currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private float bottom;
     private float topValue;
 
-    FloatComparator(int numHits, String field, FieldCache.Parser parser, Float missingValue) {
+    FloatComparator(int numHits, String field, Float missingValue) {
       super(field, missingValue);
       values = new float[numHits];
-      this.parser = (FloatParser) parser;
     }
 
     @Override

@@ -350,7 +343,7 @@ public abstract class FieldComparator<T> {
     @Override
     public int compareBottom(int doc) {
       // TODO: are there sneaky non-branch ways to compute sign of float?
-      float v2 = currentReaderValues.get(doc);
+      float v2 = Float.intBitsToFloat((int)currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {

@@ -362,7 +355,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public void copy(int slot, int doc) {
-      float v2 = currentReaderValues.get(doc);
+      float v2 = Float.intBitsToFloat((int)currentReaderValues.get(doc));
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {

@@ -374,9 +367,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public FieldComparator<Float> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getFloats(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
 

@@ -397,7 +388,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public int compareTop(int doc) {
-      float docValue = currentReaderValues.get(doc);
+      float docValue = Float.intBitsToFloat((int)currentReaderValues.get(doc));
       // Test for docValue == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {

@@ -408,18 +399,16 @@ public abstract class FieldComparator<T> {
   }
 
   /** Parses field's values as int (using {@link
-   *  FieldCache#getInts} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues(String)} and sorts by ascending value */
   public static final class IntComparator extends NumericComparator<Integer> {
     private final int[] values;
-    private final IntParser parser;
-    private FieldCache.Ints currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private int bottom; // Value of bottom of queue
     private int topValue;
 
-    IntComparator(int numHits, String field, FieldCache.Parser parser, Integer missingValue) {
+    IntComparator(int numHits, String field, Integer missingValue) {
      super(field, missingValue);
       values = new int[numHits];
-      this.parser = (IntParser) parser;
     }
 
     @Override
@@ -429,7 +418,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public int compareBottom(int doc) {
-      int v2 = currentReaderValues.get(doc);
+      int v2 = (int) currentReaderValues.get(doc);
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {

@@ -441,7 +430,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public void copy(int slot, int doc) {
-      int v2 = currentReaderValues.get(doc);
+      int v2 = (int) currentReaderValues.get(doc);
       // Test for v2 == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && v2 == 0 && !docsWithField.get(doc)) {

@@ -453,9 +442,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getInts(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
 

@@ -476,7 +463,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public int compareTop(int doc) {
-      int docValue = currentReaderValues.get(doc);
+      int docValue = (int) currentReaderValues.get(doc);
       // Test for docValue == 0 to save Bits.get method call for
       // the common case (doc has value and value is non-zero):
       if (docsWithField != null && docValue == 0 && !docsWithField.get(doc)) {

@@ -487,18 +474,16 @@ public abstract class FieldComparator<T> {
   }
 
   /** Parses field's values as long (using {@link
-   *  FieldCache#getLongs} and sorts by ascending value */
+   *  AtomicReader#getNumericDocValues(String)} and sorts by ascending value */
   public static final class LongComparator extends NumericComparator<Long> {
     private final long[] values;
-    private final LongParser parser;
-    private FieldCache.Longs currentReaderValues;
+    private NumericDocValues currentReaderValues;
     private long bottom;
     private long topValue;
 
-    LongComparator(int numHits, String field, FieldCache.Parser parser, Long missingValue) {
+    LongComparator(int numHits, String field, Long missingValue) {
       super(field, missingValue);
       values = new long[numHits];
-      this.parser = (LongParser) parser;
     }
 
     @Override

@@ -534,9 +519,7 @@ public abstract class FieldComparator<T> {
 
     @Override
     public FieldComparator<Long> setNextReader(AtomicReaderContext context) throws IOException {
-      // NOTE: must do this before calling super otherwise
-      // we compute the docsWithField Bits twice!
-      currentReaderValues = FieldCache.DEFAULT.getLongs(context.reader(), field, parser, missingValue != null);
+      currentReaderValues = DocValues.getNumeric(context.reader(), field);
       return super.setNextReader(context);
     }
 

@@ -712,7 +695,7 @@ public abstract class FieldComparator<T> {
    *  ordinals. This is functionally equivalent to {@link
    *  org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the string
    *  to their relative ordinal positions (using the index
-   *  returned by {@link FieldCache#getTermsIndex}), and
+   *  returned by {@link AtomicReader#getSortedDocValues(String)}), and
    *  does most comparisons using the ordinals. For medium
    *  to large results, this comparator will be much faster
    *  than {@link org.apache.lucene.search.FieldComparator.TermValComparator}. For very small

@@ -856,7 +839,7 @@ public abstract class FieldComparator<T> {
 
     /** Retrieves the SortedDocValues for the field in this segment */
     protected SortedDocValues getSortedDocValues(AtomicReaderContext context, String field) throws IOException {
-      return FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
+      return DocValues.getSorted(context.reader(), field);
     }
 
     @Override

@@ -1029,8 +1012,8 @@ public abstract class FieldComparator<T> {
 
     @Override
     public FieldComparator<BytesRef> setNextReader(AtomicReaderContext context) throws IOException {
-      docTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, true);
-      docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
+      docTerms = DocValues.getBinary(context.reader(), field);
+      docsWithField = DocValues.getDocsWithField(context.reader(), field);
       return this;
     }
 
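
A note on the comparator hunks: per the in-line comments, a document with no value reads as 0 from the numeric doc values, so the comparators keep the separate docsWithField bits to tell a real 0 from a missing value. A sketch of that idiom (reader, field, and missing value are illustrative, not from the patch):

import java.io.IOException;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.Bits;

class MissingValueSketch {
  static long valueOrMissing(AtomicReader reader, int docID, long missingValue) throws IOException {
    NumericDocValues vals = DocValues.getNumeric(reader, "price");
    Bits hasValue = DocValues.getDocsWithField(reader, "price");
    long v = vals.get(docID);
    // 0 is ambiguous: only consult the bits in that (rare) case.
    if (v == 0 && !hasValue.get(docID)) {
      return missingValue;
    }
    return v;
  }
}
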
org/apache/lucene/search/FieldValueFilter.java

@@ -18,15 +18,17 @@ package org.apache.lucene.search;
  */
 import java.io.IOException;
 
+import org.apache.lucene.index.AtomicReader;
 import org.apache.lucene.index.AtomicReaderContext;
+import org.apache.lucene.index.DocValues;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.Bits.MatchAllBits;
 import org.apache.lucene.util.Bits.MatchNoBits;
 
 /**
  * A {@link Filter} that accepts all documents that have one or more values in a
- * given field. This {@link Filter} request {@link Bits} from the
- * {@link FieldCache} and build the bits if not present.
+ * given field. This {@link Filter} request {@link Bits} from
+ * {@link AtomicReader#getDocsWithField}
  */
 public class FieldValueFilter extends Filter {
   private final String field;

@@ -76,7 +78,7 @@ public class FieldValueFilter extends Filter {
   @Override
   public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs)
       throws IOException {
-    final Bits docsWithField = FieldCache.DEFAULT.getDocsWithField(
+    final Bits docsWithField = DocValues.getDocsWithField(
         context.reader(), field);
     if (negate) {
       if (docsWithField instanceof MatchAllBits) {
org/apache/lucene/search/FieldValueHitQueue.java

@@ -23,13 +23,10 @@ import org.apache.lucene.util.PriorityQueue;
 
 /**
  * Expert: A hit queue for sorting by hits by terms in more than one field.
- * Uses <code>FieldCache.DEFAULT</code> for maintaining
- * internal term lookup tables.
  *
  * @lucene.experimental
  * @since 2.9
 * @see IndexSearcher#search(Query,Filter,int,Sort)
- * @see FieldCache
 */
 public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends PriorityQueue<T> {
 
@ -94,7 +94,6 @@ public class SortField {
|
|||
private String field;
|
||||
private Type type; // defaults to determining type dynamically
|
||||
boolean reverse = false; // defaults to natural order
|
||||
private FieldCache.Parser parser;
|
||||
|
||||
// Used for CUSTOM sort
|
||||
private FieldComparatorSource comparatorSource;
|
||||
|
@ -124,44 +123,6 @@ public class SortField {
|
|||
this.reverse = reverse;
|
||||
}
|
||||
|
||||
/** Creates a sort by terms in the given field, parsed
|
||||
* to numeric values using a custom {@link FieldCache.Parser}.
|
||||
* @param field Name of field to sort by. Must not be null.
|
||||
* @param parser Instance of a {@link FieldCache.Parser},
|
||||
* which must subclass one of the existing numeric
|
||||
* parsers from {@link FieldCache}. Sort type is inferred
|
||||
* by testing which numeric parser the parser subclasses.
|
||||
* @throws IllegalArgumentException if the parser fails to
|
||||
* subclass an existing numeric parser, or field is null
|
||||
*/
|
||||
public SortField(String field, FieldCache.Parser parser) {
|
||||
this(field, parser, false);
|
||||
}
|
||||
|
||||
/** Creates a sort, possibly in reverse, by terms in the given field, parsed
|
||||
* to numeric values using a custom {@link FieldCache.Parser}.
|
||||
* @param field Name of field to sort by. Must not be null.
|
||||
* @param parser Instance of a {@link FieldCache.Parser},
|
||||
* which must subclass one of the existing numeric
|
||||
* parsers from {@link FieldCache}. Sort type is inferred
|
||||
* by testing which numeric parser the parser subclasses.
|
||||
* @param reverse True if natural order should be reversed.
|
||||
* @throws IllegalArgumentException if the parser fails to
|
||||
* subclass an existing numeric parser, or field is null
|
||||
*/
|
||||
public SortField(String field, FieldCache.Parser parser, boolean reverse) {
|
||||
if (parser instanceof FieldCache.IntParser) initFieldType(field, Type.INT);
|
||||
else if (parser instanceof FieldCache.FloatParser) initFieldType(field, Type.FLOAT);
|
||||
else if (parser instanceof FieldCache.LongParser) initFieldType(field, Type.LONG);
|
||||
else if (parser instanceof FieldCache.DoubleParser) initFieldType(field, Type.DOUBLE);
|
||||
else {
|
||||
throw new IllegalArgumentException("Parser instance does not subclass existing numeric parser from FieldCache (got " + parser + ")");
|
||||
}
|
||||
|
||||
this.reverse = reverse;
|
||||
this.parser = parser;
|
||||
}
|
||||
|
||||
/** Pass this to {@link #setMissingValue} to have missing
|
||||
* string values sort first. */
|
||||
public final static Object STRING_FIRST = new Object() {
|
||||
|
@ -239,14 +200,6 @@ public class SortField {
|
|||
return type;
|
||||
}
|
||||
|
||||
/** Returns the instance of a {@link FieldCache} parser that fits to the given sort type.
|
||||
* May return <code>null</code> if no parser was specified. Sorting is using the default parser then.
|
||||
* @return An instance of a {@link FieldCache} parser, or <code>null</code>.
|
||||
*/
|
||||
public FieldCache.Parser getParser() {
|
||||
return parser;
|
||||
}
|
||||
|
||||
/** Returns whether the sort should be reversed.
|
||||
* @return True if natural order should be reversed.
|
||||
*/
|
||||
|
@ -320,8 +273,7 @@ public class SortField {
|
|||
}
|
||||
|
||||
/** Returns true if <code>o</code> is equal to this. If a
|
||||
* {@link FieldComparatorSource} or {@link
|
||||
* FieldCache.Parser} was provided, it must properly
|
||||
* {@link FieldComparatorSource} was provided, it must properly
|
||||
* implement equals (unless a singleton is always used). */
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
|
@ -337,8 +289,7 @@ public class SortField {
|
|||
}
|
||||
|
||||
/** Returns true if <code>o</code> is equal to this. If a
|
||||
* {@link FieldComparatorSource} or {@link
|
||||
* FieldCache.Parser} was provided, it must properly
|
||||
* {@link FieldComparatorSource} was provided, it must properly
|
||||
* implement hashCode (unless a singleton is always
|
||||
* used). */
|
||||
@Override
|
||||
|
@@ -381,16 +332,16 @@ public class SortField {
      return new FieldComparator.DocComparator(numHits);

    case INT:
      return new FieldComparator.IntComparator(numHits, field, parser, (Integer) missingValue);
      return new FieldComparator.IntComparator(numHits, field, (Integer) missingValue);

    case FLOAT:
      return new FieldComparator.FloatComparator(numHits, field, parser, (Float) missingValue);
      return new FieldComparator.FloatComparator(numHits, field, (Float) missingValue);

    case LONG:
      return new FieldComparator.LongComparator(numHits, field, parser, (Long) missingValue);
      return new FieldComparator.LongComparator(numHits, field, (Long) missingValue);

    case DOUBLE:
      return new FieldComparator.DoubleComparator(numHits, field, parser, (Double) missingValue);
      return new FieldComparator.DoubleComparator(numHits, field, (Double) missingValue);

    case CUSTOM:
      assert comparatorSource != null;
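
With the parser argument gone from the numeric comparators above, a sorted search is driven purely by the field's doc values. A minimal sketch, assuming an IndexSearcher over an index whose "value" field carries NumericDocValues (names illustrative):

  // The LONG comparator now reads NumericDocValues for "value" directly;
  // no FieldCache parser is consulted.
  Sort sort = new Sort(new SortField("value", SortField.Type.LONG));
  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);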
@@ -46,7 +46,6 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.ScoreDoc;

@@ -820,18 +819,21 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
    assertEquals("wrong number of hits", 34, hits.length);

    // check decoding into field cache
    FieldCache.Ints fci = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "trieInt", false);
    // nocommit: instead use the NumericUtils termsenum stuff to test this directly...
    /*
    NumericDocValues fci = FieldCache.DEFAULT.getNumerics(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "trieInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
    int maxDoc = searcher.getIndexReader().maxDoc();
    for(int doc=0;doc<maxDoc;doc++) {
      int val = fci.get(doc);
      long val = fci.get(doc);
      assertTrue("value in id bounds", val >= 0 && val < 35);
    }

    FieldCache.Longs fcl = FieldCache.DEFAULT.getLongs(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "trieLong", false);
    NumericDocValues fcl = FieldCache.DEFAULT.getNumerics(SlowCompositeReaderWrapper.wrap(searcher.getIndexReader()), "trieLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
    for(int doc=0;doc<maxDoc;doc++) {
      long val = fcl.get(doc);
      assertTrue("value in id bounds", val >= 0L && val < 35L);
    }
    */

    reader.close();
  }

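The hunk above trades the typed FieldCache.Ints/Longs accessors for a single getNumerics entry point that hands every value back as a long, which is why the loop variable widens from int to long. A hedged sketch of that read path, using only the calls visible in the hunk:

  // getNumerics returns NumericDocValues regardless of the parsed width;
  // int-parsed fields simply come back widened to long.
  NumericDocValues vals = FieldCache.DEFAULT.getNumerics(
      SlowCompositeReaderWrapper.wrap(reader), "trieInt",
      FieldCache.NUMERIC_UTILS_INT_PARSER, false);
  for (int doc = 0; doc < reader.maxDoc(); doc++) {
    long v = vals.get(doc);
  }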
@@ -32,13 +32,11 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NoSuchDirectoryException;
import org.apache.lucene.util.Bits;

@@ -753,44 +751,6 @@ public void testFilesOpenClose() throws IOException {
    dir.close();
  }

  // LUCENE-1579: Ensure that on a reopened reader, that any
  // shared segments reuse the doc values arrays in
  // FieldCache
  public void testFieldCacheReuseAfterReopen() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
            setMergePolicy(newLogMergePolicy(10))
    );
    Document doc = new Document();
    doc.add(new IntField("number", 17, Field.Store.NO));
    writer.addDocument(doc);
    writer.commit();

    // Open reader1
    DirectoryReader r = DirectoryReader.open(dir);
    AtomicReader r1 = getOnlySegmentReader(r);
    final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(r1, "number", false);
    assertEquals(17, ints.get(0));

    // Add new segment
    writer.addDocument(doc);
    writer.commit();

    // Reopen reader1 --> reader2
    DirectoryReader r2 = DirectoryReader.openIfChanged(r);
    assertNotNull(r2);
    r.close();
    AtomicReader sub0 = r2.leaves().get(0).reader();
    final FieldCache.Ints ints2 = FieldCache.DEFAULT.getInts(sub0, "number", false);
    r2.close();
    assertTrue(ints == ints2);

    writer.shutdown();
    dir.close();
  }

  // LUCENE-1586: getUniqueTermCount
  public void testUniqueTermCount() throws Exception {
    Directory dir = newDirectory();

@@ -32,7 +32,6 @@ import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

@@ -109,7 +108,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {

    DirectoryReader r = w.getReader();
    w.shutdown();
    assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
    assertEquals(17, DocValues.getNumeric(getOnlySegmentReader(r), "field").get(0));
    r.close();
    d.close();
  }

@@ -133,7 +132,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {

    DirectoryReader r = w.getReader();
    w.shutdown();
    assertEquals(17, FieldCache.DEFAULT.getInts(getOnlySegmentReader(r), "field", false).get(0));
    assertEquals(17, DocValues.getNumeric(getOnlySegmentReader(r), "field").get(0));
    r.close();
    d.close();
  }

@@ -176,7 +175,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
    w.addDocument(doc);
    w.forceMerge(1);
    DirectoryReader r = w.getReader();
    BinaryDocValues s = FieldCache.DEFAULT.getTerms(getOnlySegmentReader(r), "field", false);
    BinaryDocValues s = DocValues.getSorted(getOnlySegmentReader(r), "field");

    BytesRef bytes1 = new BytesRef();
    s.get(0, bytes1);

@@ -783,7 +782,7 @@ public class TestDocValuesIndexing extends LuceneTestCase {
    AtomicReader subR = r.leaves().get(0).reader();
    assertEquals(2, subR.numDocs());

    Bits bits = FieldCache.DEFAULT.getDocsWithField(subR, "dv");
    Bits bits = DocValues.getDocsWithField(subR, "dv");
    assertTrue(bits.get(0));
    assertTrue(bits.get(1));
    r.close();

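These hunks all converge on the static DocValues helpers in org.apache.lucene.index. A side-by-side of the accessors the tests now rely on, copied from the hunks above (reader variables as in the tests):

  // Numeric, sorted, and docs-with-field access without the FieldCache:
  NumericDocValues nums   = DocValues.getNumeric(getOnlySegmentReader(r), "field");
  BinaryDocValues  sorted = DocValues.getSorted(getOnlySegmentReader(r), "field");
  Bits             bits   = DocValues.getDocsWithField(subR, "dv");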
@@ -55,7 +55,6 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PhraseQuery;

@@ -1751,11 +1750,6 @@ public class TestIndexWriter extends LuceneTestCase {
    w.shutdown();
    assertEquals(1, reader.docFreq(new Term("content", bigTerm)));

    SortedDocValues dti = FieldCache.DEFAULT.getTermsIndex(SlowCompositeReaderWrapper.wrap(reader), "content", random().nextFloat() * PackedInts.FAST);
    assertEquals(4, dti.getValueCount());
    BytesRef br = new BytesRef();
    dti.lookupOrd(2, br);
    assertEquals(bigTermBytesRef, br);
    reader.close();
    dir.close();
  }

@@ -33,11 +33,11 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.English;

@@ -240,6 +240,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
    for(int docCount=0;docCount<numDocs;docCount++) {
      Document doc = new Document();
      doc.add(new IntField("id", docCount, Field.Store.YES));
      doc.add(new NumericDocValuesField("id", docCount));
      List<Token> tokens = new ArrayList<>();
      final int numTokens = atLeast(100);
      //final int numTokens = atLeast(20);

@@ -296,7 +297,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
    DocsEnum docs = null;
    DocsAndPositionsEnum docsAndPositions = null;
    DocsAndPositionsEnum docsAndPositionsAndOffsets = null;
    final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(sub, "id", false);
    final NumericDocValues docIDToID = DocValues.getNumeric(sub, "id");
    for(String term : terms) {
      //System.out.println("  term=" + term);
      if (termsEnum.seekExact(new BytesRef(term))) {

@@ -305,7 +306,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
        int doc;
        //System.out.println("    doc/freq");
        while((doc = docs.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
          final List<Token> expected = actualTokens.get(term).get(docIDToID.get(doc));
          final List<Token> expected = actualTokens.get(term).get((int) docIDToID.get(doc));
          //System.out.println("      doc=" + docIDToID.get(doc) + " docID=" + doc + " " + expected.size() + " freq");
          assertNotNull(expected);
          assertEquals(expected.size(), docs.freq());

@@ -316,7 +317,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
        assertNotNull(docsAndPositions);
        //System.out.println("    doc/freq/pos");
        while((doc = docsAndPositions.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
          final List<Token> expected = actualTokens.get(term).get(docIDToID.get(doc));
          final List<Token> expected = actualTokens.get(term).get((int) docIDToID.get(doc));
          //System.out.println("      doc=" + docIDToID.get(doc) + " " + expected.size() + " freq");
          assertNotNull(expected);
          assertEquals(expected.size(), docsAndPositions.freq());

@@ -331,7 +332,7 @@ public class TestPostingsOffsets extends LuceneTestCase {
        assertNotNull(docsAndPositionsAndOffsets);
        //System.out.println("    doc/freq/pos/offs");
        while((doc = docsAndPositionsAndOffsets.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
          final List<Token> expected = actualTokens.get(term).get(docIDToID.get(doc));
          final List<Token> expected = actualTokens.get(term).get((int) docIDToID.get(doc));
          //System.out.println("      doc=" + docIDToID.get(doc) + " " + expected.size() + " freq");
          assertNotNull(expected);
          assertEquals(expected.size(), docsAndPositionsAndOffsets.freq());

@@ -24,8 +24,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LineFileDocs;

@@ -159,6 +159,7 @@ public class TestTermsEnum extends LuceneTestCase {
  private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
    Document doc = new Document();
    doc.add(new IntField("id", id, Field.Store.YES));
    doc.add(new NumericDocValuesField("id", id));
    if (VERBOSE) {
      System.out.println("TEST: addDoc id:" + id + " terms=" + terms);
    }

@@ -226,8 +227,7 @@ public class TestTermsEnum extends LuceneTestCase {
    final IndexReader r = w.getReader();
    w.shutdown();

    // NOTE: intentional insanity!!
    final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
    final NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");

    for(int iter=0;iter<10*RANDOM_MULTIPLIER;iter++) {

@@ -28,10 +28,13 @@ import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;

@@ -120,19 +123,33 @@ public class BaseTestRangeFilter extends LuceneTestCase {

    Document doc = new Document();
    Field idField = newStringField(random, "id", "", Field.Store.YES);
    Field idDVField = new SortedDocValuesField("id", new BytesRef());
    Field intIdField = new IntField("id_int", 0, Store.YES);
    Field intDVField = new NumericDocValuesField("id_int", 0);
    Field floatIdField = new FloatField("id_float", 0, Store.YES);
    Field floatDVField = new NumericDocValuesField("id_float", 0);
    Field longIdField = new LongField("id_long", 0, Store.YES);
    Field longDVField = new NumericDocValuesField("id_long", 0);
    Field doubleIdField = new DoubleField("id_double", 0, Store.YES);
    Field doubleDVField = new NumericDocValuesField("id_double", 0);
    Field randField = newStringField(random, "rand", "", Field.Store.YES);
    Field randDVField = new SortedDocValuesField("rand", new BytesRef());
    Field bodyField = newStringField(random, "body", "", Field.Store.NO);
    Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
    doc.add(idField);
    doc.add(idDVField);
    doc.add(intIdField);
    doc.add(intDVField);
    doc.add(floatIdField);
    doc.add(floatDVField);
    doc.add(longIdField);
    doc.add(longDVField);
    doc.add(doubleIdField);
    doc.add(doubleDVField);
    doc.add(randField);
    doc.add(randDVField);
    doc.add(bodyField);
    doc.add(bodyDVField);

    RandomIndexWriter writer = new RandomIndexWriter(random, index.index,
        newIndexWriterConfig(random, TEST_VERSION_CURRENT, new MockAnalyzer(random))

@@ -146,10 +163,15 @@ public class BaseTestRangeFilter extends LuceneTestCase {

    for (int d = minId; d <= maxId; d++) {
      idField.setStringValue(pad(d));
      idDVField.setBytesValue(new BytesRef(pad(d)));
      intIdField.setIntValue(d);
      intDVField.setLongValue(d);
      floatIdField.setFloatValue(d);
      floatDVField.setLongValue(Float.floatToRawIntBits(d));
      longIdField.setLongValue(d);
      longDVField.setLongValue(d);
      doubleIdField.setDoubleValue(d);
      doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
      int r = index.allowNegativeRandomInts ? random.nextInt() : random
          .nextInt(Integer.MAX_VALUE);
      if (index.maxR < r) {

@@ -166,7 +188,9 @@ public class BaseTestRangeFilter extends LuceneTestCase {
        minCount++;
      }
      randField.setStringValue(pad(r));
      randDVField.setBytesValue(new BytesRef(pad(r)));
      bodyField.setStringValue("body");
      bodyDVField.setBytesValue(new BytesRef("body"));
      writer.addDocument(doc);
    }

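The new DV fields above funnel float and double values through the single long slot of NumericDocValues via their raw bit patterns. A hedged round-trip sketch of that encoding (the value 3.5 is illustrative):

  // Store side: raw bits into the long-valued doc values field.
  long dbits = Double.doubleToRawLongBits(3.5);
  int  fbits = Float.floatToRawIntBits(3.5f);
  // Read side: the inverse conversions recover the original values.
  double d = Double.longBitsToDouble(dbits); // == 3.5
  float  f = Float.intBitsToFloat(fbits);    // == 3.5f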
@@ -95,34 +95,6 @@ final class JustCompileSearch {
    }
  }

  static final class JustCompileExtendedFieldCacheLongParser implements FieldCache.LongParser {

    @Override
    public long parseLong(BytesRef string) {
      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
    }

    @Override
    public TermsEnum termsEnum(Terms terms) {
      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
    }

  }

  static final class JustCompileExtendedFieldCacheDoubleParser implements FieldCache.DoubleParser {

    @Override
    public double parseDouble(BytesRef term) {
      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
    }

    @Override
    public TermsEnum termsEnum(Terms terms) {
      throw new UnsupportedOperationException(UNSUPPORTED_MSG);
    }

  }

  static final class JustCompileFieldComparator extends FieldComparator<Object> {

    @Override

@@ -20,11 +20,13 @@ package org.apache.lucene.search;
import java.util.Arrays;

import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;

import org.apache.lucene.document.DateTools;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.StoredDocument;

@@ -110,6 +112,7 @@ public class TestDateSort extends LuceneTestCase {
    String dateTimeString = DateTools.timeToString(time, DateTools.Resolution.SECOND);
    Field dateTimeField = newStringField(DATE_TIME_FIELD, dateTimeString, Field.Store.YES);
    document.add(dateTimeField);
    document.add(new SortedDocValuesField(DATE_TIME_FIELD, new BytesRef(dateTimeString)));

    return document;
  }

@@ -49,6 +49,7 @@ public class TestDocTermOrdsRangeFilter extends LuceneTestCase {
  @Override
  public void setUp() throws Exception {
    super.setUp();
    assumeTrue("requires codec support for SORTED_SET", defaultCodecSupportsSortedSet());
    dir = newDirectory();
    fieldName = random().nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,

@@ -63,10 +64,7 @@ public class TestDocTermOrdsRangeFilter extends LuceneTestCase {
      for (int j = 0; j < numTerms; j++) {
        String s = TestUtil.randomUnicodeString(random());
        doc.add(newStringField(fieldName, s, Field.Store.NO));
        // if the default codec doesn't support sortedset, we will uninvert at search time
        if (defaultCodecSupportsSortedSet()) {
          doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
        }
        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
        terms.add(s);
      }
      writer.addDocument(doc);

@@ -51,6 +51,7 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase {
  @Override
  public void setUp() throws Exception {
    super.setUp();
    assumeTrue("requires codec support for SORTED_SET", defaultCodecSupportsSortedSet());
    dir = newDirectory();
    fieldName = random().nextBoolean() ? "field" : ""; // sometimes use an empty string as field name
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir,

@@ -65,10 +66,7 @@ public class TestDocTermOrdsRewriteMethod extends LuceneTestCase {
      for (int j = 0; j < numTerms; j++) {
        String s = TestUtil.randomUnicodeString(random());
        doc.add(newStringField(fieldName, s, Field.Store.NO));
        // if the default codec doesn't support sortedset, we will uninvert at search time
        if (defaultCodecSupportsSortedSet()) {
          doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
        }
        doc.add(new SortedSetDocValuesField(fieldName, new BytesRef(s)));
        terms.add(s);
      }
      writer.addDocument(doc);

@@ -23,8 +23,10 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper;

@@ -158,12 +160,12 @@ public class TestDocValuesScoring extends LuceneTestCase {
    @Override
    public SimScorer simScorer(SimWeight stats, AtomicReaderContext context) throws IOException {
      final SimScorer sub = sim.simScorer(stats, context);
      final FieldCache.Floats values = FieldCache.DEFAULT.getFloats(context.reader(), boostField, false);
      final NumericDocValues values = DocValues.getNumeric(context.reader(), boostField);

      return new SimScorer() {
        @Override
        public float score(int doc, float freq) {
          return values.get(doc) * sub.score(doc, freq);
          return Float.intBitsToFloat((int)values.get(doc)) * sub.score(doc, freq);
        }

        @Override

@@ -178,7 +180,7 @@ public class TestDocValuesScoring extends LuceneTestCase {

        @Override
        public Explanation explain(int doc, Explanation freq) {
          Explanation boostExplanation = new Explanation(values.get(doc), "indexDocValue(" + boostField + ")");
          Explanation boostExplanation = new Explanation(Float.intBitsToFloat((int)values.get(doc)), "indexDocValue(" + boostField + ")");
          Explanation simExplanation = sub.explain(doc, freq);
          Explanation expl = new Explanation(boostExplanation.getValue() * simExplanation.getValue(), "product of:");
          expl.addDetail(boostExplanation);

@@ -20,6 +20,7 @@ package org.apache.lucene.search;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.*;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.search.similarities.DefaultSimilarity;

@@ -126,6 +127,9 @@ public class TestElevationComparator extends LuceneTestCase {
    Document doc = new Document();
    for (int i = 0; i < vals.length - 2; i += 2) {
      doc.add(newTextField(vals[i], vals[i + 1], Field.Store.YES));
      if (vals[i].equals("id")) {
        doc.add(new SortedDocValuesField(vals[i], new BytesRef(vals[i+1])));
      }
    }
    return doc;
  }

@@ -185,7 +189,7 @@ class ElevationComparatorSource extends FieldComparatorSource {

  @Override
  public FieldComparator<Integer> setNextReader(AtomicReaderContext context) throws IOException {
    idIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldname);
    idIndex = DocValues.getSorted(context.reader(), fieldname);
    return this;
  }

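DocValues.getSorted hands the comparator per-segment ordinals rather than a cached terms index. A hedged sketch of the lookup pattern the setNextReader change builds on (docID handling illustrative):

  SortedDocValues dv = DocValues.getSorted(context.reader(), fieldname);
  int ord = dv.getOrd(docID);     // -1 when the document has no value
  if (ord != -1) {
    BytesRef scratch = new BytesRef();
    dv.lookupOrd(ord, scratch);   // materialize the term for this ordinal
  }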
@@ -20,6 +20,7 @@ package org.apache.lucene.search;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;

@@ -30,6 +31,7 @@ import org.apache.lucene.search.spans.SpanOrQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;

@@ -73,6 +75,7 @@ public class TestExplanations extends LuceneTestCase {
    for (int i = 0; i < docFields.length; i++) {
      Document doc = new Document();
      doc.add(newStringField(KEY, ""+i, Field.Store.NO));
      doc.add(new SortedDocValuesField(KEY, new BytesRef(""+i)));
      Field f = newTextField(FIELD, docFields[i], Field.Store.NO);
      f.setBoost(i);
      doc.add(f);

@@ -118,9 +121,6 @@ public class TestExplanations extends LuceneTestCase {
    }
    return out;
  }
  public ItemizedFilter(String keyField, int [] keys) {
    super(keyField, int2str(keys));
  }
  public ItemizedFilter(int [] keys) {
    super(KEY, int2str(keys));
  }

@@ -23,6 +23,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;

@@ -440,6 +441,7 @@ public class TestFieldCacheRangeFilter extends BaseTestRangeFilter {
    for (int d = -20; d <= 20; d++) {
      Document doc = new Document();
      doc.add(new IntField("id_int", d, Field.Store.NO));
      doc.add(new NumericDocValuesField("id_int", d));
      doc.add(newStringField("body", "body", Field.Store.NO));
      writer.addDocument(doc);
    }

@@ -18,6 +18,8 @@ package org.apache.lucene.search;
 */

import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;

import org.apache.lucene.document.Document;

@@ -35,6 +37,7 @@ import java.util.List;
 */
public class TestFieldCacheTermsFilter extends LuceneTestCase {
  public void testMissingTerms() throws Exception {
    assumeTrue("requires support for missing values", defaultCodecSupportsMissingDocValues());
    String fieldName = "field1";
    Directory rd = newDirectory();
    RandomIndexWriter w = new RandomIndexWriter(random(), rd);

@@ -42,6 +45,7 @@ public class TestFieldCacheTermsFilter extends LuceneTestCase {
      Document doc = new Document();
      int term = i * 10; //terms are units of 10;
      doc.add(newStringField(fieldName, "" + term, Field.Store.YES));
      doc.add(new SortedDocValuesField(fieldName, new BytesRef("" + term)));
      w.addDocument(doc);
    }
    IndexReader reader = w.getReader();

@@ -21,16 +21,20 @@ import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

/**
 *
 */
@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // suppress codecs without missing
public class TestFieldValueFilter extends LuceneTestCase {

  public void testFieldValueFilterNoValue() throws IOException {

@@ -96,9 +100,12 @@ public class TestFieldValueFilter extends LuceneTestCase {
      if (random().nextBoolean()) {
        docStates[i] = 1;
        doc.add(newTextField("some", "value", Field.Store.YES));
        doc.add(new SortedDocValuesField("some", new BytesRef("value")));
      }
      doc.add(newTextField("all", "test", Field.Store.NO));
      doc.add(new SortedDocValuesField("all", new BytesRef("test")));
      doc.add(newTextField("id", "" + i, Field.Store.YES));
      doc.add(new SortedDocValuesField("id", new BytesRef("" + i)));
      writer.addDocument(doc);
    }
    writer.commit();

@@ -565,46 +565,6 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
    testFloatRange(2);
  }

  private void testSorting(int precisionStep) throws Exception {
    String field="field"+precisionStep;
    // 10 random tests, the index order is ascending,
    // so using a reverse sort field should return descending documents
    int num = TestUtil.nextInt(random(), 10, 20);
    for (int i = 0; i < num; i++) {
      int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
      int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
      if (lower>upper) {
        int a=lower; lower=upper; upper=a;
      }
      Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
      if (topDocs.totalHits==0) continue;
      ScoreDoc[] sd = topDocs.scoreDocs;
      assertNotNull(sd);
      int last = searcher.doc(sd[0].doc).getField(field).numericValue().intValue();
      for (int j=1; j<sd.length; j++) {
        int act = searcher.doc(sd[j].doc).getField(field).numericValue().intValue();
        assertTrue("Docs should be sorted backwards", last>act );
        last=act;
      }
    }
  }

  @Test
  public void testSorting_8bit() throws Exception {
    testSorting(8);
  }

  @Test
  public void testSorting_4bit() throws Exception {
    testSorting(4);
  }

  @Test
  public void testSorting_2bit() throws Exception {
    testSorting(2);
  }

  @Test
  public void testEqualsAndHash() throws Exception {
    QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));

@@ -38,7 +38,6 @@ import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestNumericUtils; // NaN arrays
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

@@ -608,51 +607,6 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
    testDoubleRange(2);
  }

  private void testSorting(int precisionStep) throws Exception {
    String field="field"+precisionStep;
    // 10 random tests, the index order is ascending,
    // so using a reverse sort field should return descending documents
    int num = TestUtil.nextInt(random(), 10, 20);
    for (int i = 0; i < num; i++) {
      long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
      long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
      if (lower>upper) {
        long a=lower; lower=upper; upper=a;
      }
      Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
      TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
      if (topDocs.totalHits==0) continue;
      ScoreDoc[] sd = topDocs.scoreDocs;
      assertNotNull(sd);
      long last=searcher.doc(sd[0].doc).getField(field).numericValue().longValue();
      for (int j=1; j<sd.length; j++) {
        long act=searcher.doc(sd[j].doc).getField(field).numericValue().longValue();
        assertTrue("Docs should be sorted backwards", last>act );
        last=act;
      }
    }
  }

  @Test
  public void testSorting_8bit() throws Exception {
    testSorting(8);
  }

  @Test
  public void testSorting_6bit() throws Exception {
    testSorting(6);
  }

  @Test
  public void testSorting_4bit() throws Exception {
    testSorting(4);
  }

  @Test
  public void testSorting_2bit() throws Exception {
    testSorting(2);
  }

  @Test
  public void testEqualsAndHash() throws Exception {
    QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));

@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;

@@ -66,11 +67,14 @@ public class TestRegexpRandom2 extends LuceneTestCase {
    Document doc = new Document();
    Field field = newStringField(fieldName, "", Field.Store.NO);
    doc.add(field);
    Field dvField = new SortedDocValuesField(fieldName, new BytesRef());
    doc.add(dvField);
    List<String> terms = new ArrayList<>();
    int num = atLeast(200);
    for (int i = 0; i < num; i++) {
      String s = TestUtil.randomUnicodeString(random());
      field.setStringValue(s);
      dvField.setBytesValue(new BytesRef(s));
      terms.add(s);
      writer.addDocument(doc);
    }

@@ -1,804 +0,0 @@
package org.apache.lucene.search;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleDocValuesField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

/** Tests basic sorting on docvalues fields.
 * These are mostly like TestSort's tests, except each test
 * indexes the field up-front as docvalues, and checks no fieldcaches were made */
@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // avoid codecs that don't support "missing"
public class TestSortDocValues extends LuceneTestCase {

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // ensure there is nothing in fieldcache before test starts
    FieldCache.DEFAULT.purgeAllCaches();
  }

  private void assertNoFieldCaches() {
    // docvalues sorting should NOT create any fieldcache entries!
    assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
  }

  /** Tests sorting on type string */
  public void testString() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
    doc.add(newStringField("value", "foo", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("value", "bar", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'bar' comes before 'foo'
    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests reverse sorting on type string */
  public void testStringReverse() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("value", "bar", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
    doc.add(newStringField("value", "foo", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING, true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'foo' comes after 'bar' in reverse order
    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type string_val */
  public void testStringVal() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
    doc.add(newStringField("value", "foo", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("value", "bar", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'bar' comes before 'foo'
    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests reverse sorting on type string_val */
  public void testStringValReverse() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new BinaryDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("value", "bar", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new BinaryDocValuesField("value", new BytesRef("foo")));
    doc.add(newStringField("value", "foo", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'foo' comes after 'bar' in reverse order
    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type string_val, but with a SortedDocValuesField */
  public void testStringValSorted() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
    doc.add(newStringField("value", "foo", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("value", "bar", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'bar' comes before 'foo'
    assertEquals("bar", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("foo", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests reverse sorting on type string_val, but with a SortedDocValuesField */
  public void testStringValReverseSorted() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("value", "bar", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedDocValuesField("value", new BytesRef("foo")));
    doc.add(newStringField("value", "foo", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.STRING_VAL, true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'foo' comes after 'bar' in reverse order
    assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("bar", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type int */
  public void testInt() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("value", 300000));
    doc.add(newStringField("value", "300000", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.INT));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // numeric order
    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("300000", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type int in reverse */
  public void testIntReverse() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("value", 300000));
    doc.add(newStringField("value", "300000", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.INT, true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // reverse numeric order
    assertEquals("300000", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type int with a missing value */
  public void testIntMissing() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.INT));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // null is treated as a 0
    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));

    ir.close();
    dir.close();
  }

  /** Tests sorting on type int, specifying the missing value should be treated as Integer.MAX_VALUE */
  public void testIntMissingLast() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    SortField sortField = new SortField("value", SortField.Type.INT);
    sortField.setMissingValue(Integer.MAX_VALUE);
    Sort sort = new Sort(sortField);

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // null is treated as a Integer.MAX_VALUE
    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));

    ir.close();
    dir.close();
  }

  /** Tests sorting on type long */
  public void testLong() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("value", 3000000000L));
    doc.add(newStringField("value", "3000000000", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // numeric order
    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("3000000000", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type long in reverse */
  public void testLongReverse() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new NumericDocValuesField("value", 3000000000L));
    doc.add(newStringField("value", "3000000000", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.LONG, true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // reverse numeric order
    assertEquals("3000000000", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("-1", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type long with a missing value */
  public void testLongMissing() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.LONG));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // null is treated as 0
    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[2].doc).get("value"));

    ir.close();
    dir.close();
  }

  /** Tests sorting on type long, specifying the missing value should be treated as Long.MAX_VALUE */
  public void testLongMissingLast() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", -1));
    doc.add(newStringField("value", "-1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new NumericDocValuesField("value", 4));
    doc.add(newStringField("value", "4", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    SortField sortField = new SortField("value", SortField.Type.LONG);
    sortField.setMissingValue(Long.MAX_VALUE);
    Sort sort = new Sort(sortField);

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // null is treated as Long.MAX_VALUE
    assertEquals("-1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));

    ir.close();
    dir.close();
  }

  /** Tests sorting on type float */
  public void testFloat() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new FloatDocValuesField("value", 30.1F));
    doc.add(newStringField("value", "30.1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new FloatDocValuesField("value", -1.3F));
    doc.add(newStringField("value", "-1.3", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new FloatDocValuesField("value", 4.2F));
    doc.add(newStringField("value", "4.2", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // numeric order
    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("30.1", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type float in reverse */
  public void testFloatReverse() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new FloatDocValuesField("value", 30.1F));
    doc.add(newStringField("value", "30.1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new FloatDocValuesField("value", -1.3F));
    doc.add(newStringField("value", "-1.3", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new FloatDocValuesField("value", 4.2F));
    doc.add(newStringField("value", "4.2", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT, true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // reverse numeric order
    assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("-1.3", searcher.doc(td.scoreDocs[2].doc).get("value"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  /** Tests sorting on type float with a missing value */
  public void testFloatMissing() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new FloatDocValuesField("value", -1.3F));
    doc.add(newStringField("value", "-1.3", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new FloatDocValuesField("value", 4.2F));
    doc.add(newStringField("value", "4.2", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortField("value", SortField.Type.FLOAT));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // null is treated as 0
    assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
    assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
    assertEquals("4.2", searcher.doc(td.scoreDocs[2].doc).get("value"));

    ir.close();
    dir.close();
  }

  /** Tests sorting on type float, specifying the missing value should be treated as Float.MAX_VALUE */
  public void testFloatMissingLast() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new FloatDocValuesField("value", -1.3F));
|
||||
doc.add(newStringField("value", "-1.3", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new FloatDocValuesField("value", 4.2F));
|
||||
doc.add(newStringField("value", "4.2", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
IndexReader ir = writer.getReader();
|
||||
writer.shutdown();
|
||||
|
||||
IndexSearcher searcher = newSearcher(ir);
|
||||
SortField sortField = new SortField("value", SortField.Type.FLOAT);
|
||||
sortField.setMissingValue(Float.MAX_VALUE);
|
||||
Sort sort = new Sort(sortField);
|
||||
|
||||
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
|
||||
assertEquals(3, td.totalHits);
|
||||
// null is treated as Float.MAX_VALUE
|
||||
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
|
||||
assertEquals("4.2", searcher.doc(td.scoreDocs[1].doc).get("value"));
|
||||
assertNull(searcher.doc(td.scoreDocs[2].doc).get("value"));
|
||||
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/** Tests sorting on type double */
|
||||
public void testDouble() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
|
||||
Document doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 30.1));
|
||||
doc.add(newStringField("value", "30.1", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", -1.3));
|
||||
doc.add(newStringField("value", "-1.3", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
|
||||
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
|
||||
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
IndexReader ir = writer.getReader();
|
||||
writer.shutdown();
|
||||
|
||||
IndexSearcher searcher = newSearcher(ir);
|
||||
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
|
||||
|
||||
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
|
||||
assertEquals(4, td.totalHits);
|
||||
// numeric order
|
||||
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
|
||||
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
|
||||
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
|
||||
assertEquals("30.1", searcher.doc(td.scoreDocs[3].doc).get("value"));
|
||||
assertNoFieldCaches();
|
||||
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/** Tests sorting on type double with +/- zero */
|
||||
public void testDoubleSignedZero() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
|
||||
Document doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", +0D));
|
||||
doc.add(newStringField("value", "+0", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", -0D));
|
||||
doc.add(newStringField("value", "-0", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
IndexReader ir = writer.getReader();
|
||||
writer.shutdown();
|
||||
|
||||
IndexSearcher searcher = newSearcher(ir);
|
||||
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
|
||||
|
||||
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
|
||||
assertEquals(2, td.totalHits);
|
||||
// numeric order
|
||||
assertEquals("-0", searcher.doc(td.scoreDocs[0].doc).get("value"));
|
||||
assertEquals("+0", searcher.doc(td.scoreDocs[1].doc).get("value"));
|
||||
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/** Tests sorting on type double in reverse */
|
||||
public void testDoubleReverse() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
|
||||
Document doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 30.1));
|
||||
doc.add(newStringField("value", "30.1", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", -1.3));
|
||||
doc.add(newStringField("value", "-1.3", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
|
||||
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
|
||||
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
IndexReader ir = writer.getReader();
|
||||
writer.shutdown();
|
||||
|
||||
IndexSearcher searcher = newSearcher(ir);
|
||||
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE, true));
|
||||
|
||||
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
|
||||
assertEquals(4, td.totalHits);
|
||||
// numeric order
|
||||
assertEquals("30.1", searcher.doc(td.scoreDocs[0].doc).get("value"));
|
||||
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[1].doc).get("value"));
|
||||
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
|
||||
assertEquals("-1.3", searcher.doc(td.scoreDocs[3].doc).get("value"));
|
||||
assertNoFieldCaches();
|
||||
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/** Tests sorting on type double with a missing value */
|
||||
public void testDoubleMissing() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
|
||||
Document doc = new Document();
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", -1.3));
|
||||
doc.add(newStringField("value", "-1.3", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
|
||||
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
|
||||
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
IndexReader ir = writer.getReader();
|
||||
writer.shutdown();
|
||||
|
||||
IndexSearcher searcher = newSearcher(ir);
|
||||
Sort sort = new Sort(new SortField("value", SortField.Type.DOUBLE));
|
||||
|
||||
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
|
||||
assertEquals(4, td.totalHits);
|
||||
// null treated as a 0
|
||||
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
|
||||
assertNull(searcher.doc(td.scoreDocs[1].doc).get("value"));
|
||||
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[2].doc).get("value"));
|
||||
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[3].doc).get("value"));
|
||||
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
/** Tests sorting on type double, specifying the missing value should be treated as Double.MAX_VALUE */
|
||||
public void testDoubleMissingLast() throws IOException {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
|
||||
Document doc = new Document();
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", -1.3));
|
||||
doc.add(newStringField("value", "-1.3", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333333));
|
||||
doc.add(newStringField("value", "4.2333333333333", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
doc = new Document();
|
||||
doc.add(new DoubleDocValuesField("value", 4.2333333333332));
|
||||
doc.add(newStringField("value", "4.2333333333332", Field.Store.YES));
|
||||
writer.addDocument(doc);
|
||||
IndexReader ir = writer.getReader();
|
||||
writer.shutdown();
|
||||
|
||||
IndexSearcher searcher = newSearcher(ir);
|
||||
SortField sortField = new SortField("value", SortField.Type.DOUBLE);
|
||||
sortField.setMissingValue(Double.MAX_VALUE);
|
||||
Sort sort = new Sort(sortField);
|
||||
|
||||
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
|
||||
assertEquals(4, td.totalHits);
|
||||
// null treated as Double.MAX_VALUE
|
||||
assertEquals("-1.3", searcher.doc(td.scoreDocs[0].doc).get("value"));
|
||||
assertEquals("4.2333333333332", searcher.doc(td.scoreDocs[1].doc).get("value"));
|
||||
assertEquals("4.2333333333333", searcher.doc(td.scoreDocs[2].doc).get("value"));
|
||||
assertNull(searcher.doc(td.scoreDocs[3].doc).get("value"));
|
||||
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
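The pattern these tests exercise, pulled out for reference: a minimal sketch (not part of the patch) of sorting with an explicit missing value on a NumericDocValuesField, assuming the 4.x-era API used above. The class name and the `searcher` parameter are illustrative only.

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopDocs;

    class SortMissingSketch {
      // Documents with no "value" doc values sort as if they held Long.MAX_VALUE.
      static TopDocs sortMissingLast(IndexSearcher searcher) throws IOException {
        SortField sortField = new SortField("value", SortField.Type.LONG);
        sortField.setMissingValue(Long.MAX_VALUE); // omit this line to treat missing values as 0
        return searcher.search(new MatchAllDocsQuery(), 10, new Sort(sortField));
      }
    }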
@ -32,7 +32,9 @@ import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;

@ -87,7 +89,6 @@ public class TestSortRandom extends LuceneTestCase {
        br = new BytesRef(s);
        doc.add(new SortedDocValuesField("stringdv", br));
        doc.add(newStringField("string", s, Field.Store.NO));
        docValues.add(br);

      } else {

@ -124,17 +125,12 @@
    final SortField sf;
    final boolean sortMissingLast;
    final boolean missingIsNull;
    if (random.nextBoolean()) {
      sf = new SortField("stringdv", SortField.Type.STRING, reverse);
      // Can only use sort missing if the DVFormat
      // supports docsWithField:
      sortMissingLast = defaultCodecSupportsDocsWithField() && random().nextBoolean();
      missingIsNull = defaultCodecSupportsDocsWithField();
    } else {
      sf = new SortField("string", SortField.Type.STRING, reverse);
      sortMissingLast = random().nextBoolean();
      missingIsNull = true;
    }
    sf = new SortField("stringdv", SortField.Type.STRING, reverse);
    // Can only use sort missing if the DVFormat
    // supports docsWithField:
    sortMissingLast = defaultCodecSupportsDocsWithField() && random().nextBoolean();
    missingIsNull = defaultCodecSupportsDocsWithField();

    if (sortMissingLast) {
      sf.setMissingValue(SortField.STRING_LAST);
    }

@ -264,14 +260,14 @@
    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
      final int maxDoc = context.reader().maxDoc();
      final FieldCache.Ints idSource = FieldCache.DEFAULT.getInts(context.reader(), "id", false);
      final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
      assertNotNull(idSource);
      final FixedBitSet bits = new FixedBitSet(maxDoc);
      for(int docID=0;docID<maxDoc;docID++) {
        if (random.nextFloat() <= density && (acceptDocs == null || acceptDocs.get(docID))) {
          bits.set(docID);
          //System.out.println("  acc id=" + idSource.getInt(docID) + " docID=" + docID);
          matchValues.add(docValues.get(idSource.get(docID)));
          matchValues.add(docValues.get((int) idSource.get(docID)));
        }
      }
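For reference, a minimal sketch (not part of the patch) of the per-segment DocValues access that replaces FieldCache.getInts above; the helper name is illustrative, and the "id" field is taken from the test.

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;

    class NumericDVSketch {
      // Collect the "id" value of every document in one segment.
      static long[] readIds(AtomicReaderContext context) throws IOException {
        NumericDocValues ids = DocValues.getNumeric(context.reader(), "id");
        int maxDoc = context.reader().maxDoc();
        long[] values = new long[maxDoc];
        for (int docID = 0; docID < maxDoc; docID++) {
          values[docID] = ids.get(docID); // 4.x API: get(docID) returns the raw long
        }
        return values;
      }
    }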
@ -22,10 +22,10 @@ import org.apache.lucene.document.StringField;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.junit.Assert;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.JUnitCore;
import org.junit.runner.Result;

@ -58,14 +58,17 @@ public class TestFailOnFieldCacheInsanity extends WithNestedTests {

    public void testDummy() throws Exception {
      makeIndex();
      /* nocommit
      assertNotNull(FieldCache.DEFAULT.getTermsIndex(subR, "ints"));
      assertNotNull(FieldCache.DEFAULT.getTerms(subR, "ints", false));
      */
      // NOTE: do not close reader/directory, else it
      // purges FC entries
    }
  }

  @Test
  // nocommit: move this to solr?
  @Test @Ignore
  public void testFailOnFieldCacheInsanity() {
    Result r = JUnitCore.runClasses(Nested1.class);
    boolean insane = false;
@ -25,10 +25,6 @@ import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
import org.apache.lucene.queries.function.valuesource.IntFieldSource;
import org.apache.lucene.queries.function.valuesource.LongFieldSource;
import org.apache.lucene.search.FieldCache.DoubleParser;
import org.apache.lucene.search.FieldCache.FloatParser;
import org.apache.lucene.search.FieldCache.IntParser;
import org.apache.lucene.search.FieldCache.LongParser;
import org.apache.lucene.search.SortField;

/**

@ -87,13 +83,13 @@ public final class SimpleBindings extends Bindings {
    SortField field = (SortField) o;
    switch(field.getType()) {
      case INT:
        return new IntFieldSource(field.getField(), (IntParser) field.getParser());
        return new IntFieldSource(field.getField());
      case LONG:
        return new LongFieldSource(field.getField(), (LongParser) field.getParser());
        return new LongFieldSource(field.getField());
      case FLOAT:
        return new FloatFieldSource(field.getField(), (FloatParser) field.getParser());
        return new FloatFieldSource(field.getField());
      case DOUBLE:
        return new DoubleFieldSource(field.getField(), (DoubleParser) field.getParser());
        return new DoubleFieldSource(field.getField());
      case SCORE:
        return getScoreValueSource();
      default:
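A hedged sketch of how SimpleBindings is typically driven after this change: SortField entries now resolve to plain field sources, so the bound fields are expected to carry doc values. The method name and the "popularity" field are illustrative only; compile() and getSortField() are the existing expressions APIs.

    import java.text.ParseException;
    import org.apache.lucene.expressions.Expression;
    import org.apache.lucene.expressions.SimpleBindings;
    import org.apache.lucene.expressions.js.JavascriptCompiler;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;

    class BindingsSketch {
      static Sort popularityBoostSort() throws ParseException {
        SimpleBindings bindings = new SimpleBindings();
        bindings.add(new SortField("_score", SortField.Type.SCORE));
        bindings.add(new SortField("popularity", SortField.Type.INT)); // resolved to an IntFieldSource
        Expression expr = JavascriptCompiler.compile("_score + ln(popularity)");
        return new Sort(expr.getSortField(bindings, true)); // true = descending
      }
    }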
@ -1,7 +1,6 @@
package org.apache.lucene.expressions;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.expressions.js.JavascriptCompiler;

@ -53,24 +52,24 @@ public class TestDemoExpressions extends LuceneTestCase {
    doc.add(newStringField("id", "1", Field.Store.YES));
    doc.add(newTextField("body", "some contents and more contents", Field.Store.NO));
    doc.add(new NumericDocValuesField("popularity", 5));
    doc.add(new DoubleField("latitude", 40.759011, Field.Store.NO));
    doc.add(new DoubleField("longitude", -73.9844722, Field.Store.NO));
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(newStringField("id", "2", Field.Store.YES));
    doc.add(newTextField("body", "another document with different contents", Field.Store.NO));
    doc.add(new NumericDocValuesField("popularity", 20));
    doc.add(new DoubleField("latitude", 40.718266, Field.Store.NO));
    doc.add(new DoubleField("longitude", -74.007819, Field.Store.NO));
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
    iw.addDocument(doc);

    doc = new Document();
    doc.add(newStringField("id", "3", Field.Store.YES));
    doc.add(newTextField("body", "crappy contents", Field.Store.NO));
    doc.add(new NumericDocValuesField("popularity", 2));
    doc.add(new DoubleField("latitude", 40.7051157, Field.Store.NO));
    doc.add(new DoubleField("longitude", -74.0088305, Field.Store.NO));
    doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
    doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
    iw.addDocument(doc);

    reader = iw.getReader();
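For reference, a minimal sketch (not part of the patch) of the encode/decode round trip the test now relies on: doubles are stored as their raw IEEE-754 bit pattern in a NumericDocValuesField and decoded at search time. Names are illustrative.

    import java.io.IOException;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.NumericDocValues;

    class DoubleDVSketch {
      // Index time: store the IEEE-754 bits of the double as a long.
      static void addLatitude(Document doc, double lat) {
        doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(lat)));
      }

      // Search time: decode the bits back into a double, per segment.
      static double latitude(AtomicReaderContext context, int docID) throws IOException {
        NumericDocValues lat = DocValues.getNumeric(context.reader(), "latitude");
        return Double.longBitsToDouble(lat.get(docID));
      }
    }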
@ -30,6 +30,7 @@ import java.util.Set;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.facet.DrillSideways.DrillSidewaysResult;
import org.apache.lucene.facet.sortedset.DefaultSortedSetDocValuesReaderState;

@ -497,6 +498,7 @@ public class TestDrillSideways extends FacetTestCase {
      for(Doc rawDoc : docs) {
        Document doc = new Document();
        doc.add(newStringField("id", rawDoc.id, Field.Store.YES));
        doc.add(new SortedDocValuesField("id", new BytesRef(rawDoc.id)));
        doc.add(newStringField("content", rawDoc.contentToken, Field.Store.NO));

        if (VERBOSE) {
@ -80,27 +80,27 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
    // Reused across documents, to add the necessary facet
    // fields:
    Document doc = new Document();
    doc.add(new IntField("num", 10, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 10));
    doc.add(new FacetField("Author", "Bob"));
    writer.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new IntField("num", 20, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 20));
    doc.add(new FacetField("Author", "Lisa"));
    writer.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new IntField("num", 30, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 30));
    doc.add(new FacetField("Author", "Lisa"));
    writer.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new IntField("num", 40, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 40));
    doc.add(new FacetField("Author", "Susan"));
    writer.addDocument(config.build(taxoWriter, doc));

    doc = new Document();
    doc.add(new IntField("num", 45, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 45));
    doc.add(new FacetField("Author", "Frank"));
    writer.addDocument(config.build(taxoWriter, doc));

@ -145,7 +145,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
    FacetsConfig config = new FacetsConfig();

    Document doc = new Document();
    doc.add(new IntField("num", 10, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 10));
    doc.add(new FacetField("a", "foo1"));
    writer.addDocument(config.build(taxoWriter, doc));

@ -154,7 +154,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
    }

    doc = new Document();
    doc.add(new IntField("num", 20, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 20));
    doc.add(new FacetField("a", "foo2"));
    doc.add(new FacetField("b", "bar1"));
    writer.addDocument(config.build(taxoWriter, doc));

@ -164,7 +164,7 @@ public class TestTaxonomyFacetSumValueSource extends FacetTestCase {
    }

    doc = new Document();
    doc.add(new IntField("num", 30, Field.Store.NO));
    doc.add(new NumericDocValuesField("num", 30));
    doc.add(new FacetField("a", "foo3"));
    doc.add(new FacetField("b", "bar2"));
    doc.add(new FacetField("c", "baz1"));
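The pairing above, condensed into a sketch (not part of the patch): the same value is written once as an indexed IntField for queries and once as a NumericDocValuesField for value sources; keeping the two in sync is the caller's job. Names are illustrative.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.IntField;
    import org.apache.lucene.document.NumericDocValuesField;

    class PairedNumericFieldSketch {
      static Document numDoc(int num) {
        Document doc = new Document();
        doc.add(new IntField("num", num, Field.Store.NO)); // indexed: range/term queries
        doc.add(new NumericDocValuesField("num", num));    // doc values: sorting, ValueSources
        return doc;
      }
    }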
@ -20,7 +20,6 @@ package org.apache.lucene.search.grouping;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.CachingCollector;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector;

@ -78,7 +77,7 @@ public class GroupingSearch {
  private Bits matchingGroupHeads;

  /**
   * Constructs a <code>GroupingSearch</code> instance that groups documents by index terms using the {@link FieldCache}.
   * Constructs a <code>GroupingSearch</code> instance that groups documents by index terms using DocValues.
   * The group field can only have one token per document. This means that the field must not be analysed.
   *
   * @param groupField The name of the field to group by.
@ -18,9 +18,8 @@ package org.apache.lucene.search.grouping.term;
 */

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Sort;

@ -161,7 +160,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      this.readerContext = context;
      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
      groupIndex = DocValues.getSorted(context.reader(), groupField);

      for (GroupHead groupHead : groups.values()) {
        for (int i = 0; i < groupHead.comparators.length; i++) {

@ -276,13 +275,13 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      this.readerContext = context;
      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
      groupIndex = DocValues.getSorted(context.reader(), groupField);
      for (int i = 0; i < fields.length; i++) {
        if (fields[i].getType() == SortField.Type.SCORE) {
          continue;
        }

        sortsIndex[i] = FieldCache.DEFAULT.getTermsIndex(context.reader(), fields[i].getField());
        sortsIndex[i] = DocValues.getSorted(context.reader(), fields[i].getField());
      }

      // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.

@ -444,9 +443,9 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      this.readerContext = context;
      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
      groupIndex = DocValues.getSorted(context.reader(), groupField);
      for (int i = 0; i < fields.length; i++) {
        sortsIndex[i] = FieldCache.DEFAULT.getTermsIndex(context.reader(), fields[i].getField());
        sortsIndex[i] = DocValues.getSorted(context.reader(), fields[i].getField());
      }

      // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.

@ -587,7 +586,7 @@ public abstract class TermAllGroupHeadsCollector<GH extends AbstractAllGroupHead
    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      this.readerContext = context;
      groupIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
      groupIndex = DocValues.getSorted(context.reader(), groupField);

      // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
      ordSet.clear();

@ -18,9 +18,9 @@ package org.apache.lucene.search.grouping.term;
 */

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SentinelIntSet;

@ -105,7 +105,7 @@ public class TermAllGroupsCollector extends AbstractAllGroupsCollector<BytesRef>

  @Override
  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
    index = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
    index = DocValues.getSorted(context.reader(), groupField);

    // Clear ordSet and fill it with previous encountered groups that can occur in the current segment.
    ordSet.clear();

@ -18,9 +18,9 @@ package org.apache.lucene.search.grouping.term;
 */

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.grouping.AbstractDistinctValuesCollector;
import org.apache.lucene.search.grouping.SearchGroup;
import org.apache.lucene.util.BytesRef;

@ -109,8 +109,8 @@ public class TermDistinctValuesCollector extends AbstractDistinctValuesCollector

  @Override
  protected void doSetNextReader(AtomicReaderContext context) throws IOException {
    groupFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
    countFieldTermIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), countField);
    groupFieldTermIndex = DocValues.getSorted(context.reader(), groupField);
    countFieldTermIndex = DocValues.getSorted(context.reader(), countField);
    ordSet.clear();
    for (GroupCount group : groups) {
      int groupOrd = group.groupValue == null ? -1 : groupFieldTermIndex.lookupTerm(group.groupValue);

@ -20,9 +20,9 @@ package org.apache.lucene.search.grouping.term;
import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
import org.apache.lucene.util.BytesRef;

@ -88,6 +88,6 @@ public class TermFirstPassGroupingCollector extends AbstractFirstPassGroupingCol
  @Override
  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
    super.doSetNextReader(readerContext);
    index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), groupField);
    index = DocValues.getSorted(readerContext.reader(), groupField);
  }
}

@ -18,11 +18,11 @@ package org.apache.lucene.search.grouping.term;
 */

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.grouping.AbstractGroupFacetCollector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SentinelIntSet;

@ -34,7 +34,7 @@ import java.util.List;

/**
 * An implementation of {@link AbstractGroupFacetCollector} that computes grouped facets based on the indexed terms
 * from the {@link FieldCache}.
 * from DocValues.
 *
 * @lucene.experimental
 */

@ -128,8 +128,8 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
      segmentResults.add(createSegmentResult());
    }

    groupFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
    facetFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), facetField);
    groupFieldTermsIndex = DocValues.getSorted(context.reader(), groupField);
    facetFieldTermsIndex = DocValues.getSorted(context.reader(), facetField);

    // 1+ to allow for the -1 "not set":
    segmentFacetCounts = new int[facetFieldTermsIndex.getValueCount()+1];

@ -283,8 +283,8 @@ public abstract class TermGroupFacetCollector extends AbstractGroupFacetCollecto
      segmentResults.add(createSegmentResult());
    }

    groupFieldTermsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), groupField);
    facetFieldDocTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), facetField);
    groupFieldTermsIndex = DocValues.getSorted(context.reader(), groupField);
    facetFieldDocTermOrds = DocValues.getSortedSet(context.reader(), facetField);
    facetFieldNumTerms = (int) facetFieldDocTermOrds.getValueCount();
    if (facetFieldNumTerms == 0) {
      facetOrdTermsEnum = null;

@ -21,9 +21,9 @@ import java.io.IOException;
import java.util.Collection;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
import org.apache.lucene.search.grouping.SearchGroup;

@ -56,7 +56,7 @@ public class TermSecondPassGroupingCollector extends AbstractSecondPassGroupingC
  @Override
  protected void doSetNextReader(AtomicReaderContext readerContext) throws IOException {
    super.doSetNextReader(readerContext);
    index = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), groupField);
    index = DocValues.getSorted(readerContext.reader(), groupField);

    // Rebuild ordSet
    ordSet.clear();
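For reference, a minimal sketch (not part of the patch) of the per-segment ordinal lookup these collectors now perform via DocValues.getSorted; the helper name is illustrative, and the fill-in lookupOrd style assumed here is the 4.x one.

    import java.io.IOException;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedDocValues;
    import org.apache.lucene.util.BytesRef;

    class GroupOrdSketch {
      // Resolve a document's group value through per-segment ordinals.
      static BytesRef groupValue(AtomicReaderContext context, String groupField, int docID)
          throws IOException {
        SortedDocValues groupIndex = DocValues.getSorted(context.reader(), groupField);
        int ord = groupIndex.getOrd(docID); // -1 when the document has no value
        if (ord == -1) {
          return null;
        }
        BytesRef result = new BytesRef();
        groupIndex.lookupOrd(ord, result);  // 4.x fill-in style lookup
        return result;
      }
    }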
@ -22,18 +22,20 @@ import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo.DocValuesType;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.ScoreDoc;

@ -256,6 +258,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
    docNoGroup.add(content);
    IntField id = new IntField("id", 0, Field.Store.NO);
    doc.add(id);
    NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
    docNoGroup.add(id);
    final GroupDoc[] groupDocs = new GroupDoc[numDocs];
    for (int i = 0; i < numDocs; i++) {

@ -291,6 +294,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
      sort3.setStringValue(groupDoc.sort3.utf8ToString());
      content.setStringValue(groupDoc.content);
      id.setIntValue(groupDoc.id);
      idDV.setLongValue(groupDoc.id);
      if (groupDoc.group == null) {
        w.addDocument(docNoGroup);
      } else {

@ -301,11 +305,10 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
    final DirectoryReader r = w.getReader();
    w.shutdown();

    // NOTE: intentional but temporary field cache insanity!
    final FieldCache.Ints docIdToFieldId = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
    final NumericDocValues docIdToFieldId = MultiDocValues.getNumericValues(r, "id");
    final int[] fieldIdToDocID = new int[numDocs];
    for (int i = 0; i < numDocs; i++) {
      int fieldId = docIdToFieldId.get(i);
      int fieldId = (int) docIdToFieldId.get(i);
      fieldIdToDocID[fieldId] = i;
    }

@ -315,7 +318,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
    for (int contentID = 0; contentID < 3; contentID++) {
      final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real" + contentID)), numDocs).scoreDocs;
      for (ScoreDoc hit : hits) {
        final GroupDoc gd = groupDocs[docIdToFieldId.get(hit.doc)];
        final GroupDoc gd = groupDocs[(int) docIdToFieldId.get(hit.doc)];
        assertTrue(gd.score == 0.0);
        gd.score = hit.score;
        int docId = gd.id;

@ -342,7 +345,7 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
      int[] actualGroupHeads = allGroupHeadsCollector.retrieveGroupHeads();
      // The actual group heads contain Lucene docIDs. Need to change them into our id value.
      for (int i = 0; i < actualGroupHeads.length; i++) {
        actualGroupHeads[i] = docIdToFieldId.get(actualGroupHeads[i]);
        actualGroupHeads[i] = (int) docIdToFieldId.get(actualGroupHeads[i]);
      }
      // Allows us to easily iterate and assert the actual and expected results.
      Arrays.sort(expectedGroupHeads);
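A sketch (not part of the patch) of the MultiDocValues top-level view that replaces the SlowCompositeReaderWrapper + FieldCache combination above; the cast to int assumes, as in the test, that the values were indexed as small ints. Names are illustrative.

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.MultiDocValues;
    import org.apache.lucene.index.NumericDocValues;

    class TopLevelIdSketch {
      // Map a composite-reader docID back to the test's own "id" without FieldCache.
      static int fieldId(DirectoryReader r, int docID) throws IOException {
        NumericDocValues docIdToFieldId = MultiDocValues.getNumericValues(r, "id");
        return (int) docIdToFieldId.get(docID);
      }
    }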
@ -21,6 +21,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.*;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.MultiDocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.FieldInfo.IndexOptions;
import org.apache.lucene.index.RandomIndexWriter;

@ -670,15 +672,11 @@
        dir,
        newIndexWriterConfig(TEST_VERSION_CURRENT,
                             new MockAnalyzer(random())));
    boolean canUseIDV = true;

    Document doc = new Document();
    Document docNoGroup = new Document();
    Field idvGroupField = new SortedDocValuesField("group_dv", new BytesRef());
    if (canUseIDV) {
      doc.add(idvGroupField);
      docNoGroup.add(idvGroupField);
    }
    doc.add(idvGroupField);
    docNoGroup.add(idvGroupField);

    Field group = newStringField("group", "", Field.Store.NO);
    doc.add(group);

@ -693,7 +691,10 @@
    docNoGroup.add(content);
    IntField id = new IntField("id", 0, Field.Store.NO);
    doc.add(id);
    NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
    doc.add(idDV);
    docNoGroup.add(id);
    docNoGroup.add(idDV);
    final GroupDoc[] groupDocs = new GroupDoc[numDocs];
    for(int i=0;i<numDocs;i++) {
      final BytesRef groupValue;

@ -716,10 +717,9 @@
      groupDocs[i] = groupDoc;
      if (groupDoc.group != null) {
        group.setStringValue(groupDoc.group.utf8ToString());
        if (canUseIDV) {
          idvGroupField.setBytesValue(BytesRef.deepCopyOf(groupDoc.group));
        }
      } else if (canUseIDV) {
        idvGroupField.setBytesValue(BytesRef.deepCopyOf(groupDoc.group));
      } else {
        // TODO: not true
        // Must explicitly set empty string, else eg if
        // the segment has all docs missing the field then
        // we get null back instead of empty BytesRef:

@ -729,6 +729,7 @@
      sort2.setStringValue(groupDoc.sort2.utf8ToString());
      content.setStringValue(groupDoc.content);
      id.setIntValue(groupDoc.id);
      idDV.setLongValue(groupDoc.id);
      if (groupDoc.group == null) {
        w.addDocument(docNoGroup);
      } else {

@ -742,8 +743,7 @@
    final DirectoryReader r = w.getReader();
    w.shutdown();

    // NOTE: intentional but temporary field cache insanity!
    final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(r), "id", false);
    final NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
    DirectoryReader rBlocks = null;
    Directory dirBlocks = null;

@ -753,17 +753,12 @@
      System.out.println("\nTEST: searcher=" + s);
    }

    if (SlowCompositeReaderWrapper.class.isAssignableFrom(s.getIndexReader().getClass())) {
      canUseIDV = false;
    } else {
      canUseIDV = true;
    }
    final ShardState shards = new ShardState(s);

    for(int contentID=0;contentID<3;contentID++) {
      final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
      for(ScoreDoc hit : hits) {
        final GroupDoc gd = groupDocs[docIDToID.get(hit.doc)];
        final GroupDoc gd = groupDocs[(int) docIDToID.get(hit.doc)];
        assertTrue(gd.score == 0.0);
        gd.score = hit.score;
        assertEquals(gd.id, docIDToID.get(hit.doc));

@ -779,7 +774,7 @@
      dirBlocks = newDirectory();
      rBlocks = getDocBlockReader(dirBlocks, groupDocs);
      final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
      final FieldCache.Ints docIDToIDBlocks = FieldCache.DEFAULT.getInts(SlowCompositeReaderWrapper.wrap(rBlocks), "id", false);
      final NumericDocValues docIDToIDBlocks = MultiDocValues.getNumericValues(rBlocks, "id");

      final IndexSearcher sBlocks = newSearcher(rBlocks);
      final ShardState shardsBlocks = new ShardState(sBlocks);

@ -800,7 +795,7 @@
        //" dfnew=" + sBlocks.docFreq(new Term("content", "real"+contentID)));
        final ScoreDoc[] hits = sBlocks.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
        for(ScoreDoc hit : hits) {
          final GroupDoc gd = groupDocsByID[docIDToIDBlocks.get(hit.doc)];
          final GroupDoc gd = groupDocsByID[(int) docIDToIDBlocks.get(hit.doc)];
          assertTrue(gd.score2 == 0.0);
          gd.score2 = hit.score;
          assertEquals(gd.id, docIDToIDBlocks.get(hit.doc));

@ -854,10 +849,7 @@
          System.out.println("TEST: groupSort=" + groupSort + " docSort=" + docSort + " searchTerm=" + searchTerm + " dF=" + r.docFreq(new Term("content", searchTerm)) +" dFBlock=" + rBlocks.docFreq(new Term("content", searchTerm)) + " topNGroups=" + topNGroups + " groupOffset=" + groupOffset + " docOffset=" + docOffset + " doCache=" + doCache + " docsPerGroup=" + docsPerGroup + " doAllGroups=" + doAllGroups + " getScores=" + getScores + " getMaxScores=" + getMaxScores);
        }

        String groupField = "group";
        if (canUseIDV && random().nextBoolean()) {
          groupField += "_dv";
        }
        String groupField = "group_dv";
        if (VERBOSE) {
          System.out.println("  groupField=" + groupField);
        }

@ -940,7 +932,7 @@
        ValueHolder<Boolean> idvBasedImplsUsedSharded = new ValueHolder<>(false);
        final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, query, groupSort, docSort,
            groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, canUseIDV, false, idvBasedImplsUsedSharded);
            groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores, true, false, idvBasedImplsUsedSharded);
        final AbstractSecondPassGroupingCollector<?> c2;
        if (topGroups != null) {

@ -1257,7 +1249,7 @@
    }
  }

  private void assertEquals(FieldCache.Ints docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
  private void assertEquals(NumericDocValues docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
    if (expected == null) {
      assertNull(actual);
      return;
@ -21,10 +21,10 @@ import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.util.BytesRef;

@ -85,7 +85,7 @@ abstract class TermsCollector extends SimpleCollector {

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
      docTermOrds = DocValues.getSortedSet(context.reader(), field);
    }
  }

@ -107,7 +107,7 @@ abstract class TermsCollector extends SimpleCollector {

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
      fromDocTerms = DocValues.getBinary(context.reader(), field);
    }
  }

@ -21,10 +21,10 @@ import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.util.ArrayUtil;

@ -131,7 +131,7 @@ abstract class TermsWithScoreCollector extends SimpleCollector {

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      fromDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, false);
      fromDocTerms = DocValues.getBinary(context.reader(), field);
    }

    static class Avg extends SV {

@ -217,7 +217,7 @@ abstract class TermsWithScoreCollector extends SimpleCollector {

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      fromDocTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
      fromDocTermOrds = DocValues.getSortedSet(context.reader(), field);
    }

    static class Avg extends MV {
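For reference, a minimal sketch (not part of the patch) of iterating a document's sorted-set ordinals, the pattern the join collectors switch to above; the helper name is illustrative, and the setDocument/nextOrd iteration assumed here is the 4.x API.

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.index.AtomicReaderContext;
    import org.apache.lucene.index.DocValues;
    import org.apache.lucene.index.SortedSetDocValues;
    import org.apache.lucene.util.BytesRef;

    class JoinTermsSketch {
      // Gather every join term of one document from its sorted-set doc values.
      static List<BytesRef> joinTerms(AtomicReaderContext context, String field, int docID)
          throws IOException {
        SortedSetDocValues ords = DocValues.getSortedSet(context.reader(), field);
        ords.setDocument(docID); // position the iterator on this document
        List<BytesRef> terms = new ArrayList<>();
        for (long ord = ords.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = ords.nextOrd()) {
          BytesRef term = new BytesRef();
          ords.lookupOrd(ord, term); // resolve the ordinal to term bytes
          terms.add(BytesRef.deepCopyOf(term));
        }
        return terms;
      }
    }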
@ -20,6 +20,7 @@ package org.apache.lucene.search.join;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.NoMergePolicy;

@ -58,14 +59,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    List<Document> docs = new ArrayList<>();
    Document document = new Document();
    document.add(new StringField("field2", "a", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("a")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "b", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("b")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "c", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("c")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();

@ -78,14 +82,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    docs.clear();
    document = new Document();
    document.add(new StringField("field2", "c", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("c")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "d", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("d")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "e", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("e")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();

@ -97,14 +104,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    docs.clear();
    document = new Document();
    document.add(new StringField("field2", "e", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("e")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "f", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("f")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "g", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("g")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();

@ -116,14 +126,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    docs.clear();
    document = new Document();
    document.add(new StringField("field2", "g", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("g")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "h", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("h")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "i", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("i")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();

@ -136,14 +149,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    docs.clear();
    document = new Document();
    document.add(new StringField("field2", "i", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("i")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "j", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("j")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "k", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("k")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();

@ -155,14 +171,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    docs.clear();
    document = new Document();
    document.add(new StringField("field2", "k", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("k")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "l", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("l")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "m", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("m")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();

@ -180,14 +199,17 @@ public class TestBlockJoinSorting extends LuceneTestCase {
    docs.clear();
    document = new Document();
    document.add(new StringField("field2", "m", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("m")));
    document.add(new StringField("filter_1", "T", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "n", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("n")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();
    document.add(new StringField("field2", "o", Field.Store.NO));
    document.add(new SortedDocValuesField("field2", new BytesRef("o")));
    document.add(new StringField("filter_1", "F", Field.Store.NO));
    docs.add(document);
    document = new Document();
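The recurring pairing in this test, condensed into a sketch (not part of the patch): each keyword value is written both as an indexed StringField for filtering and as a SortedDocValuesField for sorting. Names are illustrative.

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.SortedDocValuesField;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.util.BytesRef;

    class PairedKeywordFieldSketch {
      static Document childDoc(String field2, String filter) {
        Document doc = new Document();
        doc.add(new StringField("field2", field2, Field.Store.NO));        // indexed: filtering
        doc.add(new SortedDocValuesField("field2", new BytesRef(field2))); // doc values: sorting
        doc.add(new StringField("filter_1", filter, Field.Store.NO));
        return doc;
      }
    }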
@@ -34,10 +34,13 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocsEnum;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;

@@ -53,7 +56,6 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;

@@ -68,9 +70,11 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;
import org.apache.lucene.util.TestUtil;
import org.junit.Test;

@SuppressCodecs({"Lucene40", "Lucene41", "Lucene42"}) // we need SortedSet, docsWithField
public class TestJoinUtil extends LuceneTestCase {

public void testSimple() throws Exception {

@@ -89,20 +93,25 @@ public class TestJoinUtil extends LuceneTestCase {
doc.add(new TextField("description", "random text", Field.Store.NO));
doc.add(new TextField("name", "name1", Field.Store.NO));
doc.add(new TextField(idField, "1", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("1")));
w.addDocument(doc);

// 1
doc = new Document();
doc.add(new TextField("price", "10.0", Field.Store.NO));
doc.add(new TextField(idField, "2", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
doc.add(new TextField(toField, "1", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
w.addDocument(doc);

// 2
doc = new Document();
doc.add(new TextField("price", "20.0", Field.Store.NO));
doc.add(new TextField(idField, "3", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
doc.add(new TextField(toField, "1", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
w.addDocument(doc);

// 3

@@ -110,6 +119,7 @@ public class TestJoinUtil extends LuceneTestCase {
doc.add(new TextField("description", "more random text", Field.Store.NO));
doc.add(new TextField("name", "name2", Field.Store.NO));
doc.add(new TextField(idField, "4", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("4")));
w.addDocument(doc);
w.commit();

@@ -117,14 +127,18 @@ public class TestJoinUtil extends LuceneTestCase {
doc = new Document();
doc.add(new TextField("price", "10.0", Field.Store.NO));
doc.add(new TextField(idField, "5", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
doc.add(new TextField(toField, "4", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
w.addDocument(doc);

// 5
doc = new Document();
doc.add(new TextField("price", "20.0", Field.Store.NO));
doc.add(new TextField(idField, "6", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
doc.add(new TextField(toField, "4", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
w.addDocument(doc);

IndexSearcher indexSearcher = new IndexSearcher(w.getReader());

@@ -180,16 +194,18 @@ public class TestJoinUtil extends LuceneTestCase {
doc.add(new TextField("description", "random text", Field.Store.NO));
doc.add(new TextField("name", "name1", Field.Store.NO));
doc.add(new TextField(idField, "0", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("0")));
w.addDocument(doc);

doc = new Document();
doc.add(new TextField("price", "10.0", Field.Store.NO));
for(int i=0;i<300;i++){
doc.add(new TextField(toField, ""+i, Field.Store.NO));
if(!multipleValues){
w.addDocument(doc);
doc.removeFields(toField);

if (multipleValues) {
for(int i=0;i<300;i++) {
doc.add(new SortedSetDocValuesField(toField, new BytesRef(""+i)));
}
} else {
doc.add(new SortedDocValuesField(toField, new BytesRef("0")));
}
w.addDocument(doc);

@@ -317,20 +333,25 @@ public class TestJoinUtil extends LuceneTestCase {
doc.add(new TextField("description", "A random movie", Field.Store.NO));
doc.add(new TextField("name", "Movie 1", Field.Store.NO));
doc.add(new TextField(idField, "1", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("1")));
w.addDocument(doc);

// 1
doc = new Document();
doc.add(new TextField("subtitle", "The first subtitle of this movie", Field.Store.NO));
doc.add(new TextField(idField, "2", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("2")));
doc.add(new TextField(toField, "1", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
w.addDocument(doc);

// 2
doc = new Document();
doc.add(new TextField("subtitle", "random subtitle; random event movie", Field.Store.NO));
doc.add(new TextField(idField, "3", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("3")));
doc.add(new TextField(toField, "1", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("1")));
w.addDocument(doc);

// 3

@@ -338,6 +359,7 @@ public class TestJoinUtil extends LuceneTestCase {
doc.add(new TextField("description", "A second random movie", Field.Store.NO));
doc.add(new TextField("name", "Movie 2", Field.Store.NO));
doc.add(new TextField(idField, "4", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("4")));
w.addDocument(doc);
w.commit();

@@ -345,14 +367,18 @@ public class TestJoinUtil extends LuceneTestCase {
doc = new Document();
doc.add(new TextField("subtitle", "a very random event happened during christmas night", Field.Store.NO));
doc.add(new TextField(idField, "5", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("5")));
doc.add(new TextField(toField, "4", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
w.addDocument(doc);

// 5
doc = new Document();
doc.add(new TextField("subtitle", "movie end movie test 123 test 123 random", Field.Store.NO));
doc.add(new TextField(idField, "6", Field.Store.NO));
doc.add(new SortedDocValuesField(idField, new BytesRef("6")));
doc.add(new TextField(toField, "4", Field.Store.NO));
doc.add(new SortedDocValuesField(toField, new BytesRef("4")));
w.addDocument(doc);

IndexSearcher indexSearcher = new IndexSearcher(w.getReader());

@@ -572,6 +598,11 @@ public class TestJoinUtil extends LuceneTestCase {
context.fromDocuments.get(linkValue).add(docs[i]);
context.randomValueFromDocs.get(value).add(docs[i]);
document.add(newTextField(random(), "from", linkValue, Field.Store.NO));
if (multipleValuesPerDocument) {
document.add(new SortedSetDocValuesField("from", new BytesRef(linkValue)));
} else {
document.add(new SortedDocValuesField("from", new BytesRef(linkValue)));
}
} else {
if (!context.toDocuments.containsKey(linkValue)) {
context.toDocuments.put(linkValue, new ArrayList<RandomDoc>());

@@ -583,6 +614,11 @@ public class TestJoinUtil extends LuceneTestCase {
context.toDocuments.get(linkValue).add(docs[i]);
context.randomValueToDocs.get(value).add(docs[i]);
document.add(newTextField(random(), "to", linkValue, Field.Store.NO));
if (multipleValuesPerDocument) {
document.add(new SortedSetDocValuesField("to", new BytesRef(linkValue)));
} else {
document.add(new SortedDocValuesField("to", new BytesRef(linkValue)));
}
}
}

@@ -644,7 +680,7 @@ public class TestJoinUtil extends LuceneTestCase {

@Override
protected void doSetNextReader(AtomicReaderContext context) throws IOException {
docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), fromField);
docTermOrds = DocValues.getSortedSet(context.reader(), fromField);
}

@Override

@@ -682,8 +718,8 @@ public class TestJoinUtil extends LuceneTestCase {

@Override
protected void doSetNextReader(AtomicReaderContext context) throws IOException {
terms = FieldCache.DEFAULT.getTerms(context.reader(), fromField, true);
docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), fromField);
terms = DocValues.getBinary(context.reader(), fromField);
docsWithField = DocValues.getDocsWithField(context.reader(), fromField);
}

@Override

@@ -753,7 +789,7 @@ public class TestJoinUtil extends LuceneTestCase {
@Override
protected void doSetNextReader(AtomicReaderContext context) throws IOException {
docBase = context.docBase;
docTermOrds = FieldCache.DEFAULT.getDocTermOrds(context.reader(), toField);
docTermOrds = DocValues.getSortedSet(context.reader(), toField);
}

@Override

@@ -781,7 +817,7 @@ public class TestJoinUtil extends LuceneTestCase {

@Override
protected void doSetNextReader(AtomicReaderContext context) throws IOException {
terms = FieldCache.DEFAULT.getTerms(context.reader(), toField, false);
terms = DocValues.getBinary(context.reader(), toField);
docBase = context.docBase;
}
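In the collector hunks above, per-segment setup switches from FieldCache uninversion to the DocValues helpers, which read values written at index time. A sketch of the new doSetNextReader shape, assuming fields indexed with the doc-values fields added earlier in this test:

    // Sketch: per-segment state for a join collector after this change.
    private SortedSetDocValues docTermOrds; // multi-valued join keys
    private BinaryDocValues terms;          // single-valued join keys
    private Bits docsWithField;             // which docs actually have a value

    @Override
    protected void doSetNextReader(AtomicReaderContext context) throws IOException {
      // DocValues.* replaces FieldCache.DEFAULT.getDocTermOrds/getTerms/getDocsWithField.
      docTermOrds = DocValues.getSortedSet(context.reader(), "from");
      terms = DocValues.getBinary(context.reader(), "from");
      docsWithField = DocValues.getDocsWithField(context.reader(), "from");
    }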
@@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.lucene.uninverting;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more

@@ -30,6 +30,7 @@ import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.IndexReader; // javadocs
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;

@@ -45,67 +46,11 @@ import org.apache.lucene.util.RamUsageEstimator;
* <p>Created: May 19, 2004 11:13:14 AM
*
* @since lucene 1.4
* @see org.apache.lucene.util.FieldCacheSanityChecker
* @see FieldCacheSanityChecker
*
* @lucene.internal
*/
public interface FieldCache {

/** Field values as 32-bit signed integers */
public static abstract class Ints {
/** Return an integer representation of this field's value. */
public abstract int get(int docID);

/** Zero value for every document */
public static final Ints EMPTY = new Ints() {
@Override
public int get(int docID) {
return 0;
}
};
}

/** Field values as 64-bit signed long integers */
public static abstract class Longs {
/** Return a long representation of this field's value. */
public abstract long get(int docID);

/** Zero value for every document */
public static final Longs EMPTY = new Longs() {
@Override
public long get(int docID) {
return 0;
}
};
}

/** Field values as 32-bit floats */
public static abstract class Floats {
/** Return a float representation of this field's value. */
public abstract float get(int docID);

/** Zero value for every document */
public static final Floats EMPTY = new Floats() {
@Override
public float get(int docID) {
return 0;
}
};
}

/** Field values as 64-bit doubles */
public static abstract class Doubles {
/** Return a double representation of this field's value. */
public abstract double get(int docID);

/** Zero value for every document */
public static final Doubles EMPTY = new Doubles() {
@Override
public double get(int docID) {
return 0;
}
};
}
interface FieldCache {

/**
* Placeholder indicating creation of this cache is currently in-progress.

@@ -115,9 +60,7 @@ public interface FieldCache {
}

/**
* Marker interface as super-interface to all parsers. It
* is used to specify a custom parser to {@link
* SortField#SortField(String, FieldCache.Parser)}.
* interface to all parsers. It is used to parse different numeric types.
*/
public interface Parser {

@@ -130,38 +73,9 @@ public interface FieldCache {
* @throws IOException if an {@link IOException} occurs
*/
public TermsEnum termsEnum(Terms terms) throws IOException;
}

/** Interface to parse ints from document fields.
* @see FieldCache#getInts(AtomicReader, String, FieldCache.IntParser, boolean)
*/
public interface IntParser extends Parser {
/** Return an integer representation of this field's value. */
public int parseInt(BytesRef term);
}

/** Interface to parse floats from document fields.
* @see FieldCache#getFloats(AtomicReader, String, FieldCache.FloatParser, boolean)
*/
public interface FloatParser extends Parser {
/** Return a float representation of this field's value. */
public float parseFloat(BytesRef term);
}

/** Interface to parse longs from document fields.
* @see FieldCache#getLongs(AtomicReader, String, FieldCache.LongParser, boolean)
*/
public interface LongParser extends Parser {
/** Return a long representation of this field's value. */
public long parseLong(BytesRef term);
}

/** Interface to parse doubles from document fields.
* @see FieldCache#getDoubles(AtomicReader, String, FieldCache.DoubleParser, boolean)
*/
public interface DoubleParser extends Parser {
/** Return a double representation of this field's value. */
public double parseDouble(BytesRef term);

/** Parses this field's value */
public long parseValue(BytesRef term);
}

/** Expert: The cache used internally by sorting and range query classes. */

@@ -171,9 +85,9 @@ public interface FieldCache {
* A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
* via {@link IntField}/{@link NumericTokenStream}.
*/
public static final IntParser NUMERIC_UTILS_INT_PARSER=new IntParser(){
public static final Parser NUMERIC_UTILS_INT_PARSER = new Parser() {
@Override
public int parseInt(BytesRef term) {
public long parseValue(BytesRef term) {
return NumericUtils.prefixCodedToInt(term);
}

@@ -192,11 +106,14 @@ public interface FieldCache {
* A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
* via {@link FloatField}/{@link NumericTokenStream}.
*/
public static final FloatParser NUMERIC_UTILS_FLOAT_PARSER=new FloatParser(){
public static final Parser NUMERIC_UTILS_FLOAT_PARSER = new Parser() {
@Override
public float parseFloat(BytesRef term) {
return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term));
public long parseValue(BytesRef term) {
int val = NumericUtils.prefixCodedToInt(term);
if (val<0) val ^= 0x7fffffff;
return val;
}

@Override
public String toString() {
return FieldCache.class.getName()+".NUMERIC_UTILS_FLOAT_PARSER";

@@ -212,9 +129,9 @@ public interface FieldCache {
* A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
* via {@link LongField}/{@link NumericTokenStream}.
*/
public static final LongParser NUMERIC_UTILS_LONG_PARSER = new LongParser(){
public static final Parser NUMERIC_UTILS_LONG_PARSER = new Parser() {
@Override
public long parseLong(BytesRef term) {
public long parseValue(BytesRef term) {
return NumericUtils.prefixCodedToLong(term);
}
@Override

@@ -232,10 +149,12 @@ public interface FieldCache {
* A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
* via {@link DoubleField}/{@link NumericTokenStream}.
*/
public static final DoubleParser NUMERIC_UTILS_DOUBLE_PARSER = new DoubleParser(){
public static final Parser NUMERIC_UTILS_DOUBLE_PARSER = new Parser() {
@Override
public double parseDouble(BytesRef term) {
return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(term));
public long parseValue(BytesRef term) {
long val = NumericUtils.prefixCodedToLong(term);
if (val<0) val ^= 0x7fffffffffffffffL;
return val;
}
@Override
public String toString() {

@@ -256,83 +175,7 @@ public interface FieldCache {
public Bits getDocsWithField(AtomicReader reader, String field) throws IOException;

/**
* Returns an {@link Ints} over the values found in documents in the given
* field.
*
* @see #getInts(AtomicReader, String, IntParser, boolean)
*/
public Ints getInts(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;

/**
* Returns an {@link Ints} over the values found in documents in the given
* field. If the field was indexed as {@link NumericDocValuesField}, it simply
* uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
* Otherwise, it checks the internal cache for an appropriate entry, and if
* none is found, reads the terms in <code>field</code> as ints and returns
* an array of size <code>reader.maxDoc()</code> of the value each document
* has in the given field.
*
* @param reader
* Used to get field values.
* @param field
* Which field contains the longs.
* @param parser
* Computes int for string values. May be {@code null} if the
* requested field was indexed as {@link NumericDocValuesField} or
* {@link IntField}.
* @param setDocsWithField
* If true then {@link #getDocsWithField} will also be computed and
* stored in the FieldCache.
* @return The values in the given field for each document.
* @throws IOException
* If any error occurs.
*/
public Ints getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField) throws IOException;

/**
* Returns a {@link Floats} over the values found in documents in the given
* field.
*
* @see #getFloats(AtomicReader, String, FloatParser, boolean)
*/
public Floats getFloats(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;

/**
* Returns a {@link Floats} over the values found in documents in the given
* field. If the field was indexed as {@link NumericDocValuesField}, it simply
* uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
* Otherwise, it checks the internal cache for an appropriate entry, and if
* none is found, reads the terms in <code>field</code> as floats and returns
* an array of size <code>reader.maxDoc()</code> of the value each document
* has in the given field.
*
* @param reader
* Used to get field values.
* @param field
* Which field contains the floats.
* @param parser
* Computes float for string values. May be {@code null} if the
* requested field was indexed as {@link NumericDocValuesField} or
* {@link FloatField}.
* @param setDocsWithField
* If true then {@link #getDocsWithField} will also be computed and
* stored in the FieldCache.
* @return The values in the given field for each document.
* @throws IOException
* If any error occurs.
*/
public Floats getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField) throws IOException;

/**
* Returns a {@link Longs} over the values found in documents in the given
* field.
*
* @see #getLongs(AtomicReader, String, LongParser, boolean)
*/
public Longs getLongs(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;

/**
* Returns a {@link Longs} over the values found in documents in the given
* Returns a {@link NumericDocValues} over the values found in documents in the given
* field. If the field was indexed as {@link NumericDocValuesField}, it simply
* uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
* Otherwise, it checks the internal cache for an appropriate entry, and if

@@ -355,41 +198,7 @@ public interface FieldCache {
* @throws IOException
* If any error occurs.
*/
public Longs getLongs(AtomicReader reader, String field, LongParser parser, boolean setDocsWithField) throws IOException;

/**
* Returns a {@link Doubles} over the values found in documents in the given
* field.
*
* @see #getDoubles(AtomicReader, String, DoubleParser, boolean)
*/
public Doubles getDoubles(AtomicReader reader, String field, boolean setDocsWithField) throws IOException;

/**
* Returns a {@link Doubles} over the values found in documents in the given
* field. If the field was indexed as {@link NumericDocValuesField}, it simply
* uses {@link AtomicReader#getNumericDocValues(String)} to read the values.
* Otherwise, it checks the internal cache for an appropriate entry, and if
* none is found, reads the terms in <code>field</code> as doubles and returns
* an array of size <code>reader.maxDoc()</code> of the value each document
* has in the given field.
*
* @param reader
* Used to get field values.
* @param field
* Which field contains the longs.
* @param parser
* Computes double for string values. May be {@code null} if the
* requested field was indexed as {@link NumericDocValuesField} or
* {@link DoubleField}.
* @param setDocsWithField
* If true then {@link #getDocsWithField} will also be computed and
* stored in the FieldCache.
* @return The values in the given field for each document.
* @throws IOException
* If any error occurs.
*/
public Doubles getDoubles(AtomicReader reader, String field, DoubleParser parser, boolean setDocsWithField) throws IOException;
public NumericDocValues getNumerics(AtomicReader reader, String field, Parser parser, boolean setDocsWithField) throws IOException;

/** Checks the internal cache for an appropriate entry, and if none
* is found, reads the term values in <code>field</code>

@@ -562,7 +371,7 @@ public interface FieldCache {
/**
* If non-null, FieldCacheImpl will warn whenever
* entries are created that are not sane according to
* {@link org.apache.lucene.util.FieldCacheSanityChecker}.
* {@link FieldCacheSanityChecker}.
*/
public void setInfoStream(PrintStream stream);
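The FieldCache hunks above collapse the four typed accessors (getInts, getFloats, getLongs, getDoubles) and their parser sub-interfaces into one getNumerics method: every numeric field now comes back as a NumericDocValues of longs, and the Parser decides how term bytes become that long. A minimal call sketch (assumes an AtomicReader over a field indexed as IntField):

    // All numeric types share one accessor; callers cast/decode the long.
    NumericDocValues ids = FieldCache.DEFAULT.getNumerics(
        atomicReader, "id", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
    int firstId = (int) ids.get(0);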
@@ -1,4 +1,4 @@
package org.apache.lucene.search;
package org.apache.lucene.uninverting;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more

@@ -38,9 +38,9 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FieldCacheSanityChecker;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.PagedBytes;
import org.apache.lucene.util.packed.GrowableWriter;

@@ -61,11 +61,8 @@ class FieldCacheImpl implements FieldCache {
}

private synchronized void init() {
caches = new HashMap<>(9);
caches.put(Integer.TYPE, new IntCache(this));
caches.put(Float.TYPE, new FloatCache(this));
caches = new HashMap<>(6);
caches.put(Long.TYPE, new LongCache(this));
caches.put(Double.TYPE, new DoubleCache(this));
caches.put(BinaryDocValues.class, new BinaryDocValuesCache(this));
caches.put(SortedDocValues.class, new SortedDocValuesCache(this));
caches.put(DocTermOrds.class, new DocTermOrdsCache(this));

@@ -352,54 +349,6 @@ class FieldCacheImpl implements FieldCache {
caches.get(DocsWithFieldCache.class).put(reader, new CacheKey(field, null), bits);
}

@Override
public Ints getInts (AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
return getInts(reader, field, null, setDocsWithField);
}

@Override
public Ints getInts(AtomicReader reader, String field, IntParser parser, boolean setDocsWithField)
throws IOException {
final NumericDocValues valuesIn = reader.getNumericDocValues(field);
if (valuesIn != null) {
// Not cached here by FieldCacheImpl (cached instead
// per-thread by SegmentReader):
return new Ints() {
@Override
public int get(int docID) {
return (int) valuesIn.get(docID);
}
};
} else {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return Ints.EMPTY;
} else if (info.hasDocValues()) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
return Ints.EMPTY;
}
return (Ints) caches.get(Integer.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
}
}

static class IntsFromArray extends Ints {
private final PackedInts.Reader values;
private final int minValue;

public IntsFromArray(PackedInts.Reader values, int minValue) {
assert values.getBitsPerValue() <= 32;
this.values = values;
this.minValue = minValue;
}

@Override
public int get(int docID) {
final long delta = values.get(docID);
return minValue + (int) delta;
}
}

private static class HoldsOneThing<T> {
private T it;

@@ -421,79 +370,6 @@ class FieldCacheImpl implements FieldCache {
public long minValue;
}

static final class IntCache extends Cache {
IntCache(FieldCacheImpl wrapper) {
super(wrapper);
}

@Override
protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
throws IOException {

final IntParser parser = (IntParser) key.custom;
if (parser == null) {
// Confusing: must delegate to wrapper (vs simply
// setting parser = NUMERIC_UTILS_INT_PARSER) so
// cache key includes NUMERIC_UTILS_INT_PARSER:
return wrapper.getInts(reader, key.field, NUMERIC_UTILS_INT_PARSER, setDocsWithField);
}

final HoldsOneThing<GrowableWriterAndMinValue> valuesRef = new HoldsOneThing<>();

Uninvert u = new Uninvert() {
private int minValue;
private int currentValue;
private GrowableWriter values;

@Override
public void visitTerm(BytesRef term) {
currentValue = parser.parseInt(term);
if (values == null) {
// Lazy alloc so for the numeric field case
// (which will hit a NumberFormatException
// when we first try the DEFAULT_INT_PARSER),
// we don't double-alloc:
int startBitsPerValue;
// Make sure that missing values (0) can be stored without resizing
if (currentValue < 0) {
minValue = currentValue;
startBitsPerValue = PackedInts.bitsRequired((-minValue) & 0xFFFFFFFFL);
} else {
minValue = 0;
startBitsPerValue = PackedInts.bitsRequired(currentValue);
}
values = new GrowableWriter(startBitsPerValue, reader.maxDoc(), PackedInts.FAST);
if (minValue != 0) {
values.fill(0, values.size(), (-minValue) & 0xFFFFFFFFL); // default value must be 0
}
valuesRef.set(new GrowableWriterAndMinValue(values, minValue));
}
}

@Override
public void visitDoc(int docID) {
values.set(docID, (currentValue - minValue) & 0xFFFFFFFFL);
}

@Override
protected TermsEnum termsEnum(Terms terms) throws IOException {
return parser.termsEnum(terms);
}
};

u.uninvert(reader, key.field, setDocsWithField);

if (setDocsWithField) {
wrapper.setDocsWithField(reader, key.field, u.docsWithField);
}
GrowableWriterAndMinValue values = valuesRef.get();
if (values == null) {
return new IntsFromArray(new PackedInts.NullReader(reader.maxDoc()), 0);
}
return new IntsFromArray(values.writer.getMutable(), (int) values.minValue);
}
}

public Bits getDocsWithField(AtomicReader reader, String field) throws IOException {
final FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
if (fieldInfo == null) {

@@ -563,145 +439,31 @@ class FieldCacheImpl implements FieldCache {
return res;
}
}

@Override
public Floats getFloats (AtomicReader reader, String field, boolean setDocsWithField)
throws IOException {
return getFloats(reader, field, null, setDocsWithField);
}

@Override
public Floats getFloats(AtomicReader reader, String field, FloatParser parser, boolean setDocsWithField)
throws IOException {
final NumericDocValues valuesIn = reader.getNumericDocValues(field);
if (valuesIn != null) {
// Not cached here by FieldCacheImpl (cached instead
// per-thread by SegmentReader):
return new Floats() {
@Override
public float get(int docID) {
return Float.intBitsToFloat((int) valuesIn.get(docID));
}
};
} else {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return Floats.EMPTY;
} else if (info.hasDocValues()) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
return Floats.EMPTY;
}
return (Floats) caches.get(Float.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
}
}

static class FloatsFromArray extends Floats {
private final float[] values;

public FloatsFromArray(float[] values) {
this.values = values;
}

@Override
public float get(int docID) {
return values[docID];
}
}

static final class FloatCache extends Cache {
FloatCache(FieldCacheImpl wrapper) {
super(wrapper);
}

@Override
protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
throws IOException {

final FloatParser parser = (FloatParser) key.custom;
if (parser == null) {
// Confusing: must delegate to wrapper (vs simply
// setting parser = NUMERIC_UTILS_FLOAT_PARSER) so
// cache key includes NUMERIC_UTILS_FLOAT_PARSER:
return wrapper.getFloats(reader, key.field, NUMERIC_UTILS_FLOAT_PARSER, setDocsWithField);
}

final HoldsOneThing<float[]> valuesRef = new HoldsOneThing<>();

Uninvert u = new Uninvert() {
private float currentValue;
private float[] values;

@Override
public void visitTerm(BytesRef term) {
currentValue = parser.parseFloat(term);
if (values == null) {
// Lazy alloc so for the numeric field case
// (which will hit a NumberFormatException
// when we first try the DEFAULT_INT_PARSER),
// we don't double-alloc:
values = new float[reader.maxDoc()];
valuesRef.set(values);
}
}

@Override
public void visitDoc(int docID) {
values[docID] = currentValue;
}

@Override
protected TermsEnum termsEnum(Terms terms) throws IOException {
return parser.termsEnum(terms);
}
};

u.uninvert(reader, key.field, setDocsWithField);

if (setDocsWithField) {
wrapper.setDocsWithField(reader, key.field, u.docsWithField);
}

float[] values = valuesRef.get();
if (values == null) {
values = new float[reader.maxDoc()];
}
return new FloatsFromArray(values);
}
}

@Override
public Longs getLongs(AtomicReader reader, String field, boolean setDocsWithField) throws IOException {
return getLongs(reader, field, null, setDocsWithField);
}

@Override
public Longs getLongs(AtomicReader reader, String field, FieldCache.LongParser parser, boolean setDocsWithField)
throws IOException {
public NumericDocValues getNumerics(AtomicReader reader, String field, Parser parser, boolean setDocsWithField) throws IOException {
if (parser == null) {
throw new NullPointerException();
}
final NumericDocValues valuesIn = reader.getNumericDocValues(field);
if (valuesIn != null) {
// Not cached here by FieldCacheImpl (cached instead
// per-thread by SegmentReader):
return new Longs() {
@Override
public long get(int docID) {
return valuesIn.get(docID);
}
};
return valuesIn;
} else {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return Longs.EMPTY;
return DocValues.EMPTY_NUMERIC;
} else if (info.hasDocValues()) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
return Longs.EMPTY;
return DocValues.EMPTY_NUMERIC;
}
return (Longs) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
return (NumericDocValues) caches.get(Long.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
}
}

static class LongsFromArray extends Longs {
static class LongsFromArray extends NumericDocValues {
private final PackedInts.Reader values;
private final long minValue;

@@ -725,13 +487,7 @@ class FieldCacheImpl implements FieldCache {
protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
throws IOException {

final LongParser parser = (LongParser) key.custom;
if (parser == null) {
// Confusing: must delegate to wrapper (vs simply
// setting parser = NUMERIC_UTILS_LONG_PARSER) so
// cache key includes NUMERIC_UTILS_LONG_PARSER:
return wrapper.getLongs(reader, key.field, NUMERIC_UTILS_LONG_PARSER, setDocsWithField);
}
final Parser parser = (Parser) key.custom;

final HoldsOneThing<GrowableWriterAndMinValue> valuesRef = new HoldsOneThing<>();

@@ -742,7 +498,7 @@ class FieldCacheImpl implements FieldCache {

@Override
public void visitTerm(BytesRef term) {
currentValue = parser.parseLong(term);
currentValue = parser.parseValue(term);
if (values == null) {
// Lazy alloc so for the numeric field case
// (which will hit a NumberFormatException

@@ -789,111 +545,6 @@ class FieldCacheImpl implements FieldCache {
}
}

@Override
public Doubles getDoubles(AtomicReader reader, String field, boolean setDocsWithField)
throws IOException {
return getDoubles(reader, field, null, setDocsWithField);
}

@Override
public Doubles getDoubles(AtomicReader reader, String field, FieldCache.DoubleParser parser, boolean setDocsWithField)
throws IOException {
final NumericDocValues valuesIn = reader.getNumericDocValues(field);
if (valuesIn != null) {
// Not cached here by FieldCacheImpl (cached instead
// per-thread by SegmentReader):
return new Doubles() {
@Override
public double get(int docID) {
return Double.longBitsToDouble(valuesIn.get(docID));
}
};
} else {
final FieldInfo info = reader.getFieldInfos().fieldInfo(field);
if (info == null) {
return Doubles.EMPTY;
} else if (info.hasDocValues()) {
throw new IllegalStateException("Type mismatch: " + field + " was indexed as " + info.getDocValuesType());
} else if (!info.isIndexed()) {
return Doubles.EMPTY;
}
return (Doubles) caches.get(Double.TYPE).get(reader, new CacheKey(field, parser), setDocsWithField);
}
}

static class DoublesFromArray extends Doubles {
private final double[] values;

public DoublesFromArray(double[] values) {
this.values = values;
}

@Override
public double get(int docID) {
return values[docID];
}
}

static final class DoubleCache extends Cache {
DoubleCache(FieldCacheImpl wrapper) {
super(wrapper);
}

@Override
protected Object createValue(final AtomicReader reader, CacheKey key, boolean setDocsWithField)
throws IOException {

final DoubleParser parser = (DoubleParser) key.custom;
if (parser == null) {
// Confusing: must delegate to wrapper (vs simply
// setting parser = NUMERIC_UTILS_DOUBLE_PARSER) so
// cache key includes NUMERIC_UTILS_DOUBLE_PARSER:
return wrapper.getDoubles(reader, key.field, NUMERIC_UTILS_DOUBLE_PARSER, setDocsWithField);
}

final HoldsOneThing<double[]> valuesRef = new HoldsOneThing<>();

Uninvert u = new Uninvert() {
private double currentValue;
private double[] values;

@Override
public void visitTerm(BytesRef term) {
currentValue = parser.parseDouble(term);
if (values == null) {
// Lazy alloc so for the numeric field case
// (which will hit a NumberFormatException
// when we first try the DEFAULT_INT_PARSER),
// we don't double-alloc:
values = new double[reader.maxDoc()];
valuesRef.set(values);
}
}

@Override
public void visitDoc(int docID) {
values[docID] = currentValue;
}

@Override
protected TermsEnum termsEnum(Terms terms) throws IOException {
return parser.termsEnum(terms);
}
};

u.uninvert(reader, key.field, setDocsWithField);

if (setDocsWithField) {
wrapper.setDocsWithField(reader, key.field, u.docsWithField);
}
double[] values = valuesRef.get();
if (values == null) {
values = new double[reader.maxDoc()];
}
return new DoublesFromArray(values);
}
}

public static class SortedDocValuesImpl extends SortedDocValues {
private final PagedBytes.Reader bytes;
private final MonotonicAppendingLongBuffer termOrdToBytesOffset;

@@ -1,4 +1,4 @@
package org.apache.lucene.util;
package org.apache.lucene.uninverting;
/**
* Copyright 2009 The Apache Software Foundation
*

@@ -23,12 +23,12 @@ import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.uninverting.FieldCache.CacheEntry;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.MapOfSets;

/**
* Provides methods for sanity checking that entries in the FieldCache

@@ -52,7 +52,7 @@ import org.apache.lucene.store.AlreadyClosedException;
* @see FieldCacheSanityChecker.Insanity
* @see FieldCacheSanityChecker.InsanityType
*/
public final class FieldCacheSanityChecker {
final class FieldCacheSanityChecker {

private boolean estimateRam;

@@ -0,0 +1,179 @@
package org.apache.lucene.uninverting;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

import java.io.IOException;
import java.util.ArrayList;
import java.util.Map;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfos;
import org.apache.lucene.index.FilterAtomicReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.Bits;

public class UninvertingReader extends FilterAtomicReader {

public static enum Type {
INTEGER,
LONG,
FLOAT,
DOUBLE,
BINARY,
SORTED,
SORTED_SET
}

public static DirectoryReader wrap(DirectoryReader in, final Map<String,Type> mapping) {
return new UninvertingDirectoryReader(in, mapping);
}

static class UninvertingDirectoryReader extends FilterDirectoryReader {
final Map<String,Type> mapping;

public UninvertingDirectoryReader(DirectoryReader in, final Map<String,Type> mapping) {
super(in, new FilterDirectoryReader.SubReaderWrapper() {
@Override
public AtomicReader wrap(AtomicReader reader) {
return new UninvertingReader(reader, mapping);
}
});
this.mapping = mapping;
}

@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) {
return new UninvertingDirectoryReader(in, mapping);
}
}

final Map<String,Type> mapping;
final FieldInfos fieldInfos;

UninvertingReader(AtomicReader in, Map<String,Type> mapping) {
super(in);
this.mapping = mapping;
ArrayList<FieldInfo> filteredInfos = new ArrayList<>();
for (FieldInfo fi : in.getFieldInfos()) {
FieldInfo.DocValuesType type = fi.getDocValuesType();
if (fi.isIndexed() && !fi.hasDocValues()) {
Type t = mapping.get(fi.name);
if (t != null) {
switch(t) {
case INTEGER:
case LONG:
case FLOAT:
case DOUBLE:
type = FieldInfo.DocValuesType.NUMERIC;
break;
case BINARY:
type = FieldInfo.DocValuesType.BINARY;
break;
case SORTED:
type = FieldInfo.DocValuesType.SORTED;
break;
case SORTED_SET:
type = FieldInfo.DocValuesType.SORTED_SET;
break;
default:
throw new AssertionError();
}
}
}
filteredInfos.add(new FieldInfo(fi.name, fi.isIndexed(), fi.number, fi.hasVectors(), fi.omitsNorms(),
fi.hasPayloads(), fi.getIndexOptions(), type, fi.getNormType(), null));
}
fieldInfos = new FieldInfos(filteredInfos.toArray(new FieldInfo[filteredInfos.size()]));
}

@Override
public FieldInfos getFieldInfos() {
return fieldInfos;
}

@Override
public NumericDocValues getNumericDocValues(String field) throws IOException {
Type v = mapping.get(field);
if (v != null) {
switch (mapping.get(field)) {
case INTEGER: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_INT_PARSER, true);
case FLOAT: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
case LONG: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
case DOUBLE: return FieldCache.DEFAULT.getNumerics(in, field, FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
}
}
return super.getNumericDocValues(field);
}

@Override
public BinaryDocValues getBinaryDocValues(String field) throws IOException {
if (mapping.get(field) == Type.BINARY) {
return FieldCache.DEFAULT.getTerms(in, field, true);
} else {
return in.getBinaryDocValues(field);
}
}

@Override
public SortedDocValues getSortedDocValues(String field) throws IOException {
if (mapping.get(field) == Type.SORTED) {
return FieldCache.DEFAULT.getTermsIndex(in, field);
} else {
return in.getSortedDocValues(field);
}
}

@Override
public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
if (mapping.get(field) == Type.SORTED_SET) {
return FieldCache.DEFAULT.getDocTermOrds(in, field);
} else {
return in.getSortedSetDocValues(field);
}
}

@Override
public Bits getDocsWithField(String field) throws IOException {
if (mapping.containsKey(field)) {
return FieldCache.DEFAULT.getDocsWithField(in, field);
} else {
return in.getDocsWithField(field);
}
}

@Override
public Object getCoreCacheKey() {
return in.getCoreCacheKey();
}

@Override
public Object getCombinedCoreAndDeletesKey() {
return in.getCombinedCoreAndDeletesKey();
}

@Override
public String toString() {
return "Uninverting(" + in.toString() + ")";
}
}
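UninvertingReader, added above, is the bridge for existing indexes that sort, group, or join on fields indexed without doc values: it synthesizes the doc-values API from the inverted index via the now-internal FieldCache. A hedged usage sketch (directory and field names hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.uninverting.UninvertingReader;

    // Declare, per field, which doc-values view to synthesize.
    Map<String,UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("id", UninvertingReader.Type.INTEGER);     // indexed as IntField
    mapping.put("country", UninvertingReader.Type.SORTED); // indexed as StringField

    DirectoryReader reader = UninvertingReader.wrap(DirectoryReader.open(dir), mapping);
    // Sorting on "id" or "country" now works as if the index had
    // NumericDocValues / SortedDocValues for those fields.
    IndexSearcher searcher = new IndexSearcher(reader);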

@@ -0,0 +1,21 @@
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<html>
<body>
Support for creating docvalues on-the-fly from the inverted index at runtime.
</body>
</html>
@@ -1,4 +1,4 @@
package org.apache.lucene.index;
package org.apache.lucene.uninverting;

/*
* Licensed to the Apache Software Foundation (ASF) under one or more

@@ -32,8 +32,22 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocTermOrds;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;

@@ -306,7 +320,7 @@ public class TestDocTermOrds extends LuceneTestCase {
TestUtil.nextInt(random(), 2, 10));

final FieldCache.Ints docIDToID = FieldCache.DEFAULT.getInts(r, "id", false);
final NumericDocValues docIDToID = FieldCache.DEFAULT.getNumerics(r, "id", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
/*
for(int docID=0;docID<subR.maxDoc();docID++) {
System.out.println("  docID=" + docID + " id=" + docIDToID[docID]);

@@ -362,7 +376,7 @@ public class TestDocTermOrds extends LuceneTestCase {
System.out.println("TEST: docID=" + docID + " of " + r.maxDoc() + " (id=" + docIDToID.get(docID) + ")");
}
iter.setDocument(docID);
final int[] answers = idToOrds[docIDToID.get(docID)];
final int[] answers = idToOrds[(int) docIDToID.get(docID)];
int upto = 0;
long ord;
while ((ord = iter.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
@ -1,4 +1,4 @@
|
|||
package org.apache.lucene.search;
|
||||
package org.apache.lucene.uninverting;
|
||||
|
||||
/**
|
||||
* Copyright 2004 The Apache Software Foundation
|
||||
|
@ -47,16 +47,13 @@ import org.apache.lucene.index.DocTermOrds;
|
|||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.NumericDocValues;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.SlowCompositeReaderWrapper;
|
||||
import org.apache.lucene.index.SortedDocValues;
|
||||
import org.apache.lucene.index.SortedSetDocValues;
|
||||
import org.apache.lucene.index.Terms;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.search.FieldCache.Doubles;
|
||||
import org.apache.lucene.search.FieldCache.Floats;
|
||||
import org.apache.lucene.search.FieldCache.Ints;
|
||||
import org.apache.lucene.search.FieldCache.Longs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
@ -141,15 +138,17 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
FieldCache cache = FieldCache.DEFAULT;
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
|
||||
cache.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8));
|
||||
cache.getDoubles(reader, "theDouble", false);
|
||||
cache.getFloats(reader, "theDouble", new FieldCache.FloatParser() {
|
||||
cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
cache.getNumerics(reader, "theDouble", new FieldCache.Parser() {
|
||||
@Override
|
||||
public TermsEnum termsEnum(Terms terms) throws IOException {
|
||||
return NumericUtils.filterPrefixCodedLongs(terms.iterator(null));
|
||||
}
|
||||
@Override
|
||||
public float parseFloat(BytesRef term) {
|
||||
return NumericUtils.sortableIntToFloat((int) NumericUtils.prefixCodedToLong(term));
|
||||
public long parseValue(BytesRef term) {
|
||||
int val = (int) NumericUtils.prefixCodedToLong(term);
|
||||
if (val<0) val ^= 0x7fffffff;
|
||||
return val;
|
||||
}
|
||||
}, false);
|
||||
assertTrue(bos.toString(IOUtils.UTF_8).indexOf("WARNING") != -1);
|
||||
|
@ -161,32 +160,28 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
|
||||
public void test() throws IOException {
|
||||
FieldCache cache = FieldCache.DEFAULT;
|
||||
FieldCache.Doubles doubles = cache.getDoubles(reader, "theDouble", random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", doubles, cache.getDoubles(reader, "theDouble", random().nextBoolean()));
|
||||
assertSame("Second request with explicit parser return same array", doubles, cache.getDoubles(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
|
||||
NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", doubles, cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, random().nextBoolean()));
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
assertTrue(doubles.get(i) + " does not equal: " + (Double.MAX_VALUE - i), doubles.get(i) == (Double.MAX_VALUE - i));
|
||||
assertEquals(Double.doubleToLongBits(Double.MAX_VALUE - i), doubles.get(i));
|
||||
}
|
||||
|
||||
FieldCache.Longs longs = cache.getLongs(reader, "theLong", random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", longs, cache.getLongs(reader, "theLong", random().nextBoolean()));
|
||||
assertSame("Second request with explicit parser return same array", longs, cache.getLongs(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
|
||||
NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", longs, cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, random().nextBoolean()));
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
assertTrue(longs.get(i) + " does not equal: " + (Long.MAX_VALUE - i) + " i=" + i, longs.get(i) == (Long.MAX_VALUE - i));
|
||||
assertEquals(Long.MAX_VALUE - i, longs.get(i));
|
||||
}
|
||||
|
||||
FieldCache.Ints ints = cache.getInts(reader, "theInt", random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", ints, cache.getInts(reader, "theInt", random().nextBoolean()));
|
||||
assertSame("Second request with explicit parser return same array", ints, cache.getInts(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
|
||||
NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", ints, cache.getNumerics(reader, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean()));
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
assertTrue(ints.get(i) + " does not equal: " + (Integer.MAX_VALUE - i), ints.get(i) == (Integer.MAX_VALUE - i));
|
||||
assertEquals(Integer.MAX_VALUE - i, ints.get(i));
|
||||
}
|
||||
|
||||
FieldCache.Floats floats = cache.getFloats(reader, "theFloat", random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", floats, cache.getFloats(reader, "theFloat", random().nextBoolean()));
|
||||
assertSame("Second request with explicit parser return same array", floats, cache.getFloats(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
|
||||
NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean());
|
||||
assertSame("Second request to cache return same array", floats, cache.getNumerics(reader, "theFloat", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, random().nextBoolean()));
|
||||
for (int i = 0; i < NUM_DOCS; i++) {
|
||||
assertTrue(floats.get(i) + " does not equal: " + (Float.MAX_VALUE - i), floats.get(i) == (Float.MAX_VALUE - i));
|
||||
assertEquals(Float.floatToIntBits(Float.MAX_VALUE - i), floats.get(i));
|
||||
}
|
||||
|
||||
Bits docsWithField = cache.getDocsWithField(reader, "theLong");
|
||||
|
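The pattern in this hunk is the core LUCENE-5666 migration: the type-specific accessors (getDoubles, getLongs, getInts, getFloats) collapse into a single getNumerics that takes an explicit parser and returns a NumericDocValues of raw long bits. A before/after sketch, assuming `cache`, `reader` and a valid `docID` as in the test:

    // before: a per-type view with primitive getters
    // FieldCache.Doubles doubles = cache.getDoubles(reader, "theDouble", true);
    // double v = doubles.get(docID);

    // after: one accessor for every numeric type; doubles and floats
    // come back as raw bits and must be decoded by the caller
    NumericDocValues vals =
        cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
    double v = Double.longBitsToDouble(vals.get(docID));

    // longs and ints need no decoding
    NumericDocValues longs =
        cache.getNumerics(reader, "theLong", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
    long x = longs.get(docID);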
@ -335,22 +330,21 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
FieldCache cache = FieldCache.DEFAULT;
|
||||
cache.purgeAllCaches();
|
||||
assertEquals(0, cache.getCacheEntries().length);
|
||||
cache.getDoubles(reader, "theDouble", true);
|
||||
cache.getNumerics(reader, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
|
||||
|
||||
// The double[] takes two slots (one w/ null parser, one
|
||||
// w/ real parser), and docsWithField should also
|
||||
// The double[] takes one slot, and docsWithField should also
|
||||
// have been populated:
|
||||
assertEquals(3, cache.getCacheEntries().length);
|
||||
assertEquals(2, cache.getCacheEntries().length);
|
||||
Bits bits = cache.getDocsWithField(reader, "theDouble");
|
||||
|
||||
// No new entries should appear:
|
||||
assertEquals(3, cache.getCacheEntries().length);
|
||||
assertEquals(2, cache.getCacheEntries().length);
|
||||
assertTrue(bits instanceof Bits.MatchAllBits);
|
||||
|
||||
FieldCache.Ints ints = cache.getInts(reader, "sparse", true);
|
||||
assertEquals(6, cache.getCacheEntries().length);
|
||||
NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
|
||||
assertEquals(4, cache.getCacheEntries().length);
|
||||
Bits docsWithField = cache.getDocsWithField(reader, "sparse");
|
||||
assertEquals(6, cache.getCacheEntries().length);
|
||||
assertEquals(4, cache.getCacheEntries().length);
|
||||
for (int i = 0; i < docsWithField.length(); i++) {
|
||||
if (i%2 == 0) {
|
||||
assertTrue(docsWithField.get(i));
|
||||
|
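Two things the updated assertions pin down: each getNumerics call now occupies a single cache slot (plus one for the docsWithField bits), and a document without a value reads back as 0, so the Bits from getDocsWithField are what distinguish a stored zero from a missing value. A sketch for the "sparse" field, which only even-numbered docs carry:

    NumericDocValues sparse =
        cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
    Bits present = cache.getDocsWithField(reader, "sparse");
    for (int doc = 0; doc < present.length(); doc++) {
      if (present.get(doc)) {
        long value = sparse.get(doc);  // a real value for this doc
      } else {
        assert sparse.get(doc) == 0;   // missing docs read as 0
      }
    }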
@ -360,7 +354,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
FieldCache.Ints numInts = cache.getInts(reader, "numInt", random().nextBoolean());
|
||||
NumericDocValues numInts = cache.getNumerics(reader, "numInt", FieldCache.NUMERIC_UTILS_INT_PARSER, random().nextBoolean());
|
||||
docsWithField = cache.getDocsWithField(reader, "numInt");
|
||||
for (int i = 0; i < docsWithField.length(); i++) {
|
||||
if (i%2 == 0) {
|
||||
|
@ -410,7 +404,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
assertEquals(i%2 == 0, docsWithField.get(i));
|
||||
}
|
||||
} else {
|
||||
FieldCache.Ints ints = cache.getInts(reader, "sparse", true);
|
||||
NumericDocValues ints = cache.getNumerics(reader, "sparse", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
|
||||
Bits docsWithField = cache.getDocsWithField(reader, "sparse");
|
||||
for (int i = 0; i < docsWithField.length(); i++) {
|
||||
if (i%2 == 0) {
|
||||
|
@ -459,7 +453,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
|
||||
// Binary type: can be retrieved via getTerms()
|
||||
try {
|
||||
FieldCache.DEFAULT.getInts(ar, "binary", false);
|
||||
FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
fail();
|
||||
} catch (IllegalStateException expected) {}
|
||||
|
||||
|
@ -487,7 +481,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
|
||||
// Sorted type: can be retrieved via getTerms(), getTermsIndex(), getDocTermOrds()
|
||||
try {
|
||||
FieldCache.DEFAULT.getInts(ar, "sorted", false);
|
||||
FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
fail();
|
||||
} catch (IllegalStateException expected) {}
|
||||
|
||||
|
@ -516,7 +510,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
assertTrue(bits.get(0));
|
||||
|
||||
// Numeric type: can be retrieved via getInts() and so on
|
||||
Ints numeric = FieldCache.DEFAULT.getInts(ar, "numeric", false);
|
||||
NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
assertEquals(42, numeric.get(0));
|
||||
|
||||
try {
|
||||
|
@ -545,7 +539,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
// SortedSet type: can be retrieved via getDocTermOrds()
|
||||
if (defaultCodecSupportsSortedSet()) {
|
||||
try {
|
||||
FieldCache.DEFAULT.getInts(ar, "sortedset", false);
|
||||
FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
fail();
|
||||
} catch (IllegalStateException expected) {}
|
||||
|
||||
|
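These hunks all check the same contract: asking the cache to uninvert a field under the wrong doc values type fails fast with IllegalStateException instead of returning garbage. A sketch, assuming `ar` is an AtomicReader whose field "sorted" carries SORTED doc values:

    try {
      // a numeric view of a SORTED field is a type mismatch
      FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
      fail("expected a type mismatch");
    } catch (IllegalStateException expected) {
      // the field remains reachable through the matching accessor,
      // e.g. FieldCache.DEFAULT.getTermsIndex(ar, "sorted")
    }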
@ -593,17 +587,17 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
cache.purgeAllCaches();
|
||||
assertEquals(0, cache.getCacheEntries().length);
|
||||
|
||||
Ints ints = cache.getInts(ar, "bogusints", true);
|
||||
NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
|
||||
assertEquals(0, ints.get(0));
|
||||
|
||||
Longs longs = cache.getLongs(ar, "boguslongs", true);
|
||||
NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
|
||||
assertEquals(0, longs.get(0));
|
||||
|
||||
Floats floats = cache.getFloats(ar, "bogusfloats", true);
|
||||
assertEquals(0, floats.get(0), 0.0f);
|
||||
NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
|
||||
assertEquals(0, floats.get(0));
|
||||
|
||||
Doubles doubles = cache.getDoubles(ar, "bogusdoubles", true);
|
||||
assertEquals(0, doubles.get(0), 0.0D);
|
||||
NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
|
||||
assertEquals(0, doubles.get(0));
|
||||
|
||||
BytesRef scratch = new BytesRef();
|
||||
BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
|
||||
|
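Fields that simply do not exist are, by contrast, not an error: the cache returns empty instances whose values are all zeros (or zero-length bytes), exactly what the assertions above verify. A sketch with the same `cache` and `ar`:

    NumericDocValues bogus =
        cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
    assert bogus.get(0) == 0;          // no exception, just zeros

    BytesRef scratch = new BytesRef();
    BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
    binaries.get(0, scratch);
    assert scratch.length == 0;        // zero-length bytes for a missing field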
@ -652,17 +646,17 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
cache.purgeAllCaches();
|
||||
assertEquals(0, cache.getCacheEntries().length);
|
||||
|
||||
Ints ints = cache.getInts(ar, "bogusints", true);
|
||||
NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.NUMERIC_UTILS_INT_PARSER, true);
|
||||
assertEquals(0, ints.get(0));
|
||||
|
||||
Longs longs = cache.getLongs(ar, "boguslongs", true);
|
||||
NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.NUMERIC_UTILS_LONG_PARSER, true);
|
||||
assertEquals(0, longs.get(0));
|
||||
|
||||
Floats floats = cache.getFloats(ar, "bogusfloats", true);
|
||||
assertEquals(0, floats.get(0), 0.0f);
|
||||
NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, true);
|
||||
assertEquals(0, floats.get(0));
|
||||
|
||||
Doubles doubles = cache.getDoubles(ar, "bogusdoubles", true);
|
||||
assertEquals(0, doubles.get(0), 0.0D);
|
||||
NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, true);
|
||||
assertEquals(0, doubles.get(0));
|
||||
|
||||
BytesRef scratch = new BytesRef();
|
||||
BinaryDocValues binaries = cache.getTerms(ar, "bogusterms", true);
|
||||
|
@ -724,7 +718,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
}
|
||||
iw.forceMerge(1);
|
||||
final DirectoryReader reader = iw.getReader();
|
||||
final FieldCache.Longs longs = FieldCache.DEFAULT.getLongs(getOnlySegmentReader(reader), "f", false);
|
||||
final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
|
||||
for (int i = 0; i < values.length; ++i) {
|
||||
assertEquals(values[i], longs.get(i));
|
||||
}
|
||||
|
@ -770,7 +764,7 @@ public class TestFieldCache extends LuceneTestCase {
|
|||
}
|
||||
iw.forceMerge(1);
|
||||
final DirectoryReader reader = iw.getReader();
|
||||
final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(getOnlySegmentReader(reader), "f", false);
|
||||
final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlySegmentReader(reader), "f", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
for (int i = 0; i < values.length; ++i) {
|
||||
assertEquals(values[i], ints.get(i));
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
package org.apache.lucene.uninverting;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.IntField;
|
||||
import org.apache.lucene.index.AtomicReader;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.NumericDocValues;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
public class TestFieldCacheReopen extends LuceneTestCase {
|
||||
|
||||
// TODO: make a version of this that tests the same thing with UninvertingReader.wrap()
|
||||
|
||||
// LUCENE-1579: Ensure that, on a reopened reader, any
|
||||
// shared segments reuse the doc values arrays in
|
||||
// FieldCache
|
||||
public void testFieldCacheReuseAfterReopen() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriter writer = new IndexWriter(
|
||||
dir,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random())).
|
||||
setMergePolicy(newLogMergePolicy(10))
|
||||
);
|
||||
Document doc = new Document();
|
||||
doc.add(new IntField("number", 17, Field.Store.NO));
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Open reader1
|
||||
DirectoryReader r = DirectoryReader.open(dir);
|
||||
AtomicReader r1 = getOnlySegmentReader(r);
|
||||
final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
assertEquals(17, ints.get(0));
|
||||
|
||||
// Add new segment
|
||||
writer.addDocument(doc);
|
||||
writer.commit();
|
||||
|
||||
// Reopen reader1 --> reader2
|
||||
DirectoryReader r2 = DirectoryReader.openIfChanged(r);
|
||||
assertNotNull(r2);
|
||||
r.close();
|
||||
AtomicReader sub0 = r2.leaves().get(0).reader();
|
||||
final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
r2.close();
|
||||
assertTrue(ints == ints2);
|
||||
|
||||
writer.shutdown();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
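The test leans on a property worth spelling out: openIfChanged shares unchanged segment readers with the old reader, and FieldCache entries are keyed per segment, so the same uninverted instance is served before and after the reopen. Condensed, under the test's own setup:

    NumericDocValues before =
        FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);

    DirectoryReader r2 = DirectoryReader.openIfChanged(r);   // picks up the new segment
    AtomicReader sameSegment = r2.leaves().get(0).reader();  // first segment is unchanged

    NumericDocValues after =
        FieldCache.DEFAULT.getNumerics(sameSegment, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
    assert before == after;  // cache entry reused, nothing re-uninverted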
@ -1,4 +1,4 @@
|
|||
package org.apache.lucene.util;
|
||||
package org.apache.lucene.uninverting;
|
||||
|
||||
/**
|
||||
* Copyright 2009 The Apache Software Foundation
|
||||
|
@ -30,10 +30,10 @@ import org.apache.lucene.index.DirectoryReader;
|
|||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.MultiReader;
|
||||
import org.apache.lucene.index.SlowCompositeReaderWrapper;
|
||||
import org.apache.lucene.search.FieldCache;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.FieldCacheSanityChecker.Insanity;
|
||||
import org.apache.lucene.util.FieldCacheSanityChecker.InsanityType;
|
||||
import org.apache.lucene.uninverting.FieldCacheSanityChecker.Insanity;
|
||||
import org.apache.lucene.uninverting.FieldCacheSanityChecker.InsanityType;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
public class TestFieldCacheSanityChecker extends LuceneTestCase {
|
||||
|
||||
|
@ -94,13 +94,11 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase {
|
|||
FieldCache cache = FieldCache.DEFAULT;
|
||||
cache.purgeAllCaches();
|
||||
|
||||
cache.getDoubles(readerA, "theDouble", false);
|
||||
cache.getDoubles(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
cache.getDoubles(readerAclone, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
cache.getDoubles(readerB, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
cache.getNumerics(readerA, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
cache.getNumerics(readerAclone, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
cache.getNumerics(readerB, "theDouble", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false);
|
||||
|
||||
cache.getInts(readerX, "theInt", false);
|
||||
cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
cache.getNumerics(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
|
||||
// // //
|
||||
|
||||
|
@ -119,7 +117,7 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase {
|
|||
FieldCache cache = FieldCache.DEFAULT;
|
||||
cache.purgeAllCaches();
|
||||
|
||||
cache.getInts(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
cache.getNumerics(readerX, "theInt", FieldCache.NUMERIC_UTILS_INT_PARSER, false);
|
||||
cache.getTerms(readerX, "theInt", false);
|
||||
|
||||
// // //
|
File diff suppressed because it is too large
|
@ -0,0 +1,595 @@
|
|||
package org.apache.lucene.uninverting;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.codecs.Codec;
|
||||
import org.apache.lucene.codecs.lucene42.Lucene42DocValuesFormat;
|
||||
import org.apache.lucene.document.BinaryDocValuesField;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.SortedDocValuesField;
|
||||
import org.apache.lucene.document.SortedSetDocValuesField;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.AtomicReader;
|
||||
import org.apache.lucene.index.AtomicReaderContext;
|
||||
import org.apache.lucene.index.BinaryDocValues;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.DocValues;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.SlowCompositeReaderWrapper;
|
||||
import org.apache.lucene.index.SortedDocValues;
|
||||
import org.apache.lucene.index.SortedSetDocValues;
|
||||
import org.apache.lucene.index.StoredDocument;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.index.TermsEnum.SeekStatus;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
public class TestFieldCacheVsDocValues extends LuceneTestCase {
|
||||
|
||||
public void testByteMissingVsFieldCache() throws Exception {
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
doTestMissingVsFieldCache(Byte.MIN_VALUE, Byte.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
|
||||
public void testShortMissingVsFieldCache() throws Exception {
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
doTestMissingVsFieldCache(Short.MIN_VALUE, Short.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
|
||||
public void testIntMissingVsFieldCache() throws Exception {
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
doTestMissingVsFieldCache(Integer.MIN_VALUE, Integer.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
|
||||
public void testLongMissingVsFieldCache() throws Exception {
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
doTestMissingVsFieldCache(Long.MIN_VALUE, Long.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSortedFixedLengthVsFieldCache() throws Exception {
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
int fixedLength = TestUtil.nextInt(random(), 1, 10);
|
||||
doTestSortedVsFieldCache(fixedLength, fixedLength);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSortedVariableLengthVsFieldCache() throws Exception {
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
doTestSortedVsFieldCache(1, 10);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSortedSetFixedLengthVsUninvertedField() throws Exception {
|
||||
assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
int fixedLength = TestUtil.nextInt(random(), 1, 10);
|
||||
doTestSortedSetVsUninvertedField(fixedLength, fixedLength);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSortedSetVariableLengthVsUninvertedField() throws Exception {
|
||||
assumeTrue("Codec does not support SORTED_SET", defaultCodecSupportsSortedSet());
|
||||
int numIterations = atLeast(1);
|
||||
for (int i = 0; i < numIterations; i++) {
|
||||
doTestSortedSetVsUninvertedField(1, 10);
|
||||
}
|
||||
}
|
||||
|
||||
// LUCENE-4853
|
||||
public void testHugeBinaryValues() throws Exception {
|
||||
Analyzer analyzer = new MockAnalyzer(random());
|
||||
// FSDirectory because SimpleText will consume gobs of
|
||||
// space when storing big binary values:
|
||||
Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
|
||||
boolean doFixed = random().nextBoolean();
|
||||
int numDocs;
|
||||
int fixedLength = 0;
|
||||
if (doFixed) {
|
||||
// Sometimes make all values fixed length since some
|
||||
// codecs have different code paths for this:
|
||||
numDocs = TestUtil.nextInt(random(), 10, 20);
|
||||
fixedLength = TestUtil.nextInt(random(), 65537, 256 * 1024);
|
||||
} else {
|
||||
numDocs = TestUtil.nextInt(random(), 100, 200);
|
||||
}
|
||||
IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
|
||||
List<byte[]> docBytes = new ArrayList<>();
|
||||
long totalBytes = 0;
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
// we don't use RandomIndexWriter because it might add
|
||||
// more docvalues than we expect !!!!
|
||||
|
||||
// Must be > 64KB in size to ensure more than 2 pages in
|
||||
// PagedBytes would be needed:
|
||||
int numBytes;
|
||||
if (doFixed) {
|
||||
numBytes = fixedLength;
|
||||
} else if (docID == 0 || random().nextInt(5) == 3) {
|
||||
numBytes = TestUtil.nextInt(random(), 65537, 3 * 1024 * 1024);
|
||||
} else {
|
||||
numBytes = TestUtil.nextInt(random(), 1, 1024 * 1024);
|
||||
}
|
||||
totalBytes += numBytes;
|
||||
if (totalBytes > 5 * 1024*1024) {
|
||||
break;
|
||||
}
|
||||
byte[] bytes = new byte[numBytes];
|
||||
random().nextBytes(bytes);
|
||||
docBytes.add(bytes);
|
||||
Document doc = new Document();
|
||||
BytesRef b = new BytesRef(bytes);
|
||||
b.length = bytes.length;
|
||||
doc.add(new BinaryDocValuesField("field", b));
|
||||
doc.add(new StringField("id", ""+docID, Field.Store.YES));
|
||||
try {
|
||||
w.addDocument(doc);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
if (iae.getMessage().indexOf("is too large") == -1) {
|
||||
throw iae;
|
||||
} else {
|
||||
// OK: some codecs can't handle binary DV > 32K
|
||||
assertFalse(codecAcceptsHugeBinaryValues("field"));
|
||||
w.rollback();
|
||||
d.close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DirectoryReader r;
|
||||
try {
|
||||
r = DirectoryReader.open(w, true);
|
||||
} catch (IllegalArgumentException iae) {
|
||||
if (iae.getMessage().indexOf("is too large") == -1) {
|
||||
throw iae;
|
||||
} else {
|
||||
assertFalse(codecAcceptsHugeBinaryValues("field"));
|
||||
|
||||
// OK: some codecs can't handle binary DV > 32K
|
||||
w.rollback();
|
||||
d.close();
|
||||
return;
|
||||
}
|
||||
}
|
||||
w.shutdown();
|
||||
|
||||
AtomicReader ar = SlowCompositeReaderWrapper.wrap(r);
|
||||
|
||||
BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
|
||||
for(int docID=0;docID<docBytes.size();docID++) {
|
||||
StoredDocument doc = ar.document(docID);
|
||||
BytesRef bytes = new BytesRef();
|
||||
s.get(docID, bytes);
|
||||
byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
|
||||
assertEquals(expected.length, bytes.length);
|
||||
assertEquals(new BytesRef(expected), bytes);
|
||||
}
|
||||
|
||||
assertTrue(codecAcceptsHugeBinaryValues("field"));
|
||||
|
||||
ar.close();
|
||||
d.close();
|
||||
}
|
||||
|
||||
// TODO: get this out of here and into the deprecated codecs (4.0, 4.2)
|
||||
public void testHugeBinaryValueLimit() throws Exception {
|
||||
// We only test DVFormats that have a limit
|
||||
assumeFalse("test requires codec with limits on max binary field length", codecAcceptsHugeBinaryValues("field"));
|
||||
Analyzer analyzer = new MockAnalyzer(random());
|
||||
// FSDirectory because SimpleText will consume gobs of
|
||||
// space when storing big binary values:
|
||||
Directory d = newFSDirectory(createTempDir("hugeBinaryValues"));
|
||||
boolean doFixed = random().nextBoolean();
|
||||
int numDocs;
|
||||
int fixedLength = 0;
|
||||
if (doFixed) {
|
||||
// Sometimes make all values fixed length since some
|
||||
// codecs have different code paths for this:
|
||||
numDocs = TestUtil.nextInt(random(), 10, 20);
|
||||
fixedLength = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH;
|
||||
} else {
|
||||
numDocs = TestUtil.nextInt(random(), 100, 200);
|
||||
}
|
||||
IndexWriter w = new IndexWriter(d, newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
|
||||
List<byte[]> docBytes = new ArrayList<>();
|
||||
long totalBytes = 0;
|
||||
for(int docID=0;docID<numDocs;docID++) {
|
||||
// we don't use RandomIndexWriter because it might add
|
||||
// more docvalues than we expect !!!!
|
||||
|
||||
// Must be > 64KB in size to ensure more than 2 pages in
|
||||
// PagedBytes would be needed:
|
||||
int numBytes;
|
||||
if (doFixed) {
|
||||
numBytes = fixedLength;
|
||||
} else if (docID == 0 || random().nextInt(5) == 3) {
|
||||
numBytes = Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH;
|
||||
} else {
|
||||
numBytes = TestUtil.nextInt(random(), 1, Lucene42DocValuesFormat.MAX_BINARY_FIELD_LENGTH);
|
||||
}
|
||||
totalBytes += numBytes;
|
||||
if (totalBytes > 5 * 1024*1024) {
|
||||
break;
|
||||
}
|
||||
byte[] bytes = new byte[numBytes];
|
||||
random().nextBytes(bytes);
|
||||
docBytes.add(bytes);
|
||||
Document doc = new Document();
|
||||
BytesRef b = new BytesRef(bytes);
|
||||
b.length = bytes.length;
|
||||
doc.add(new BinaryDocValuesField("field", b));
|
||||
doc.add(new StringField("id", ""+docID, Field.Store.YES));
|
||||
w.addDocument(doc);
|
||||
}
|
||||
|
||||
DirectoryReader r = DirectoryReader.open(w, true);
|
||||
w.shutdown();
|
||||
|
||||
AtomicReader ar = SlowCompositeReaderWrapper.wrap(r);
|
||||
|
||||
BinaryDocValues s = FieldCache.DEFAULT.getTerms(ar, "field", false);
|
||||
for(int docID=0;docID<docBytes.size();docID++) {
|
||||
StoredDocument doc = ar.document(docID);
|
||||
BytesRef bytes = new BytesRef();
|
||||
s.get(docID, bytes);
|
||||
byte[] expected = docBytes.get(Integer.parseInt(doc.get("id")));
|
||||
assertEquals(expected.length, bytes.length);
|
||||
assertEquals(new BytesRef(expected), bytes);
|
||||
}
|
||||
|
||||
ar.close();
|
||||
d.close();
|
||||
}
|
||||
|
||||
private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
|
||||
Document doc = new Document();
|
||||
Field idField = new StringField("id", "", Field.Store.NO);
|
||||
Field indexedField = new StringField("indexed", "", Field.Store.NO);
|
||||
Field dvField = new SortedDocValuesField("dv", new BytesRef());
|
||||
doc.add(idField);
|
||||
doc.add(indexedField);
|
||||
doc.add(dvField);
|
||||
|
||||
// index some docs
|
||||
int numDocs = atLeast(300);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
idField.setStringValue(Integer.toString(i));
|
||||
final int length;
|
||||
if (minLength == maxLength) {
|
||||
length = minLength; // fixed length
|
||||
} else {
|
||||
length = TestUtil.nextInt(random(), minLength, maxLength);
|
||||
}
|
||||
String value = TestUtil.randomSimpleString(random(), length);
|
||||
indexedField.setStringValue(value);
|
||||
dvField.setBytesValue(new BytesRef(value));
|
||||
writer.addDocument(doc);
|
||||
if (random().nextInt(31) == 0) {
|
||||
writer.commit();
|
||||
}
|
||||
}
|
||||
|
||||
// delete some docs
|
||||
int numDeletions = random().nextInt(numDocs/10);
|
||||
for (int i = 0; i < numDeletions; i++) {
|
||||
int id = random().nextInt(numDocs);
|
||||
writer.deleteDocuments(new Term("id", Integer.toString(id)));
|
||||
}
|
||||
writer.shutdown();
|
||||
|
||||
// compare
|
||||
DirectoryReader ir = DirectoryReader.open(dir);
|
||||
for (AtomicReaderContext context : ir.leaves()) {
|
||||
AtomicReader r = context.reader();
|
||||
SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
|
||||
SortedDocValues actual = r.getSortedDocValues("dv");
|
||||
assertEquals(r.maxDoc(), expected, actual);
|
||||
}
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void doTestSortedSetVsUninvertedField(int minLength, int maxLength) throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
|
||||
|
||||
// index some docs
|
||||
int numDocs = atLeast(300);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
Document doc = new Document();
|
||||
Field idField = new StringField("id", Integer.toString(i), Field.Store.NO);
|
||||
doc.add(idField);
|
||||
final int length;
|
||||
if (minLength == maxLength) {
|
||||
length = minLength; // fixed length
|
||||
} else {
|
||||
length = TestUtil.nextInt(random(), minLength, maxLength);
|
||||
}
|
||||
int numValues = random().nextInt(17);
|
||||
// create a random list of strings
|
||||
List<String> values = new ArrayList<>();
|
||||
for (int v = 0; v < numValues; v++) {
|
||||
values.add(TestUtil.randomSimpleString(random(), length));
|
||||
}
|
||||
|
||||
// add in any order to the indexed field
|
||||
ArrayList<String> unordered = new ArrayList<>(values);
|
||||
Collections.shuffle(unordered, random());
|
||||
for (String v : unordered) { // iterate the shuffled copy, as the comment above intends
|
||||
doc.add(newStringField("indexed", v, Field.Store.NO));
|
||||
}
|
||||
|
||||
// add in any order to the dv field
|
||||
ArrayList<String> unordered2 = new ArrayList<>(values);
|
||||
Collections.shuffle(unordered2, random());
|
||||
for (String v : unordered2) {
|
||||
doc.add(new SortedSetDocValuesField("dv", new BytesRef(v)));
|
||||
}
|
||||
|
||||
writer.addDocument(doc);
|
||||
if (random().nextInt(31) == 0) {
|
||||
writer.commit();
|
||||
}
|
||||
}
|
||||
|
||||
// delete some docs
|
||||
int numDeletions = random().nextInt(numDocs/10);
|
||||
for (int i = 0; i < numDeletions; i++) {
|
||||
int id = random().nextInt(numDocs);
|
||||
writer.deleteDocuments(new Term("id", Integer.toString(id)));
|
||||
}
|
||||
|
||||
// compare per-segment
|
||||
DirectoryReader ir = writer.getReader();
|
||||
for (AtomicReaderContext context : ir.leaves()) {
|
||||
AtomicReader r = context.reader();
|
||||
SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(r, "indexed");
|
||||
SortedSetDocValues actual = r.getSortedSetDocValues("dv");
|
||||
assertEquals(r.maxDoc(), expected, actual);
|
||||
}
|
||||
ir.close();
|
||||
|
||||
writer.forceMerge(1);
|
||||
|
||||
// now compare again after the merge
|
||||
ir = writer.getReader();
|
||||
AtomicReader ar = getOnlySegmentReader(ir);
|
||||
SortedSetDocValues expected = FieldCache.DEFAULT.getDocTermOrds(ar, "indexed");
|
||||
SortedSetDocValues actual = ar.getSortedSetDocValues("dv");
|
||||
assertEquals(ir.maxDoc(), expected, actual);
|
||||
ir.close();
|
||||
|
||||
writer.shutdown();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void doTestMissingVsFieldCache(LongProducer longs) throws Exception {
|
||||
assumeTrue("Codec does not support getDocsWithField", defaultCodecSupportsDocsWithField());
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig conf = newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()));
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
|
||||
Field idField = new StringField("id", "", Field.Store.NO);
|
||||
Field indexedField = newStringField("indexed", "", Field.Store.NO);
|
||||
Field dvField = new NumericDocValuesField("dv", 0);
|
||||
|
||||
|
||||
// index some docs
|
||||
int numDocs = atLeast(300);
|
||||
// numDocs should always be > 256 so that, in case of a codec that optimizes
|
||||
// for numbers of values <= 256, all storage layouts are tested
|
||||
assert numDocs > 256;
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
idField.setStringValue(Integer.toString(i));
|
||||
long value = longs.next();
|
||||
indexedField.setStringValue(Long.toString(value));
|
||||
dvField.setLongValue(value);
|
||||
Document doc = new Document();
|
||||
doc.add(idField);
|
||||
// 1/4 of the time we neglect to add the fields
|
||||
if (random().nextInt(4) > 0) {
|
||||
doc.add(indexedField);
|
||||
doc.add(dvField);
|
||||
}
|
||||
writer.addDocument(doc);
|
||||
if (random().nextInt(31) == 0) {
|
||||
writer.commit();
|
||||
}
|
||||
}
|
||||
|
||||
// delete some docs
|
||||
int numDeletions = random().nextInt(numDocs/10);
|
||||
for (int i = 0; i < numDeletions; i++) {
|
||||
int id = random().nextInt(numDocs);
|
||||
writer.deleteDocuments(new Term("id", Integer.toString(id)));
|
||||
}
|
||||
|
||||
// merge some segments and ensure that at least one of them has more than
|
||||
// 256 values
|
||||
writer.forceMerge(numDocs / 256);
|
||||
|
||||
writer.shutdown();
|
||||
|
||||
// compare
|
||||
DirectoryReader ir = DirectoryReader.open(dir);
|
||||
for (AtomicReaderContext context : ir.leaves()) {
|
||||
AtomicReader r = context.reader();
|
||||
Bits expected = FieldCache.DEFAULT.getDocsWithField(r, "indexed");
|
||||
Bits actual = FieldCache.DEFAULT.getDocsWithField(r, "dv");
|
||||
assertEquals(expected, actual);
|
||||
}
|
||||
ir.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
private void doTestMissingVsFieldCache(final long minValue, final long maxValue) throws Exception {
|
||||
doTestMissingVsFieldCache(new LongProducer() {
|
||||
@Override
|
||||
long next() {
|
||||
return TestUtil.nextLong(random(), minValue, maxValue);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
static abstract class LongProducer {
|
||||
abstract long next();
|
||||
}
|
||||
|
||||
private void assertEquals(Bits expected, Bits actual) throws Exception {
|
||||
assertEquals(expected.length(), actual.length());
|
||||
for (int i = 0; i < expected.length(); i++) {
|
||||
assertEquals(expected.get(i), actual.get(i));
|
||||
}
|
||||
}
|
||||
|
||||
private void assertEquals(int maxDoc, SortedDocValues expected, SortedDocValues actual) throws Exception {
|
||||
assertEquals(maxDoc, DocValues.singleton(expected), DocValues.singleton(actual));
|
||||
}
|
||||
|
||||
private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
|
||||
// can be null for the segment if no docs actually had any SortedDocValues
|
||||
// in this case FieldCache.getDocTermOrds returns EMPTY
|
||||
if (actual == null) {
|
||||
assertEquals(DocValues.EMPTY_SORTED_SET, expected);
|
||||
return;
|
||||
}
|
||||
assertEquals(expected.getValueCount(), actual.getValueCount());
|
||||
// compare ord lists
|
||||
for (int i = 0; i < maxDoc; i++) {
|
||||
expected.setDocument(i);
|
||||
actual.setDocument(i);
|
||||
long expectedOrd;
|
||||
while ((expectedOrd = expected.nextOrd()) != NO_MORE_ORDS) {
|
||||
assertEquals(expectedOrd, actual.nextOrd());
|
||||
}
|
||||
assertEquals(NO_MORE_ORDS, actual.nextOrd());
|
||||
}
|
||||
|
||||
// compare ord dictionary
|
||||
BytesRef expectedBytes = new BytesRef();
|
||||
BytesRef actualBytes = new BytesRef();
|
||||
for (long i = 0; i < expected.getValueCount(); i++) {
|
||||
expected.lookupOrd(i, expectedBytes);
|
||||
actual.lookupOrd(i, actualBytes);
|
||||
assertEquals(expectedBytes, actualBytes);
|
||||
}
|
||||
|
||||
// compare termsenum
|
||||
assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
|
||||
}
|
||||
|
||||
private void assertEquals(long numOrds, TermsEnum expected, TermsEnum actual) throws Exception {
|
||||
BytesRef ref;
|
||||
|
||||
// sequential next() through all terms
|
||||
while ((ref = expected.next()) != null) {
|
||||
assertEquals(ref, actual.next());
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
assertNull(actual.next());
|
||||
|
||||
// sequential seekExact(ord) through all terms
|
||||
for (long i = 0; i < numOrds; i++) {
|
||||
expected.seekExact(i);
|
||||
actual.seekExact(i);
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
|
||||
// sequential seekExact(BytesRef) through all terms
|
||||
for (long i = 0; i < numOrds; i++) {
|
||||
expected.seekExact(i);
|
||||
assertTrue(actual.seekExact(expected.term()));
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
|
||||
// sequential seekCeil(BytesRef) through all terms
|
||||
for (long i = 0; i < numOrds; i++) {
|
||||
expected.seekExact(i);
|
||||
assertEquals(SeekStatus.FOUND, actual.seekCeil(expected.term()));
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
|
||||
// random seekExact(ord)
|
||||
for (long i = 0; i < numOrds; i++) {
|
||||
long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
|
||||
expected.seekExact(randomOrd);
|
||||
actual.seekExact(randomOrd);
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
|
||||
// random seekExact(BytesRef)
|
||||
for (long i = 0; i < numOrds; i++) {
|
||||
long randomOrd = TestUtil.nextLong(random(), 0, numOrds - 1);
|
||||
expected.seekExact(randomOrd);
|
||||
actual.seekExact(expected.term());
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
|
||||
// random seekCeil(BytesRef)
|
||||
for (long i = 0; i < numOrds; i++) {
|
||||
BytesRef target = new BytesRef(TestUtil.randomUnicodeString(random()));
|
||||
SeekStatus expectedStatus = expected.seekCeil(target);
|
||||
assertEquals(expectedStatus, actual.seekCeil(target));
|
||||
if (expectedStatus != SeekStatus.END) {
|
||||
assertEquals(expected.ord(), actual.ord());
|
||||
assertEquals(expected.term(), actual.term());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
protected boolean codecAcceptsHugeBinaryValues(String field) {
|
||||
String name = Codec.getDefault().getName();
|
||||
return !(name.equals("Lucene40") || name.equals("Lucene41") || name.equals("Lucene42") || name.equals("Memory") || name.equals("Direct"));
|
||||
}
|
||||
}
|
|
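One detail in the comparison helpers above: DocValues.singleton adapts a single-valued SortedDocValues into a SortedSetDocValues view, which lets the single- and multi-valued cases share the same ord-walking assertions. A sketch, assuming a reader `r` with the test's "dv" field and a valid `docID`:

    SortedDocValues single = r.getSortedDocValues("dv");  // one ord per document
    SortedSetDocValues asSet = DocValues.singleton(single);
    asSet.setDocument(docID);
    long ord = asSet.nextOrd();                            // the document's only ord
    assert asSet.nextOrd() == SortedSetDocValues.NO_MORE_ORDS;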
@ -1,4 +1,4 @@
|
|||
package org.apache.lucene.index;
|
||||
package org.apache.lucene.uninverting;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
|
@ -30,14 +30,20 @@ import org.apache.lucene.document.BinaryDocValuesField;
|
|||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.document.SortedDocValuesField;
|
||||
import org.apache.lucene.search.FieldCache;
|
||||
import org.apache.lucene.index.AtomicReader;
|
||||
import org.apache.lucene.index.BinaryDocValues;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.NumericDocValues;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.SortedDocValues;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
||||
public class TestDocValuesWithThreads extends LuceneTestCase {
|
||||
public class TestFieldCacheWithThreads extends LuceneTestCase {
|
||||
|
||||
public void test() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
@ -62,7 +68,7 @@ public class TestDocValuesWithThreads extends LuceneTestCase {
|
|||
}
|
||||
|
||||
w.forceMerge(1);
|
||||
final IndexReader r = w.getReader();
|
||||
final IndexReader r = DirectoryReader.open(w, true);
|
||||
w.shutdown();
|
||||
|
||||
assertEquals(1, r.leaves().size());
|
||||
|
@ -78,7 +84,7 @@ public class TestDocValuesWithThreads extends LuceneTestCase {
|
|||
public void run() {
|
||||
try {
|
||||
//NumericDocValues ndv = ar.getNumericDocValues("number");
|
||||
FieldCache.Longs ndv = FieldCache.DEFAULT.getLongs(ar, "number", false);
|
||||
NumericDocValues ndv = FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
|
||||
//BinaryDocValues bdv = ar.getBinaryDocValues("bytes");
|
||||
BinaryDocValues bdv = FieldCache.DEFAULT.getTerms(ar, "bytes", false);
|
||||
SortedDocValues sdv = FieldCache.DEFAULT.getTermsIndex(ar, "sorted");
|
||||
|
@ -90,16 +96,16 @@ public class TestDocValuesWithThreads extends LuceneTestCase {
|
|||
int docID = threadRandom.nextInt(numDocs);
|
||||
switch(threadRandom.nextInt(4)) {
|
||||
case 0:
|
||||
assertEquals((int) numbers.get(docID).longValue(), FieldCache.DEFAULT.getInts(ar, "number", false).get(docID));
|
||||
assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_INT_PARSER, false).get(docID));
|
||||
break;
|
||||
case 1:
|
||||
assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getLongs(ar, "number", false).get(docID));
|
||||
assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false).get(docID));
|
||||
break;
|
||||
case 2:
|
||||
assertEquals(Float.intBitsToFloat((int) numbers.get(docID).longValue()), FieldCache.DEFAULT.getFloats(ar, "number", false).get(docID), 0.0f);
|
||||
assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_FLOAT_PARSER, false).get(docID));
|
||||
break;
|
||||
case 3:
|
||||
assertEquals(Double.longBitsToDouble(numbers.get(docID).longValue()), FieldCache.DEFAULT.getDoubles(ar, "number", false).get(docID), 0.0);
|
||||
assertEquals(numbers.get(docID).longValue(), FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_DOUBLE_PARSER, false).get(docID));
|
||||
break;
|
||||
}
|
||||
bdv.get(docID, scratch);
|
|
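The threaded test works because the cache is keyed by (segment reader, field, parser): every thread asking with the same key gets the same uninverted instance, so concurrent readers share one structure rather than racing to build private copies. The invariant, sketched with `ar` as in the test:

    NumericDocValues a =
        FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
    NumericDocValues b =
        FieldCache.DEFAULT.getNumerics(ar, "number", FieldCache.NUMERIC_UTILS_LONG_PARSER, false);
    assert a == b;  // same key -> same cached instance, safe to share across threads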
@ -0,0 +1,156 @@
|
|||
package org.apache.lucene.uninverting;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.IntField;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.NumericRangeQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.uninverting.UninvertingReader.Type;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestNumericTerms32 extends LuceneTestCase {
|
||||
// distance of entries
|
||||
private static int distance;
|
||||
// shift the starting of the values to the left, to also have negative values:
|
||||
private static final int startOffset = - 1 << 15;
|
||||
// number of docs to generate for testing
|
||||
private static int noDocs;
|
||||
|
||||
private static Directory directory = null;
|
||||
private static IndexReader reader = null;
|
||||
private static IndexSearcher searcher = null;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeClass() throws Exception {
|
||||
noDocs = atLeast(4096);
|
||||
distance = (1 << 30) / noDocs;
|
||||
directory = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
|
||||
.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
|
||||
.setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED);
|
||||
storedInt.setStored(true);
|
||||
storedInt.freeze();
|
||||
|
||||
final FieldType storedInt8 = new FieldType(storedInt);
|
||||
storedInt8.setNumericPrecisionStep(8);
|
||||
|
||||
final FieldType storedInt4 = new FieldType(storedInt);
|
||||
storedInt4.setNumericPrecisionStep(4);
|
||||
|
||||
final FieldType storedInt2 = new FieldType(storedInt);
|
||||
storedInt2.setNumericPrecisionStep(2);
|
||||
|
||||
IntField
|
||||
field8 = new IntField("field8", 0, storedInt8),
|
||||
field4 = new IntField("field4", 0, storedInt4),
|
||||
field2 = new IntField("field2", 0, storedInt2);
|
||||
|
||||
Document doc = new Document();
|
||||
// add fields that have a distance, to test general functionality
|
||||
doc.add(field8); doc.add(field4); doc.add(field2);
|
||||
|
||||
// Add a series of noDocs docs with increasing int values
|
||||
for (int l=0; l<noDocs; l++) {
|
||||
int val=distance*l+startOffset;
|
||||
field8.setIntValue(val);
|
||||
field4.setIntValue(val);
|
||||
field2.setIntValue(val);
|
||||
|
||||
val=l-(noDocs/2);
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
|
||||
Map<String,Type> map = new HashMap<>();
|
||||
map.put("field2", Type.INTEGER);
|
||||
map.put("field4", Type.INTEGER);
|
||||
map.put("field8", Type.INTEGER);
|
||||
reader = UninvertingReader.wrap(writer.getReader(), map);
|
||||
searcher=newSearcher(reader);
|
||||
writer.shutdown();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
searcher = null;
|
||||
reader.close();
|
||||
reader = null;
|
||||
directory.close();
|
||||
directory = null;
|
||||
}
|
||||
|
||||
private void testSorting(int precisionStep) throws Exception {
|
||||
String field="field"+precisionStep;
|
||||
// 10 random tests, the index order is ascending,
|
||||
// so using a reverse sort field should return documents in descending order
|
||||
int num = TestUtil.nextInt(random(), 10, 20);
|
||||
for (int i = 0; i < num; i++) {
|
||||
int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
|
||||
int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
|
||||
if (lower>upper) {
|
||||
int a=lower; lower=upper; upper=a;
|
||||
}
|
||||
Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
|
||||
TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
|
||||
if (topDocs.totalHits==0) continue;
|
||||
ScoreDoc[] sd = topDocs.scoreDocs;
|
||||
assertNotNull(sd);
|
||||
int last = searcher.doc(sd[0].doc).getField(field).numericValue().intValue();
|
||||
for (int j=1; j<sd.length; j++) {
|
||||
int act = searcher.doc(sd[j].doc).getField(field).numericValue().intValue();
|
||||
assertTrue("Docs should be sorted backwards", last>act );
|
||||
last=act;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_8bit() throws Exception {
|
||||
testSorting(8);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_4bit() throws Exception {
|
||||
testSorting(4);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_2bit() throws Exception {
|
||||
testSorting(2);
|
||||
}
|
||||
}
|
|
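TestNumericTerms32 also demonstrates the replacement entry point for sorting on indexed-but-not-doc-valued fields: UninvertingReader.wrap takes a field-to-Type mapping and exposes synthetic doc values over the wrapped reader. A condensed sketch of the same wiring (the range bounds are placeholders):

    Map<String,Type> mapping = new HashMap<>();
    mapping.put("field8", Type.INTEGER);  // uninvert this field as ints

    IndexReader reader = UninvertingReader.wrap(writer.getReader(), mapping);
    IndexSearcher searcher = new IndexSearcher(reader);

    Query q = NumericRangeQuery.newIntRange("field8", 8, -1000, 1000, true, true);
    TopDocs td = searcher.search(q, null, 10,
        new Sort(new SortField("field8", SortField.Type.INT, true)));  // descending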
@ -0,0 +1,166 @@
|
|||
package org.apache.lucene.uninverting;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.FieldType;
|
||||
import org.apache.lucene.document.LongField;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.NumericRangeQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.uninverting.UninvertingReader.Type;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
public class TestNumericTerms64 extends LuceneTestCase {
|
||||
// distance of entries
|
||||
private static long distance;
|
||||
// shift the starting of the values to the left, to also have negative values:
|
||||
private static final long startOffset = - 1L << 31;
|
||||
// number of docs to generate for testing
|
||||
private static int noDocs;
|
||||
|
||||
private static Directory directory = null;
|
||||
private static IndexReader reader = null;
|
||||
private static IndexSearcher searcher = null;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeClass() throws Exception {
|
||||
noDocs = atLeast(4096);
|
||||
distance = (1L << 60) / noDocs;
|
||||
directory = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
|
||||
newIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random()))
|
||||
.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
|
||||
.setMergePolicy(newLogMergePolicy()));
|
||||
|
||||
final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED);
|
||||
storedLong.setStored(true);
|
||||
storedLong.freeze();
|
||||
|
||||
final FieldType storedLong8 = new FieldType(storedLong);
|
||||
storedLong8.setNumericPrecisionStep(8);
|
||||
|
||||
final FieldType storedLong4 = new FieldType(storedLong);
|
||||
storedLong4.setNumericPrecisionStep(4);
|
||||
|
||||
final FieldType storedLong6 = new FieldType(storedLong);
|
||||
storedLong6.setNumericPrecisionStep(6);
|
||||
|
||||
final FieldType storedLong2 = new FieldType(storedLong);
|
||||
storedLong2.setNumericPrecisionStep(2);
|
||||
|
||||
LongField
|
||||
field8 = new LongField("field8", 0L, storedLong8),
|
||||
field6 = new LongField("field6", 0L, storedLong6),
|
||||
field4 = new LongField("field4", 0L, storedLong4),
|
||||
field2 = new LongField("field2", 0L, storedLong2);
|
||||
|
||||
Document doc = new Document();
|
||||
// add fields that have a distance, to test general functionality
|
||||
doc.add(field8); doc.add(field6); doc.add(field4); doc.add(field2);
|
||||
|
||||
// Add a series of noDocs docs with increasing long values, by updating the fields
|
||||
for (int l=0; l<noDocs; l++) {
|
||||
long val=distance*l+startOffset;
|
||||
field8.setLongValue(val);
|
||||
field6.setLongValue(val);
|
||||
field4.setLongValue(val);
|
||||
field2.setLongValue(val);
|
||||
|
||||
val=l-(noDocs/2);
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
Map<String,Type> map = new HashMap<>();
|
||||
map.put("field2", Type.LONG);
|
||||
map.put("field4", Type.LONG);
|
||||
map.put("field6", Type.LONG);
|
||||
map.put("field8", Type.LONG);
|
||||
reader = UninvertingReader.wrap(writer.getReader(), map);
|
||||
searcher=newSearcher(reader);
|
||||
writer.shutdown();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
searcher = null;
|
||||
reader.close();
|
||||
reader = null;
|
||||
directory.close();
|
||||
directory = null;
|
||||
}
|
||||
|
||||
private void testSorting(int precisionStep) throws Exception {
|
||||
String field="field"+precisionStep;
|
||||
// 10 random tests, the index order is ascending,
|
||||
// so using a reverse sort field should return documents in descending order
|
||||
int num = TestUtil.nextInt(random(), 10, 20);
|
||||
for (int i = 0; i < num; i++) {
|
||||
long lower=(long)(random().nextDouble()*noDocs*distance)+startOffset;
|
||||
long upper=(long)(random().nextDouble()*noDocs*distance)+startOffset;
|
||||
if (lower>upper) {
|
||||
long a=lower; lower=upper; upper=a;
|
||||
}
|
||||
Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
|
||||
TopDocs topDocs = searcher.search(tq, null, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
|
||||
if (topDocs.totalHits==0) continue;
|
||||
ScoreDoc[] sd = topDocs.scoreDocs;
|
||||
assertNotNull(sd);
|
||||
long last=searcher.doc(sd[0].doc).getField(field).numericValue().longValue();
|
||||
for (int j=1; j<sd.length; j++) {
|
||||
long act=searcher.doc(sd[j].doc).getField(field).numericValue().longValue();
|
||||
assertTrue("Docs should be sorted backwards", last>act );
|
||||
last=act;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_8bit() throws Exception {
|
||||
testSorting(8);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_6bit() throws Exception {
|
||||
testSorting(6);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_4bit() throws Exception {
|
||||
testSorting(4);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSorting_2bit() throws Exception {
|
||||
testSorting(2);
|
||||
}
|
||||
}
|
|
@ -23,7 +23,6 @@ import org.apache.lucene.index.AtomicReaderContext;
|
|||
import org.apache.lucene.index.IndexReader; // for javadocs
|
||||
import org.apache.lucene.queries.function.FunctionQuery;
|
||||
import org.apache.lucene.search.Explanation;
|
||||
import org.apache.lucene.search.FieldCache; // for javadocs
|
||||
|
||||
/**
|
||||
* An instance of this subclass should be returned by
|
||||
|
@ -32,7 +31,7 @@ import org.apache.lucene.search.FieldCache; // for javadocs
|
|||
* <p>Since Lucene 2.9, queries operate on each segment of an index separately,
|
||||
* so the protected {@link #context} field can be used to resolve doc IDs,
|
||||
* as the supplied <code>doc</code> ID is per-segment and without knowledge
|
||||
* of the IndexReader you cannot access the document or {@link FieldCache}.
|
||||
* of the IndexReader you cannot access the document or DocValues.
|
||||
*
|
||||
* @lucene.experimental
|
||||
* @since 2.9.2
|
||||
|
|
|
@@ -20,12 +20,12 @@ package org.apache.lucene.queries.function.docvalues;
import java.io.IOException;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.ValueSourceScorer;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.UnicodeUtil;

@@ -45,7 +45,7 @@ public abstract class DocTermsIndexDocValues extends FunctionValues {

public DocTermsIndexDocValues(ValueSource vs, AtomicReaderContext context, String field) throws IOException {
try {
termsIndex = FieldCache.DEFAULT.getTermsIndex(context.reader(), field);
termsIndex = DocValues.getSorted(context.reader(), field);
} catch (RuntimeException e) {
throw new DocTermsIndexException(field, e);
}

@@ -22,11 +22,11 @@ import java.util.Map;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.FieldInfo.DocValuesType;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.DocTermsIndexDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

@@ -45,8 +45,8 @@ public class BytesRefFieldSource extends FieldCacheSource {
// To be sorted or not to be sorted, that is the question
// TODO: do it cleaner?
if (fieldInfo != null && fieldInfo.getDocValuesType() == DocValuesType.BINARY) {
final BinaryDocValues binaryValues = FieldCache.DEFAULT.getTerms(readerContext.reader(), field, true);
final Bits docsWithField = FieldCache.DEFAULT.getDocsWithField(readerContext.reader(), field);
final BinaryDocValues binaryValues = DocValues.getBinary(readerContext.reader(), field);
final Bits docsWithField = DocValues.getDocsWithField(readerContext.reader(), field);
return new FunctionValues() {

@Override

@@ -20,31 +20,24 @@ package org.apache.lucene.queries.function.valuesource;
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSourceScorer;
import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueDouble;

/**
* Obtains double field values from {@link FieldCache#getDoubles} and makes
* Obtains double field values from {@link AtomicReader#getNumericDocValues} and makes
* those values available as other numeric types, casting as needed.
*/
public class DoubleFieldSource extends FieldCacheSource {

protected final FieldCache.DoubleParser parser;

public DoubleFieldSource(String field) {
this(field, null);
}

public DoubleFieldSource(String field, FieldCache.DoubleParser parser) {
super(field);
this.parser = parser;
}

@Override

@@ -54,12 +47,12 @@ public class DoubleFieldSource extends FieldCacheSource {

@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FieldCache.Doubles arr = cache.getDoubles(readerContext.reader(), field, parser, true);
final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return arr.get(doc);
return Double.longBitsToDouble(arr.get(doc));
}

@Override

@@ -79,29 +72,24 @@ public class DoubleFieldSource extends FieldCacheSource {

@Override
public void fillValue(int doc) {
mval.value = arr.get(doc);
mval.value = doubleVal(doc);
mval.exists = mval.value != 0 || valid.get(doc);
}
};
}

};

};
}

@Override
public boolean equals(Object o) {
if (o.getClass() != DoubleFieldSource.class) return false;
DoubleFieldSource other = (DoubleFieldSource) o;
return super.equals(other)
&& (this.parser == null ? other.parser == null :
this.parser.getClass() == other.parser.getClass());
return super.equals(other);
}

@Override
public int hashCode() {
int h = parser == null ? Double.class.hashCode() : parser.getClass().hashCode();
int h = Double.class.hashCode();
h += super.hashCode();
return h;
}

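The doubleVal change above is the heart of this file: NumericDocValues stores a double as its raw IEEE-754 long bits, so the value must be decoded explicitly. A minimal sketch of both directions, with the reader, document, and "price" field hypothetical:

import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;

// index time: encode the double into the field's long payload
doc.add(new NumericDocValuesField("price", Double.doubleToRawLongBits(9.99)));

// search time: read the long back and decode it to a double
NumericDocValues dv = DocValues.getNumeric(atomicReader, "price");
double price = Double.longBitsToDouble(dv.get(docId));
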
@@ -20,31 +20,31 @@ package org.apache.lucene.queries.function.valuesource;
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSourceScorer;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueInt;

/**
* Obtains int field values from {@link FieldCache#getInts} and makes
* Obtains int field values from {@link AtomicReader#getNumericDocValues} and makes
* those values available as other numeric types, casting as needed.
* strVal of the value is not the int value, but its string (displayed) value
*/
public class EnumFieldSource extends FieldCacheSource {
static final Integer DEFAULT_VALUE = -1;

final FieldCache.IntParser parser;
final Map<Integer, String> enumIntToStringMap;
final Map<String, Integer> enumStringToIntMap;

public EnumFieldSource(String field, FieldCache.IntParser parser, Map<Integer, String> enumIntToStringMap, Map<String, Integer> enumStringToIntMap) {
public EnumFieldSource(String field, Map<Integer, String> enumIntToStringMap, Map<String, Integer> enumStringToIntMap) {
super(field);
this.parser = parser;
this.enumIntToStringMap = enumIntToStringMap;
this.enumStringToIntMap = enumStringToIntMap;
}

@@ -98,54 +98,28 @@ public class EnumFieldSource extends FieldCacheSource {

@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FieldCache.Ints arr = cache.getInts(readerContext.reader(), field, parser, true);
final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);

return new IntDocValues(this) {
final MutableValueInt val = new MutableValueInt();

@Override
public float floatVal(int doc) {
return (float) arr.get(doc);
}

@Override
public int intVal(int doc) {
return arr.get(doc);
}

@Override
public long longVal(int doc) {
return (long) arr.get(doc);
}

@Override
public double doubleVal(int doc) {
return (double) arr.get(doc);
return (int) arr.get(doc);
}

@Override
public String strVal(int doc) {
Integer intValue = arr.get(doc);
Integer intValue = intVal(doc);
return intValueToStringValue(intValue);
}

@Override
public Object objectVal(int doc) {
return valid.get(doc) ? arr.get(doc) : null;
}

@Override
public boolean exists(int doc) {
return valid.get(doc);
}

@Override
public String toString(int doc) {
return description() + '=' + strVal(doc);
}

@Override
public ValueSourceScorer getRangeScorer(IndexReader reader, String lowerVal, String upperVal, boolean includeLower, boolean includeUpper) {
Integer lower = stringValueToIntValue(lowerVal);

@@ -171,7 +145,7 @@ public class EnumFieldSource extends FieldCacheSource {
return new ValueSourceScorer(reader, this) {
@Override
public boolean matchesValue(int doc) {
int val = arr.get(doc);
int val = intVal(doc);
// only check for deleted if it's the default value
// if (val==0 && reader.isDeleted(doc)) return false;
return val >= ll && val <= uu;

@@ -191,13 +165,11 @@ public class EnumFieldSource extends FieldCacheSource {

@Override
public void fillValue(int doc) {
mval.value = arr.get(doc);
mval.value = intVal(doc);
mval.exists = valid.get(doc);
}
};
}

};
}

@@ -211,7 +183,6 @@ public class EnumFieldSource extends FieldCacheSource {

if (!enumIntToStringMap.equals(that.enumIntToStringMap)) return false;
if (!enumStringToIntMap.equals(that.enumStringToIntMap)) return false;
if (!parser.equals(that.parser)) return false;

return true;
}

@@ -219,7 +190,6 @@ public class EnumFieldSource extends FieldCacheSource {
@Override
public int hashCode() {
int result = super.hashCode();
result = 31 * result + parser.hashCode();
result = 31 * result + enumIntToStringMap.hashCode();
result = 31 * result + enumStringToIntMap.hashCode();
return result;

@@ -18,26 +18,20 @@
package org.apache.lucene.queries.function.valuesource;

import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.FieldCache;

/**
* A base class for ValueSource implementations that retrieve values for
* a single field from the {@link org.apache.lucene.search.FieldCache}.
* a single field from DocValues.
*
*
*/
public abstract class FieldCacheSource extends ValueSource {
protected final String field;
protected final FieldCache cache = FieldCache.DEFAULT;

public FieldCacheSource(String field) {
this.field=field;
}

public FieldCache getFieldCache() {
return cache;
}

public String getField() {
return field;
}

@@ -51,13 +45,12 @@ public abstract class FieldCacheSource extends ValueSource {
public boolean equals(Object o) {
if (!(o instanceof FieldCacheSource)) return false;
FieldCacheSource other = (FieldCacheSource)o;
return this.field.equals(other.field)
&& this.cache == other.cache;
return this.field.equals(other.field);
}

@Override
public int hashCode() {
return cache.hashCode() + field.hashCode();
return field.hashCode();
}

}

@@ -20,29 +20,24 @@ package org.apache.lucene.queries.function.valuesource;
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.FloatDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueFloat;

/**
* Obtains float field values from {@link FieldCache#getFloats} and makes those
* Obtains float field values from {@link AtomicReader#getNumericDocValues} and makes those
* values available as other numeric types, casting as needed.
*/
public class FloatFieldSource extends FieldCacheSource {

protected final FieldCache.FloatParser parser;

public FloatFieldSource(String field) {
this(field, null);
}

public FloatFieldSource(String field, FieldCache.FloatParser parser) {
super(field);
this.parser = parser;
}

@Override

@@ -52,18 +47,13 @@ public class FloatFieldSource extends FieldCacheSource {

@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FieldCache.Floats arr = cache.getFloats(readerContext.reader(), field, parser, true);
final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);

return new FloatDocValues(this) {
@Override
public float floatVal(int doc) {
return arr.get(doc);
}

@Override
public Object objectVal(int doc) {
return valid.get(doc) ? arr.get(doc) : null;
return Float.intBitsToFloat((int)arr.get(doc));
}

@Override

@@ -83,7 +73,7 @@ public class FloatFieldSource extends FieldCacheSource {

@Override
public void fillValue(int doc) {
mval.value = arr.get(doc);
mval.value = floatVal(doc);
mval.exists = mval.value != 0 || valid.get(doc);
}
};

@@ -96,14 +86,12 @@ public class FloatFieldSource extends FieldCacheSource {
public boolean equals(Object o) {
if (o.getClass() != FloatFieldSource.class) return false;
FloatFieldSource other = (FloatFieldSource)o;
return super.equals(other)
&& (this.parser==null ? other.parser==null :
this.parser.getClass() == other.parser.getClass());
return super.equals(other);
}

@Override
public int hashCode() {
int h = parser==null ? Float.class.hashCode() : parser.getClass().hashCode();
int h = Float.class.hashCode();
h += super.hashCode();
return h;
}

@@ -20,30 +20,26 @@ package org.apache.lucene.queries.function.valuesource;
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSourceScorer;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueInt;

/**
* Obtains int field values from {@link FieldCache#getInts} and makes those
* Obtains int field values from {@link AtomicReader#getNumericDocValues} and makes those
* values available as other numeric types, casting as needed.
*/
public class IntFieldSource extends FieldCacheSource {
final FieldCache.IntParser parser;

public IntFieldSource(String field) {
this(field, null);
}

public IntFieldSource(String field, FieldCache.IntParser parser) {
super(field);
this.parser = parser;
}

@Override

@@ -54,40 +50,20 @@ public class IntFieldSource extends FieldCacheSource {

@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FieldCache.Ints arr = cache.getInts(readerContext.reader(), field, parser, true);
final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);

return new IntDocValues(this) {
final MutableValueInt val = new MutableValueInt();

@Override
public float floatVal(int doc) {
return (float)arr.get(doc);
}

@Override
public int intVal(int doc) {
return arr.get(doc);
}

@Override
public long longVal(int doc) {
return (long)arr.get(doc);
}

@Override
public double doubleVal(int doc) {
return (double)arr.get(doc);
return (int) arr.get(doc);
}

@Override
public String strVal(int doc) {
return Integer.toString(arr.get(doc));
}

@Override
public Object objectVal(int doc) {
return valid.get(doc) ? arr.get(doc) : null;
return Integer.toString(intVal(doc));
}

@Override

@@ -95,11 +71,6 @@ public class IntFieldSource extends FieldCacheSource {
return arr.get(doc) != 0 || valid.get(doc);
}

@Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}

@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {

@@ -112,13 +83,11 @@ public class IntFieldSource extends FieldCacheSource {

@Override
public void fillValue(int doc) {
mval.value = arr.get(doc);
mval.value = intVal(doc);
mval.exists = mval.value != 0 || valid.get(doc);
}
};
}

};
}

@@ -126,14 +95,12 @@ public class IntFieldSource extends FieldCacheSource {
public boolean equals(Object o) {
if (o.getClass() != IntFieldSource.class) return false;
IntFieldSource other = (IntFieldSource)o;
return super.equals(other)
&& (this.parser==null ? other.parser==null :
this.parser.getClass() == other.parser.getClass());
return super.equals(other);
}

@Override
public int hashCode() {
int h = parser==null ? Integer.class.hashCode() : parser.getClass().hashCode();
int h = Integer.class.hashCode();
h += super.hashCode();
return h;
}

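A pattern worth noting across these sources: DocValues.getNumeric returns 0 for documents without a value, so the separate DocValues.getDocsWithField bits are what distinguish a stored zero from a missing value. A minimal sketch of the exists check, with the reader and field name hypothetical:

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.util.Bits;

NumericDocValues arr = DocValues.getNumeric(atomicReader, "count");
Bits valid = DocValues.getDocsWithField(atomicReader, "count");

// a raw 0 is ambiguous on its own; the Bits resolve it
boolean exists = valid.get(docId);
long value = exists ? arr.get(docId) : 0L;  // 0 is the default for absent values
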
@@ -22,6 +22,7 @@ import java.util.Map;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.ReaderUtil;

@@ -30,7 +31,6 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.packed.PackedInts;

/**
* Use a field value and find the Document Frequency within another field.

@@ -56,7 +56,7 @@ public class JoinDocFreqValueSource extends FieldCacheSource {
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException
{
final BinaryDocValues terms = cache.getTerms(readerContext.reader(), field, false, PackedInts.FAST);
final BinaryDocValues terms = DocValues.getBinary(readerContext.reader(), field);
final IndexReader top = ReaderUtil.getTopLevelContext(readerContext).reader();
Terms t = MultiFields.getTerms(top, qfield);
final TermsEnum termsEnum = t == null ? TermsEnum.EMPTY : t.iterator(null);

@@ -20,31 +20,24 @@ package org.apache.lucene.queries.function.valuesource;
import java.io.IOException;
import java.util.Map;

import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSourceScorer;
import org.apache.lucene.queries.function.docvalues.LongDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueLong;

/**
* Obtains long field values from {@link FieldCache#getLongs} and makes those
* Obtains long field values from {@link AtomicReader#getNumericDocValues} and makes those
* values available as other numeric types, casting as needed.
*/
public class LongFieldSource extends FieldCacheSource {

protected final FieldCache.LongParser parser;

public LongFieldSource(String field) {
this(field, null);
}

public LongFieldSource(String field, FieldCache.LongParser parser) {
super(field);
this.parser = parser;
}

@Override

@@ -66,8 +59,8 @@ public class LongFieldSource extends FieldCacheSource {

@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FieldCache.Longs arr = cache.getLongs(readerContext.reader(), field, parser, true);
final Bits valid = cache.getDocsWithField(readerContext.reader(), field);
final NumericDocValues arr = DocValues.getNumeric(readerContext.reader(), field);
final Bits valid = DocValues.getDocsWithField(readerContext.reader(), field);

return new LongDocValues(this) {
@Override

@@ -124,14 +117,12 @@ public class LongFieldSource extends FieldCacheSource {
public boolean equals(Object o) {
if (o.getClass() != this.getClass()) return false;
LongFieldSource other = (LongFieldSource) o;
return super.equals(other)
&& (this.parser == null ? other.parser == null :
this.parser.getClass() == other.parser.getClass());
return super.equals(other);
}

@Override
public int hashCode() {
int h = parser == null ? this.getClass().hashCode() : parser.getClass().hashCode();
int h = getClass().hashCode();
h += super.hashCode();
return h;
}

@@ -23,6 +23,7 @@ import java.util.Map;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SlowCompositeReaderWrapper;

@@ -30,12 +31,11 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueInt;

/**
* Obtains the ordinal of the field value from the default Lucene {@link org.apache.lucene.search.FieldCache} using getStringIndex().
* Obtains the ordinal of the field value from {@link AtomicReader#getSortedDocValues}.
* <br>
* The native lucene index order is used to assign an ordinal value for each field value.
* <br>Field values (terms) are lexicographically ordered by unicode value, and numbered starting at 1.

@@ -71,7 +71,7 @@ public class OrdFieldSource extends ValueSource {
final int off = readerContext.docBase;
final IndexReader topReader = ReaderUtil.getTopLevelContext(readerContext).reader();
final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
final SortedDocValues sindex = DocValues.getSorted(r, field);
return new IntDocValues(this) {
protected String toTerm(String readableValue) {
return readableValue;

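OrdFieldSource now reads ordinals directly from the sorted doc values. For reference, a sketch of the ordinal API the code above relies on (reader, field, and doc id are hypothetical):

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.BytesRef;

SortedDocValues sindex = DocValues.getSorted(atomicReader, "country");

int ord = sindex.getOrd(docId);   // -1 means this document has no value
if (ord >= 0) {
  BytesRef term = new BytesRef();
  sindex.lookupOrd(ord, term);    // resolve the ordinal back to its term
}
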
@@ -23,6 +23,7 @@ import java.util.Map;
import org.apache.lucene.index.AtomicReader;
import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.CompositeReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.SlowCompositeReaderWrapper;

@@ -30,10 +31,9 @@ import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.queries.function.FunctionValues;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.docvalues.IntDocValues;
import org.apache.lucene.search.FieldCache;

/**
* Obtains the ordinal of the field value from the default Lucene {@link org.apache.lucene.search.FieldCache} using getTermsIndex()
* Obtains the ordinal of the field value from {@link AtomicReader#getSortedDocValues}
* and reverses the order.
* <br>
* The native lucene index order is used to assign an ordinal value for each field value.

@@ -72,7 +72,7 @@ public class ReverseOrdFieldSource extends ValueSource {
final AtomicReader r = SlowCompositeReaderWrapper.wrap(topReader);
final int off = readerContext.docBase;

final SortedDocValues sindex = FieldCache.DEFAULT.getTermsIndex(r, field);
final SortedDocValues sindex = DocValues.getSorted(r, field);
final int end = sindex.getValueCount();

return new IntDocValues(this) {

@@ -24,7 +24,6 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;

@@ -39,7 +38,9 @@ import java.util.Map;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.Term;

/**

@@ -66,11 +67,6 @@ public class TestCustomScoreQuery extends FunctionTestSetup {
*/
@Test
public void testCustomScoreFloat() throws Exception {
// INT field can be parsed as float
doTestCustomScore(INT_AS_FLOAT_VALUESOURCE, 1.0);
doTestCustomScore(INT_AS_FLOAT_VALUESOURCE, 5.0);

// same values, but in float format
doTestCustomScore(FLOAT_VALUESOURCE, 1.0);
doTestCustomScore(FLOAT_VALUESOURCE, 6.0);
}

@@ -164,7 +160,7 @@ public class TestCustomScoreQuery extends FunctionTestSetup {

@Override
protected CustomScoreProvider getCustomScoreProvider(AtomicReaderContext context) throws IOException {
final FieldCache.Ints values = FieldCache.DEFAULT.getInts(context.reader(), INT_FIELD, false);
final NumericDocValues values = DocValues.getNumeric(context.reader(), INT_FIELD);
return new CustomScoreProvider(context) {
@Override
public float customScore(int doc, float subScore, float valSrcScore) {

@@ -1,7 +1,5 @@
package org.apache.lucene.queries.function;

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;

@@ -9,15 +7,14 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
import org.apache.lucene.queries.function.valuesource.IntFieldSource;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;

@@ -60,21 +57,7 @@ public abstract class FunctionTestSetup extends LuceneTestCase {
protected static final String INT_FIELD = "iii";
protected static final String FLOAT_FIELD = "fff";

private static final FieldCache.FloatParser CUSTOM_FLOAT_PARSER = new FieldCache.FloatParser() {

@Override
public TermsEnum termsEnum(Terms terms) throws IOException {
return FieldCache.NUMERIC_UTILS_INT_PARSER.termsEnum(terms);
}

@Override
public float parseFloat(BytesRef term) {
return (float) FieldCache.NUMERIC_UTILS_INT_PARSER.parseInt(term);
}
};

protected ValueSource INT_VALUESOURCE = new IntFieldSource(INT_FIELD);
protected ValueSource INT_AS_FLOAT_VALUESOURCE = new FloatFieldSource(INT_FIELD, CUSTOM_FLOAT_PARSER);
protected ValueSource FLOAT_VALUESOURCE = new FloatFieldSource(FLOAT_FIELD);

private static final String DOC_TEXT_LINES[] = {

@@ -152,6 +135,7 @@ public abstract class FunctionTestSetup extends LuceneTestCase {

f = newField(ID_FIELD, id2String(scoreAndID), customType); // for debug purposes
d.add(f);
d.add(new SortedDocValuesField(ID_FIELD, new BytesRef(id2String(scoreAndID))));

FieldType customType2 = new FieldType(TextField.TYPE_NOT_STORED);
customType2.setOmitNorms(true);

@@ -160,9 +144,11 @@ public abstract class FunctionTestSetup extends LuceneTestCase {

f = new IntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
d.add(f);
d.add(new NumericDocValuesField(INT_FIELD, scoreAndID));

f = new FloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
d.add(f);
d.add(new NumericDocValuesField(FLOAT_FIELD, Float.floatToRawIntBits(scoreAndID)));

iw.addDocument(d);
log("added: " + d);

@@ -53,8 +53,6 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
/** Test that FieldScoreQuery of Type.FLOAT returns docs in expected order. */
@Test
public void testRankFloat () throws Exception {
// INT field can be parsed as float
doTestRank(INT_AS_FLOAT_VALUESOURCE);
// same values, but in float format
doTestRank(FLOAT_VALUESOURCE);
}

@@ -88,8 +86,6 @@ public class TestFieldScoreQuery extends FunctionTestSetup {
/** Test that FieldScoreQuery of Type.FLOAT returns the expected scores. */
@Test
public void testExactScoreFloat () throws Exception {
// INT field can be parsed as float
doTestExactScore(INT_AS_FLOAT_VALUESOURCE);
// same values, but in float format
doTestExactScore(FLOAT_VALUESOURCE);
}

@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.RandomIndexWriter;

@@ -48,12 +49,15 @@ public class TestFunctionQuerySort extends LuceneTestCase {

Document doc = new Document();
Field field = new IntField("value", 0, Field.Store.YES);
Field dvField = new NumericDocValuesField("value", 0);
doc.add(field);
doc.add(dvField);

// Save docs unsorted (decreasing value n, n-1, ...)
final int NUM_VALS = 5;
for (int val = NUM_VALS; val > 0; val--) {
field.setIntValue(val);
dvField.setLongValue(val);
writer.addDocument(doc);
}

@@ -27,6 +27,8 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexReader;

@@ -101,26 +103,44 @@ public class TestValueSources extends LuceneTestCase {
Document document = new Document();
Field idField = new StringField("id", "", Field.Store.NO);
document.add(idField);
Field idDVField = new SortedDocValuesField("id", new BytesRef());
document.add(idDVField);
Field doubleField = new DoubleField("double", 0d, Field.Store.NO);
document.add(doubleField);
Field doubleDVField = new NumericDocValuesField("double", 0);
document.add(doubleDVField);
Field floatField = new FloatField("float", 0f, Field.Store.NO);
document.add(floatField);
Field floatDVField = new NumericDocValuesField("float", 0);
document.add(floatDVField);
Field intField = new IntField("int", 0, Field.Store.NO);
document.add(intField);
Field intDVField = new NumericDocValuesField("int", 0);
document.add(intDVField);
Field longField = new LongField("long", 0L, Field.Store.NO);
document.add(longField);
Field longDVField = new NumericDocValuesField("long", 0);
document.add(longDVField);
Field stringField = new StringField("string", "", Field.Store.NO);
document.add(stringField);
Field stringDVField = new SortedDocValuesField("string", new BytesRef());
document.add(stringDVField);
Field textField = new TextField("text", "", Field.Store.NO);
document.add(textField);

for (String [] doc : documents) {
idField.setStringValue(doc[0]);
idDVField.setBytesValue(new BytesRef(doc[0]));
doubleField.setDoubleValue(Double.valueOf(doc[1]));
doubleDVField.setLongValue(Double.doubleToRawLongBits(Double.valueOf(doc[1])));
floatField.setFloatValue(Float.valueOf(doc[2]));
floatDVField.setLongValue(Float.floatToRawIntBits(Float.valueOf(doc[2])));
intField.setIntValue(Integer.valueOf(doc[3]));
intDVField.setLongValue(Integer.valueOf(doc[3]));
longField.setLongValue(Long.valueOf(doc[4]));
longDVField.setLongValue(Long.valueOf(doc[4]));
stringField.setStringValue(doc[5]);
stringDVField.setBytesValue(new BytesRef(doc[5]));
textField.setStringValue(doc[6]);
iw.addDocument(document);
}

@@ -22,7 +22,7 @@ import java.text.Collator;

import org.apache.lucene.index.AtomicReaderContext;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;

@@ -95,8 +95,8 @@ public final class SlowCollatedStringComparator extends FieldComparator<String>

@Override
public FieldComparator<String> setNextReader(AtomicReaderContext context) throws IOException {
currentDocTerms = FieldCache.DEFAULT.getTerms(context.reader(), field, true);
docsWithField = FieldCache.DEFAULT.getDocsWithField(context.reader(), field);
currentDocTerms = DocValues.getBinary(context.reader(), field);
docsWithField = DocValues.getDocsWithField(context.reader(), field);
return this;
}

@@ -24,7 +24,6 @@ import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;

@@ -159,7 +158,7 @@ public class SortedSetSortField extends SortField {
return new FieldComparator.TermOrdValComparator(numHits, getField(), missingValue == STRING_LAST) {
@Override
protected SortedDocValues getSortedDocValues(AtomicReaderContext context, String field) throws IOException {
SortedSetDocValues sortedSet = FieldCache.DEFAULT.getDocTermOrds(context.reader(), field);
SortedSetDocValues sortedSet = DocValues.getSortedSet(context.reader(), field);

if (sortedSet.getValueCount() >= Integer.MAX_VALUE) {
throw new UnsupportedOperationException("fields containing more than " + (Integer.MAX_VALUE-1) + " unique terms are unsupported");

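The comparator above consumes SortedSetDocValues, which expose a per-document stream of ordinals rather than a single ord. A sketch of the iteration pattern, with the reader and field hypothetical:

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;

SortedSetDocValues sortedSet = DocValues.getSortedSet(atomicReader, "value");

sortedSet.setDocument(docId);  // position the iterator on one document
long ord;
while ((ord = sortedSet.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
  BytesRef term = new BytesRef();
  sortedSet.lookupOrd(ord, term);  // ords come back in term sort order
}
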
@@ -5,11 +5,13 @@ import java.util.Locale;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.*;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;

@@ -58,6 +60,8 @@ public class TestSlowCollationMethods extends LuceneTestCase {
String value = TestUtil.randomUnicodeString(random());
Field field = newStringField("field", value, Field.Store.YES);
doc.add(field);
Field dvField = new SortedDocValuesField("field", new BytesRef(value));
doc.add(dvField);
iw.addDocument(doc);
}
splitDoc = TestUtil.randomUnicodeString(random());

@@ -19,6 +19,7 @@ package org.apache.lucene.sandbox.queries;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;

@@ -31,165 +32,14 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

/** Simple tests for SortedSetSortField */
/** Simple tests for SortedSetSortField, indexing the sortedset up front */
@SuppressCodecs({"Lucene40", "Lucene41"}) // avoid codecs that don't support sortedset
public class TestSortedSetSortField extends LuceneTestCase {

public void testForward() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newStringField("value", "baz", Field.Store.NO));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("value", "foo", Field.Store.NO));
doc.add(newStringField("value", "bar", Field.Store.NO));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortedSetSortField("value", false));

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));

ir.close();
dir.close();
}

public void testReverse() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newStringField("value", "foo", Field.Store.NO));
doc.add(newStringField("value", "bar", Field.Store.NO));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("value", "baz", Field.Store.NO));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);

IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortedSetSortField("value", true));

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));

ir.close();
dir.close();
}

public void testMissingFirst() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newStringField("value", "baz", Field.Store.NO));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("value", "foo", Field.Store.NO));
doc.add(newStringField("value", "bar", Field.Store.NO));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("id", "3", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortedSetSortField("value", false);
sortField.setMissingValue(SortField.STRING_FIRST);
Sort sort = new Sort(sortField);

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(3, td.totalHits);
// 'bar' comes before 'baz'
// null comes first
assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));

ir.close();
dir.close();
}

public void testMissingLast() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newStringField("value", "baz", Field.Store.NO));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("value", "foo", Field.Store.NO));
doc.add(newStringField("value", "bar", Field.Store.NO));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("id", "3", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortedSetSortField("value", false);
sortField.setMissingValue(SortField.STRING_LAST);
Sort sort = new Sort(sortField);

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(3, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
// null comes last
assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));

ir.close();
dir.close();
}

public void testSingleton() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(newStringField("value", "baz", Field.Store.NO));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("value", "bar", Field.Store.NO));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortedSetSortField("value", false));

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));

ir.close();
dir.close();
}

public void testEmptyIndex() throws Exception {
IndexSearcher empty = newSearcher(new MultiReader());
Query query = new TermQuery(new Term("contents", "foo"));

@@ -222,4 +72,158 @@ public class TestSortedSetSortField extends LuceneTestCase {
assertFalse(sf.equals(new SortedSetSortField("a", false, SortedSetSortField.Selector.MAX)));
assertFalse(sf.equals("foo"));
}

public void testForward() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortedSetSortField("value", false));

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));

ir.close();
dir.close();
}

public void testReverse() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);

IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortedSetSortField("value", true));

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));

ir.close();
dir.close();
}

public void testMissingFirst() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("id", "3", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortedSetSortField("value", false);
sortField.setMissingValue(SortField.STRING_FIRST);
Sort sort = new Sort(sortField);

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(3, td.totalHits);
// 'bar' comes before 'baz'
// null comes first
assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));

ir.close();
dir.close();
}

public void testMissingLast() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(newStringField("id", "3", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
SortField sortField = new SortedSetSortField("value", false);
sortField.setMissingValue(SortField.STRING_LAST);
Sort sort = new Sort(sortField);

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(3, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
// null comes last
assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));

ir.close();
dir.close();
}

public void testSingleton() throws Exception {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
writer.shutdown();

IndexSearcher searcher = newSearcher(ir);
Sort sort = new Sort(new SortedSetSortField("value", false));

TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
assertEquals(2, td.totalHits);
// 'bar' comes before 'baz'
assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));

ir.close();
dir.close();
}
}

@@ -1,210 +0,0 @@
package org.apache.lucene.sandbox.queries;

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressCodecs;

/** Simple tests for SortedSetSortField, indexing the sortedset up front */
@SuppressCodecs({"Lucene40", "Lucene41"}) // avoid codecs that don't support sortedset
public class TestSortedSetSortFieldDocValues extends LuceneTestCase {

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // ensure there is nothing in fieldcache before test starts
    FieldCache.DEFAULT.purgeAllCaches();
  }

  private void assertNoFieldCaches() {
    // docvalues sorting should NOT create any fieldcache entries!
    assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
  }

  public void testForward() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
    doc.add(newStringField("id", "2", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortedSetSortField("value", false));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'bar' comes before 'baz'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  public void testReverse() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
    doc.add(newStringField("id", "2", Field.Store.YES));
    writer.addDocument(doc);

    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortedSetSortField("value", true));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'bar' comes before 'baz'
    assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  public void testMissingFirst() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
    doc.add(newStringField("id", "2", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(newStringField("id", "3", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    SortField sortField = new SortedSetSortField("value", false);
    sortField.setMissingValue(SortField.STRING_FIRST);
    Sort sort = new Sort(sortField);

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // 'bar' comes before 'baz'
    // null comes first
    assertEquals("3", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  public void testMissingLast() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
    doc.add(newStringField("id", "2", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("foo")));
    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(newStringField("id", "3", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    SortField sortField = new SortedSetSortField("value", false);
    sortField.setMissingValue(SortField.STRING_LAST);
    Sort sort = new Sort(sortField);

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(3, td.totalHits);
    // 'bar' comes before 'baz'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    // null comes last
    assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }

  public void testSingleton() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    Document doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("baz")));
    doc.add(newStringField("id", "2", Field.Store.YES));
    writer.addDocument(doc);
    doc = new Document();
    doc.add(new SortedSetDocValuesField("value", new BytesRef("bar")));
    doc.add(newStringField("id", "1", Field.Store.YES));
    writer.addDocument(doc);
    IndexReader ir = writer.getReader();
    writer.shutdown();

    IndexSearcher searcher = newSearcher(ir);
    Sort sort = new Sort(new SortedSetSortField("value", false));

    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
    assertEquals(2, td.totalHits);
    // 'bar' comes before 'baz'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
  }
}
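The deleted sandbox test above covered the same sorting behaviors as the new version and additionally asserted that FieldCache stays empty; since FieldCache itself is being removed on this branch, those assertions have no equivalent going forward. The missing-value handling it pinned down condenses to a short sketch, assuming the searcher and index setup from the tests above:

// Docs without the "value" field can be forced to sort before or after all others.
SortField sf = new SortedSetSortField("value", false);
sf.setMissingValue(SortField.STRING_LAST);   // or SortField.STRING_FIRST
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sf));
// with STRING_LAST, the doc with no "value" lands at the end of td.scoreDocs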
@@ -26,7 +26,6 @@ import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Sort;

@@ -61,18 +60,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    Codec.setDefault(savedCodec);
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
    // ensure there is nothing in fieldcache before test starts
    FieldCache.DEFAULT.purgeAllCaches();
  }

  private void assertNoFieldCaches() {
    // docvalues sorting should NOT create any fieldcache entries!
    assertEquals(0, FieldCache.DEFAULT.getCacheEntries().length);
  }

  public void testMax() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

@@ -98,7 +85,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'baz' comes before 'foo'
    assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -129,7 +115,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'baz' comes before 'foo'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -167,7 +152,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'baz' comes before 'foo'
    assertEquals("3", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -205,7 +189,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    // null comes last
    assertEquals("1", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -234,7 +217,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'bar' comes before 'baz'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -266,7 +248,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'b' comes before 'c'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -298,7 +279,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'b' comes before 'c'
    assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -337,7 +317,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'b' comes before 'c'
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -376,7 +355,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    // null comes last
    assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -405,7 +383,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'bar' comes before 'baz'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -437,7 +414,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'b' comes before 'c'
    assertEquals("2", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -469,7 +445,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'b' comes before 'c'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -508,7 +483,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'b' comes before 'c'
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertEquals("1", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -547,7 +521,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    assertEquals("1", searcher.doc(td.scoreDocs[1].doc).get("id"));
    // null comes last
    assertEquals("3", searcher.doc(td.scoreDocs[2].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();

@@ -576,7 +549,6 @@ public class TestSortedSetSortFieldSelectors extends LuceneTestCase {
    // 'bar' comes before 'baz'
    assertEquals("1", searcher.doc(td.scoreDocs[0].doc).get("id"));
    assertEquals("2", searcher.doc(td.scoreDocs[1].doc).get("id"));
    assertNoFieldCaches();

    ir.close();
    dir.close();
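The hunks above trim the same setUp purge and assertNoFieldCaches() checks out of TestSortedSetSortFieldSelectors. What those tests actually exercise is how a multi-valued document is reduced to a single comparison value per sort. A hedged sketch of that, assuming the sandbox SortedSetSortField exposes a Selector enum through a three-argument constructor (an assumption inferred from the test names, not shown in this diff):

// A doc carrying both "b" and "c" in its sorted set:
Document doc = new Document();
doc.add(new SortedSetDocValuesField("value", new BytesRef("b")));
doc.add(new SortedSetDocValuesField("value", new BytesRef("c")));
// MIN would compare this doc as "b"; MAX compares it as "c".
Sort byMax = new Sort(new SortedSetSortField("value", false,
                                             SortedSetSortField.Selector.MAX));
TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, byMax);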
@@ -32,6 +32,7 @@
    <path refid="base.classpath"/>
    <path refid="spatialjar"/>
    <pathelement path="${queries.jar}" />
    <pathelement path="${misc.jar}" />
  </path>

  <path id="test.classpath">

@@ -40,12 +41,13 @@
    <pathelement path="src/test-files" />
  </path>

  <target name="compile-core" depends="jar-queries,common.compile-core" />
  <target name="compile-core" depends="jar-queries,jar-misc,common.compile-core" />

  <target name="javadocs" depends="javadocs-queries,compile-core">
  <target name="javadocs" depends="javadocs-queries,javadocs-misc,compile-core">
    <invoke-module-javadoc>
      <links>
        <link href="../queries"/>
        <link href="../misc"/>
      </links>
    </invoke-module-javadoc>
  </target>