LUCENE-6917: rename/deprecate numeric classes in favor of dimensional values

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1719562 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael McCandless 2015-12-11 21:13:41 +00:00
parent 671b5768a6
commit 7da175b0b6
159 changed files with 2902 additions and 2445 deletions

View File

@ -86,6 +86,9 @@ API Changes
IndexOutput.getName returns its name (Dawid Weiss, Robert Muir, Mike
McCandless)
* LUCENE-6917: Deprecate and rename NumericXXX classes to
LegacyNumericXXX in favor of dimensional values (Mike McCandless)
Optimizations
* LUCENE-6891: Use prefix coding when writing dimensional values in

View File

@ -80,3 +80,11 @@ would be equivalent to the following code with the old setBoost API:
Query q = ...;
float boost = ...;
q.setBoost(q.getBoost() * boost);
## DimensionalValues replaces NumericField (LUCENE-6917)
DimensionalValues provides faster indexing and searching, a smaller
index size, and less heap used at search time. The numeric fields
(IntField, FloatField, LongField, DoubleField) and NumericRangeQuery
have been moved to the backward-codecs module and prefixed with
Legacy.

View File

@ -17,12 +17,12 @@ package org.apache.lucene.analysis.core;
* limitations under the License.
*/
import org.apache.lucene.analysis.NumericTokenStream;
import java.util.Set;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.util.BaseTokenStreamFactoryTestCase;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import java.util.Set;
/**
* Testcase for {@link TypeTokenFilterFactory}
*/
@ -46,8 +46,7 @@ public class TestTypeTokenFilterFactory extends BaseTokenStreamFactoryTestCase {
public void testCreationWithBlackList() throws Exception {
TokenFilterFactory factory = tokenFilterFactory("Type",
"types", "stoptypes-1.txt, stoptypes-2.txt");
NumericTokenStream input = new NumericTokenStream();
input.setIntValue(123);
CannedTokenStream input = new CannedTokenStream();
factory.create(input);
}
@ -55,8 +54,7 @@ public class TestTypeTokenFilterFactory extends BaseTokenStreamFactoryTestCase {
TokenFilterFactory factory = tokenFilterFactory("Type",
"types", "stoptypes-1.txt, stoptypes-2.txt",
"useWhitelist", "true");
NumericTokenStream input = new NumericTokenStream();
input.setIntValue(123);
CannedTokenStream input = new CannedTokenStream();
factory.create(input);
}

View File

@ -43,8 +43,8 @@ import org.apache.lucene.document.DoubleDocValuesField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
@ -54,7 +54,7 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.BaseDirectoryWrapper;
@ -68,9 +68,9 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.InfoStream;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LineFileDocs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.Version;
import org.junit.AfterClass;
@ -939,8 +939,8 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
doc.add(new Field("content2", "here is more content with aaa aaa aaa", customType2));
doc.add(new Field("fie\u2C77ld", "field with non-ascii name", customType2));
// add numeric fields, to test if flex preserves encoding
doc.add(new IntField("trieInt", id, Field.Store.NO));
doc.add(new LongField("trieLong", (long) id, Field.Store.NO));
doc.add(new LegacyIntField("trieInt", id, Field.Store.NO));
doc.add(new LegacyLongField("trieLong", (long) id, Field.Store.NO));
// add docvalues fields
doc.add(new NumericDocValuesField("dvByte", (byte) id));
byte bytes[] = new byte[] {
@ -1103,36 +1103,36 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
IndexSearcher searcher = newSearcher(reader);
for (int id=10; id<15; id++) {
ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", NumericUtils.PRECISION_STEP_DEFAULT_32, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
ScoreDoc[] hits = searcher.search(LegacyNumericRangeQuery.newIntRange("trieInt", LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, Integer.valueOf(id), Integer.valueOf(id), true, true), 100).scoreDocs;
assertEquals("wrong number of hits", 1, hits.length);
StoredDocument d = searcher.doc(hits[0].doc);
assertEquals(String.valueOf(id), d.get("id"));
hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", NumericUtils.PRECISION_STEP_DEFAULT, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
hits = searcher.search(LegacyNumericRangeQuery.newLongRange("trieLong", LegacyNumericUtils.PRECISION_STEP_DEFAULT, Long.valueOf(id), Long.valueOf(id), true, true), 100).scoreDocs;
assertEquals("wrong number of hits", 1, hits.length);
d = searcher.doc(hits[0].doc);
assertEquals(String.valueOf(id), d.get("id"));
}
// check that also lower-precision fields are ok
ScoreDoc[] hits = searcher.search(NumericRangeQuery.newIntRange("trieInt", NumericUtils.PRECISION_STEP_DEFAULT_32, Integer.MIN_VALUE, Integer.MAX_VALUE, false, false), 100).scoreDocs;
ScoreDoc[] hits = searcher.search(LegacyNumericRangeQuery.newIntRange("trieInt", LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, Integer.MIN_VALUE, Integer.MAX_VALUE, false, false), 100).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
hits = searcher.search(NumericRangeQuery.newLongRange("trieLong", NumericUtils.PRECISION_STEP_DEFAULT, Long.MIN_VALUE, Long.MAX_VALUE, false, false), 100).scoreDocs;
hits = searcher.search(LegacyNumericRangeQuery.newLongRange("trieLong", LegacyNumericUtils.PRECISION_STEP_DEFAULT, Long.MIN_VALUE, Long.MAX_VALUE, false, false), 100).scoreDocs;
assertEquals("wrong number of hits", 34, hits.length);
// check decoding of terms
Terms terms = MultiFields.getTerms(searcher.getIndexReader(), "trieInt");
TermsEnum termsEnum = NumericUtils.filterPrefixCodedInts(terms.iterator());
TermsEnum termsEnum = LegacyNumericUtils.filterPrefixCodedInts(terms.iterator());
while (termsEnum.next() != null) {
int val = NumericUtils.prefixCodedToInt(termsEnum.term());
int val = LegacyNumericUtils.prefixCodedToInt(termsEnum.term());
assertTrue("value in id bounds", val >= 0 && val < 35);
}
terms = MultiFields.getTerms(searcher.getIndexReader(), "trieLong");
termsEnum = NumericUtils.filterPrefixCodedLongs(terms.iterator());
termsEnum = LegacyNumericUtils.filterPrefixCodedLongs(terms.iterator());
while (termsEnum.next() != null) {
long val = NumericUtils.prefixCodedToLong(termsEnum.term());
long val = LegacyNumericUtils.prefixCodedToLong(termsEnum.term());
assertTrue("value in id bounds", val >= 0L && val < 35L);
}

View File

@ -36,12 +36,12 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.benchmark.byTask.utils.Config;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
@ -119,8 +119,8 @@ public class DocMaker implements Closeable {
fields.put(ID_FIELD, new StringField(ID_FIELD, "", Field.Store.YES));
fields.put(NAME_FIELD, new Field(NAME_FIELD, "", ft));
numericFields.put(DATE_MSEC_FIELD, new LongField(DATE_MSEC_FIELD, 0L, Field.Store.NO));
numericFields.put(TIME_SEC_FIELD, new IntField(TIME_SEC_FIELD, 0, Field.Store.NO));
numericFields.put(DATE_MSEC_FIELD, new LegacyLongField(DATE_MSEC_FIELD, 0L, Field.Store.NO));
numericFields.put(TIME_SEC_FIELD, new LegacyIntField(TIME_SEC_FIELD, 0, Field.Store.NO));
doc = new Document();
} else {
@ -148,7 +148,7 @@ public class DocMaker implements Closeable {
return f;
}
Field getNumericField(String name, NumericType type) {
Field getNumericField(String name, LegacyNumericType type) {
Field f;
if (reuseFields) {
f = numericFields.get(name);
@ -159,16 +159,16 @@ public class DocMaker implements Closeable {
if (f == null) {
switch(type) {
case INT:
f = new IntField(name, 0, Field.Store.NO);
f = new LegacyIntField(name, 0, Field.Store.NO);
break;
case LONG:
f = new LongField(name, 0L, Field.Store.NO);
f = new LegacyLongField(name, 0L, Field.Store.NO);
break;
case FLOAT:
f = new FloatField(name, 0.0F, Field.Store.NO);
f = new LegacyFloatField(name, 0.0F, Field.Store.NO);
break;
case DOUBLE:
f = new DoubleField(name, 0.0, Field.Store.NO);
f = new LegacyDoubleField(name, 0.0, Field.Store.NO);
break;
default:
throw new AssertionError("Cannot get here");
@ -278,14 +278,14 @@ public class DocMaker implements Closeable {
date = new Date();
}
Field dateField = ds.getNumericField(DATE_MSEC_FIELD, NumericType.LONG);
Field dateField = ds.getNumericField(DATE_MSEC_FIELD, FieldType.LegacyNumericType.LONG);
dateField.setLongValue(date.getTime());
doc.add(dateField);
util.cal.setTime(date);
final int sec = util.cal.get(Calendar.HOUR_OF_DAY)*3600 + util.cal.get(Calendar.MINUTE)*60 + util.cal.get(Calendar.SECOND);
Field timeSecField = ds.getNumericField(TIME_SEC_FIELD, NumericType.INT);
Field timeSecField = ds.getNumericField(TIME_SEC_FIELD, LegacyNumericType.INT);
timeSecField.setIntValue(sec);
doc.add(timeSecField);

View File

@ -27,11 +27,10 @@ import org.apache.lucene.benchmark.byTask.PerfRunData;
import org.apache.lucene.benchmark.byTask.feeds.DocMaker;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyLongField;
/**
* Simple task to test performance of tokenizers. It just
@ -74,10 +73,10 @@ public class ReadTokensTask extends PerfTask {
int tokenCount = 0;
for(final Field field : fields) {
if (!field.fieldType().tokenized() ||
field instanceof IntField ||
field instanceof LongField ||
field instanceof FloatField ||
field instanceof DoubleField) {
field instanceof LegacyIntField ||
field instanceof LegacyLongField ||
field instanceof LegacyFloatField ||
field instanceof LegacyDoubleField) {
continue;
}

View File

@ -23,26 +23,22 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.document.DoubleField; // for javadocs
import org.apache.lucene.document.FloatField; // for javadocs
import org.apache.lucene.document.IntField; // for javadocs
import org.apache.lucene.document.LongField; // for javadocs
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.index.DimensionalValues;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeFactory;
import org.apache.lucene.util.AttributeImpl;
import org.apache.lucene.util.AttributeReflector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/**
* <b>Expert:</b> This class provides a {@link TokenStream}
* for indexing numeric values that can be used by {@link
* NumericRangeQuery}.
* org.apache.lucene.search.LegacyNumericRangeQuery}.
*
* <p>Note that for simple usage, {@link IntField}, {@link
* LongField}, {@link FloatField} or {@link DoubleField} is
* <p>Note that for simple usage, {@link org.apache.lucene.document.LegacyIntField}, {@link
* org.apache.lucene.document.LegacyLongField}, {@link org.apache.lucene.document.LegacyFloatField} or {@link org.apache.lucene.document.LegacyDoubleField} is
* recommended. These fields disable norms and
* term freqs, as they are not usually needed during
* searching. If you need to change these settings, you
@ -54,7 +50,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
* fieldType.setOmitNorms(true);
* fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
* Field field = new Field(name, new NumericTokenStream(precisionStep).setIntValue(value), fieldType);
* Field field = new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value), fieldType);
* document.add(field);
* </pre>
*
@ -62,7 +58,7 @@ import org.apache.lucene.util.NumericUtils;
* for more than one document:
*
* <pre class="prettyprint">
* NumericTokenStream stream = new NumericTokenStream(precisionStep);
* LegacyNumericTokenStream stream = new LegacyNumericTokenStream(precisionStep);
* FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
* fieldType.setOmitNorms(true);
* fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
@ -82,17 +78,20 @@ import org.apache.lucene.util.NumericUtils;
* <p><b>NOTE</b>: as token streams are only consumed once
* the document is added to the index, if you index more
* than one numeric field, use a separate <code>NumericTokenStream</code>
* than one numeric field, use a separate <code>LegacyNumericTokenStream</code>
* instance for each.</p>
*
* <p>See {@link NumericRangeQuery} for more details on the
* <p>See {@link org.apache.lucene.search.LegacyNumericRangeQuery} for more details on the
* <a
* href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* parameter as well as how numeric fields work under the hood.</p>
*
* @deprecated Please switch to {@link DimensionalValues} instead
*
* @since 2.9
*/
public final class NumericTokenStream extends TokenStream {
@Deprecated
public final class LegacyNumericTokenStream extends TokenStream {
/** The full precision token gets this token type assigned. */
public static final String TOKEN_TYPE_FULL_PREC = "fullPrecNumeric";
@ -104,7 +103,7 @@ public final class NumericTokenStream extends TokenStream {
* @lucene.experimental
* @since 4.0
*/
public interface NumericTermAttribute extends Attribute {
public interface LegacyNumericTermAttribute extends Attribute {
/** Returns current shift value, undefined before first token */
int getShift();
/** Returns current token's raw value as {@code long} with all {@link #getShift} applied, undefined before first token */
@ -136,16 +135,16 @@ public final class NumericTokenStream extends TokenStream {
@Override
public AttributeImpl createAttributeInstance(Class<? extends Attribute> attClass) {
if (CharTermAttribute.class.isAssignableFrom(attClass))
throw new IllegalArgumentException("NumericTokenStream does not support CharTermAttribute.");
throw new IllegalArgumentException("LegacyNumericTokenStream does not support CharTermAttribute.");
return delegate.createAttributeInstance(attClass);
}
}
/** Implementation of {@link NumericTermAttribute}.
/** Implementation of {@link org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute}.
* @lucene.internal
* @since 4.0
*/
public static final class NumericTermAttributeImpl extends AttributeImpl implements NumericTermAttribute,TermToBytesRefAttribute {
public static final class LegacyNumericTermAttributeImpl extends AttributeImpl implements LegacyNumericTermAttribute,TermToBytesRefAttribute {
private long value = 0L;
private int valueSize = 0, shift = 0, precisionStep = 0;
private BytesRefBuilder bytes = new BytesRefBuilder();
@ -154,15 +153,15 @@ public final class NumericTokenStream extends TokenStream {
* Creates, but does not yet initialize this attribute instance
* @see #init(long, int, int, int)
*/
public NumericTermAttributeImpl() {}
public LegacyNumericTermAttributeImpl() {}
@Override
public BytesRef getBytesRef() {
assert valueSize == 64 || valueSize == 32;
if (valueSize == 64) {
NumericUtils.longToPrefixCoded(value, shift, bytes);
LegacyNumericUtils.longToPrefixCoded(value, shift, bytes);
} else {
NumericUtils.intToPrefixCoded((int) value, shift, bytes);
LegacyNumericUtils.intToPrefixCoded((int) value, shift, bytes);
}
return bytes.get();
}
@ -198,20 +197,20 @@ public final class NumericTokenStream extends TokenStream {
@Override
public void reflectWith(AttributeReflector reflector) {
reflector.reflect(TermToBytesRefAttribute.class, "bytes", getBytesRef());
reflector.reflect(NumericTermAttribute.class, "shift", shift);
reflector.reflect(NumericTermAttribute.class, "rawValue", getRawValue());
reflector.reflect(NumericTermAttribute.class, "valueSize", valueSize);
reflector.reflect(LegacyNumericTermAttribute.class, "shift", shift);
reflector.reflect(LegacyNumericTermAttribute.class, "rawValue", getRawValue());
reflector.reflect(LegacyNumericTermAttribute.class, "valueSize", valueSize);
}
@Override
public void copyTo(AttributeImpl target) {
final NumericTermAttribute a = (NumericTermAttribute) target;
final LegacyNumericTermAttribute a = (LegacyNumericTermAttribute) target;
a.init(value, valueSize, precisionStep, shift);
}
@Override
public NumericTermAttributeImpl clone() {
NumericTermAttributeImpl t = (NumericTermAttributeImpl)super.clone();
public LegacyNumericTermAttributeImpl clone() {
LegacyNumericTermAttributeImpl t = (LegacyNumericTermAttributeImpl)super.clone();
// Do a deep clone
t.bytes = new BytesRefBuilder();
t.bytes.copyBytes(getBytesRef());
@ -228,7 +227,7 @@ public final class NumericTokenStream extends TokenStream {
if (this == obj) return true;
if (obj == null) return false;
if (getClass() != obj.getClass()) return false;
NumericTermAttributeImpl other = (NumericTermAttributeImpl) obj;
LegacyNumericTermAttributeImpl other = (LegacyNumericTermAttributeImpl) obj;
if (precisionStep != other.precisionStep) return false;
if (shift != other.shift) return false;
if (value != other.value) return false;
@ -239,11 +238,11 @@ public final class NumericTokenStream extends TokenStream {
/**
* Creates a token stream for numeric values using the default <code>precisionStep</code>
* {@link NumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized,
* {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16). The stream is not yet initialized,
* before using set a value using the various set<em>???</em>Value() methods.
*/
public NumericTokenStream() {
this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, NumericUtils.PRECISION_STEP_DEFAULT);
public LegacyNumericTokenStream() {
this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, LegacyNumericUtils.PRECISION_STEP_DEFAULT);
}
/**
@ -251,7 +250,7 @@ public final class NumericTokenStream extends TokenStream {
* <code>precisionStep</code>. The stream is not yet initialized,
* before using set a value using the various set<em>???</em>Value() methods.
*/
public NumericTokenStream(final int precisionStep) {
public LegacyNumericTokenStream(final int precisionStep) {
this(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, precisionStep);
}
@ -262,7 +261,7 @@ public final class NumericTokenStream extends TokenStream {
* The stream is not yet initialized,
* before using set a value using the various set<em>???</em>Value() methods.
*/
public NumericTokenStream(AttributeFactory factory, final int precisionStep) {
public LegacyNumericTokenStream(AttributeFactory factory, final int precisionStep) {
super(new NumericAttributeFactory(factory));
if (precisionStep < 1)
throw new IllegalArgumentException("precisionStep must be >=1");
@ -274,9 +273,9 @@ public final class NumericTokenStream extends TokenStream {
* Initializes the token stream with the supplied <code>long</code> value.
* @param value the value, for which this TokenStream should enumerate tokens.
* @return this instance, because of this you can use it the following way:
* <code>new Field(name, new NumericTokenStream(precisionStep).setLongValue(value))</code>
* <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setLongValue(value))</code>
*/
public NumericTokenStream setLongValue(final long value) {
public LegacyNumericTokenStream setLongValue(final long value) {
numericAtt.init(value, valSize = 64, precisionStep, -precisionStep);
return this;
}
@ -285,9 +284,9 @@ public final class NumericTokenStream extends TokenStream {
* Initializes the token stream with the supplied <code>int</code> value.
* @param value the value, for which this TokenStream should enumerate tokens.
* @return this instance, because of this you can use it the following way:
* <code>new Field(name, new NumericTokenStream(precisionStep).setIntValue(value))</code>
* <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setIntValue(value))</code>
*/
public NumericTokenStream setIntValue(final int value) {
public LegacyNumericTokenStream setIntValue(final int value) {
numericAtt.init(value, valSize = 32, precisionStep, -precisionStep);
return this;
}
@ -296,10 +295,10 @@ public final class NumericTokenStream extends TokenStream {
* Initializes the token stream with the supplied <code>double</code> value.
* @param value the value, for which this TokenStream should enumerate tokens.
* @return this instance, because of this you can use it the following way:
* <code>new Field(name, new NumericTokenStream(precisionStep).setDoubleValue(value))</code>
* <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setDoubleValue(value))</code>
*/
public NumericTokenStream setDoubleValue(final double value) {
numericAtt.init(NumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
public LegacyNumericTokenStream setDoubleValue(final double value) {
numericAtt.init(LegacyNumericUtils.doubleToSortableLong(value), valSize = 64, precisionStep, -precisionStep);
return this;
}
@ -307,10 +306,10 @@ public final class NumericTokenStream extends TokenStream {
* Initializes the token stream with the supplied <code>float</code> value.
* @param value the value, for which this TokenStream should enumerate tokens.
* @return this instance, because of this you can use it the following way:
* <code>new Field(name, new NumericTokenStream(precisionStep).setFloatValue(value))</code>
* <code>new Field(name, new LegacyNumericTokenStream(precisionStep).setFloatValue(value))</code>
*/
public NumericTokenStream setFloatValue(final float value) {
numericAtt.init(NumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
public LegacyNumericTokenStream setFloatValue(final float value) {
numericAtt.init(LegacyNumericUtils.floatToSortableInt(value), valSize = 32, precisionStep, -precisionStep);
return this;
}
@ -347,7 +346,7 @@ public final class NumericTokenStream extends TokenStream {
}
// members
private final NumericTermAttribute numericAtt = addAttribute(NumericTermAttribute.class);
private final LegacyNumericTermAttribute numericAtt = addAttribute(LegacyNumericTermAttribute.class);
private final TypeAttribute typeAtt = addAttribute(TypeAttribute.class);
private final PositionIncrementAttribute posIncrAtt = addAttribute(PositionIncrementAttribute.class);

View File

@ -24,7 +24,7 @@ import java.util.Map;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.FieldInfosFormat;
import org.apache.lucene.document.DimensionalField;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
@ -97,7 +97,7 @@ import org.apache.lucene.store.IndexOutput;
* are updates stored by {@link DocValuesFormat}.</li>
* <li>Attributes: a key-value map of codec-private attributes.</li>
* <li>DimensionCount, DimensionNumBytes: these are non-zero only if the field is
* indexed dimensionally using {@link DimensionalField}</li>
* indexed dimensionally, e.g. using {@link DimensionalLongField}</li>
* </ul>
*
* @lucene.experimental

View File

@ -17,11 +17,6 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.util.NumericUtils; // for javadocs
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
@ -29,6 +24,10 @@ import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;
import org.apache.lucene.search.DimensionalRangeQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.TermRangeQuery;
/**
* Provides support for converting dates to strings and vice-versa.
* The strings are structured so that lexicographic sorting orders
@ -40,13 +39,12 @@ import java.util.TimeZone;
* {@link TermRangeQuery} and {@link PrefixQuery} will require more memory and become slower.
*
* <P>
* Another approach is {@link NumericUtils}, which provides
* a sortable binary representation (prefix encoded) of numeric values, which
* date/time are.
* Another approach is {@link DimensionalLongField}, which indexes the
* values in sorted order.
* For indexing a {@link Date} or {@link Calendar}, just get the unix timestamp as
* <code>long</code> using {@link Date#getTime} or {@link Calendar#getTimeInMillis} and
* index this as a numeric value with {@link LongField}
* and use {@link NumericRangeQuery} to query it.
* index this as a numeric value with {@link DimensionalLongField}
* and use {@link DimensionalRangeQuery} to query it.
*/
public class DateTools {

View File

@ -18,15 +18,13 @@ package org.apache.lucene.document;
*/
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.bkd.BKDUtil;
/** A field that is indexed dimensionally such that finding
* all documents within an N-dimensional at search time is
/** A binary field that is indexed dimensionally such that finding
* all documents within an N-dimensional shape or range at search time is
* efficient. Multiple values for the same field in one document
* are allowed. */
public final class DimensionalField extends Field {
public final class DimensionalBinaryField extends Field {
private static FieldType getType(byte[][] point) {
if (point == null) {
@ -91,22 +89,6 @@ public final class DimensionalField extends Field {
return new BytesRef(packed);
}
private static BytesRef pack(long... point) {
if (point == null) {
throw new IllegalArgumentException("point cannot be null");
}
if (point.length == 0) {
throw new IllegalArgumentException("point cannot be 0 dimensions");
}
byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_LONG];
for(int dim=0;dim<point.length;dim++) {
BKDUtil.longToBytes(point[dim], packed, dim);
}
return new BytesRef(packed);
}
/** General purpose API: creates a new DimensionalField, indexing the
* provided N-dimensional binary point.
*
@ -114,23 +96,12 @@ public final class DimensionalField extends Field {
* @param point byte[][] value
* @throws IllegalArgumentException if the field name or value is null.
*/
public DimensionalField(String name, byte[]... point) {
public DimensionalBinaryField(String name, byte[]... point) {
super(name, pack(point), getType(point));
}
/** General purpose API: creates a new DimensionalField, indexing the
* provided N-dimensional long point.
*
* @param name field name
* @param point long[] value
* @throws IllegalArgumentException if the field name or value is null.
*/
public DimensionalField(String name, long... point) {
super(name, pack(point), getType(point.length, RamUsageEstimator.NUM_BYTES_LONG));
}
/** Expert API */
public DimensionalField(String name, byte[] packedPoint, FieldType type) {
public DimensionalBinaryField(String name, byte[] packedPoint, FieldType type) {
super(name, packedPoint, type);
if (packedPoint.length != type.dimensionCount() * type.dimensionNumBytes()) {
throw new IllegalArgumentException("packedPoint is length=" + packedPoint.length + " but type.dimensionCount()=" + type.dimensionCount() + " and type.dimensionNumBytes()=" + type.dimensionNumBytes());

View File

@ -0,0 +1,86 @@
package org.apache.lucene.document;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
/** A double field that is indexed dimensionally such that finding
 * all documents within an N-dimensional shape or range at search time is
 * efficient. Multiple values for the same field in one document
 * are allowed. */
public final class DimensionalDoubleField extends Field {

  /** Builds a frozen {@link FieldType} with {@code numDims} dimensions of
   *  {@link RamUsageEstimator#NUM_BYTES_LONG} bytes each, since each double
   *  is encoded as a sortable 8-byte long. */
  private static FieldType getType(int numDims) {
    FieldType type = new FieldType();
    type.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_LONG);
    type.freeze();
    return type;
  }

  @Override
  public void setDoubleValue(double value) {
    // Single-value convenience; delegates to the varargs setter.
    setDoubleValues(value);
  }

  /** Change the values of this field */
  public void setDoubleValues(double... point) {
    fieldsData = pack(point);
  }

  @Override
  public void setBytesValue(BytesRef bytes) {
    // The packed byte encoding is managed internally; callers must use the
    // double-typed setters instead.
    throw new IllegalArgumentException("cannot change value type from double to BytesRef");
  }

  @Override
  public Number numericValue() {
    BytesRef bytes = (BytesRef) fieldsData;
    // Only meaningful for a one-dimensional field: the assert requires exactly
    // one packed 8-byte encoded value.
    assert bytes.length == RamUsageEstimator.NUM_BYTES_LONG;
    return NumericUtils.sortableLongToDouble(NumericUtils.bytesToLongDirect(bytes.bytes, bytes.offset));
  }

  /** Encodes each dimension's double as a sortable long and concatenates the
   *  8-byte encodings into one {@link BytesRef}.
   *  @throws IllegalArgumentException if {@code point} is null or has zero dimensions */
  private static BytesRef pack(double... point) {
    if (point == null) {
      throw new IllegalArgumentException("point cannot be null");
    }
    if (point.length == 0) {
      throw new IllegalArgumentException("point cannot be 0 dimensions");
    }
    byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_LONG];
    for(int dim=0;dim<point.length;dim++) {
      NumericUtils.longToBytesDirect(NumericUtils.doubleToSortableLong(point[dim]), packed, dim);
    }
    return new BytesRef(packed);
  }

  /** Creates a new DimensionalDoubleField, indexing the
   * provided N-dimensional double point.
   *
   * @param name field name
   * @param point double[] value
   * @throws IllegalArgumentException if the field name or value is null.
   */
  public DimensionalDoubleField(String name, double... point) {
    super(name, pack(point), getType(point.length));
  }
}

View File

@ -0,0 +1,86 @@
package org.apache.lucene.document;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
/** A float field that is indexed dimensionally such that finding
 *  all documents within an N-dimensional shape or range at search time is
 *  efficient. Multiple values for the same field in one document
 *  are allowed. */
public final class DimensionalFloatField extends Field {

  /** Builds a frozen {@link FieldType} configured for {@code numDims}
   *  dimensions of {@link RamUsageEstimator#NUM_BYTES_INT} bytes each. */
  private static FieldType getType(int numDims) {
    final FieldType fieldType = new FieldType();
    fieldType.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_INT);
    fieldType.freeze();
    return fieldType;
  }

  /** Creates a new DimensionalFloatField, indexing the
   *  provided N-dimensional float point.
   *
   *  @param name field name
   *  @param point float[] value
   *  @throws IllegalArgumentException if the field name or value is null.
   */
  public DimensionalFloatField(String name, float... point) {
    super(name, pack(point), getType(point.length));
  }

  /** Encodes each dimension as a sortable int and concatenates all
   *  dimensions into one {@link BytesRef}. */
  private static BytesRef pack(float... point) {
    if (point == null) {
      throw new IllegalArgumentException("point cannot be null");
    }
    if (point.length == 0) {
      throw new IllegalArgumentException("point cannot be 0 dimensions");
    }
    final byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_INT];
    int dim = 0;
    while (dim < point.length) {
      NumericUtils.intToBytesDirect(NumericUtils.floatToSortableInt(point[dim]), packed, dim);
      dim++;
    }
    return new BytesRef(packed);
  }

  /** Change the values of this field */
  public void setFloatValues(float... point) {
    fieldsData = pack(point);
  }

  @Override
  public void setFloatValue(float value) {
    setFloatValues(value);
  }

  @Override
  public void setBytesValue(BytesRef bytes) {
    throw new IllegalArgumentException("cannot change value type from float to BytesRef");
  }

  @Override
  public Number numericValue() {
    // NOTE(review): decodes only the first dimension; the assert below would
    // trip for a multi-dimensional point, whose packed bytes are longer -- confirm
    // callers only use this for single-dimension fields.
    final BytesRef bytes = (BytesRef) fieldsData;
    assert bytes.length == RamUsageEstimator.NUM_BYTES_INT;
    return NumericUtils.sortableIntToFloat(NumericUtils.bytesToIntDirect(bytes.bytes, bytes.offset));
  }
}

View File

@ -0,0 +1,86 @@
package org.apache.lucene.document;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
/** An int field that is indexed dimensionally such that finding
 *  all documents within an N-dimensional shape or range at search time is
 *  efficient. Multiple values for the same field in one document
 *  are allowed. */
public final class DimensionalIntField extends Field {

  /** Builds a frozen {@link FieldType} configured for {@code numDims}
   *  dimensions of {@link RamUsageEstimator#NUM_BYTES_INT} bytes each. */
  private static FieldType getType(int numDims) {
    final FieldType fieldType = new FieldType();
    fieldType.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_INT);
    fieldType.freeze();
    return fieldType;
  }

  /** Creates a new DimensionalIntField, indexing the
   *  provided N-dimensional int point.
   *
   *  @param name field name
   *  @param point int[] value
   *  @throws IllegalArgumentException if the field name or value is null.
   */
  public DimensionalIntField(String name, int... point) {
    super(name, pack(point), getType(point.length));
  }

  /** Encodes each dimension as big-endian sortable bytes and concatenates
   *  all dimensions into one {@link BytesRef}. */
  private static BytesRef pack(int... point) {
    if (point == null) {
      throw new IllegalArgumentException("point cannot be null");
    }
    if (point.length == 0) {
      throw new IllegalArgumentException("point cannot be 0 dimensions");
    }
    final byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_INT];
    int dim = 0;
    while (dim < point.length) {
      NumericUtils.intToBytes(point[dim], packed, dim);
      dim++;
    }
    return new BytesRef(packed);
  }

  /** Change the values of this field */
  public void setIntValues(int... point) {
    fieldsData = pack(point);
  }

  @Override
  public void setIntValue(int value) {
    setIntValues(value);
  }

  @Override
  public void setBytesValue(BytesRef bytes) {
    throw new IllegalArgumentException("cannot change value type from int to BytesRef");
  }

  @Override
  public Number numericValue() {
    // NOTE(review): decodes only the first dimension; the assert below would
    // trip for a multi-dimensional point, whose packed bytes are longer -- confirm
    // callers only use this for single-dimension fields.
    final BytesRef bytes = (BytesRef) fieldsData;
    assert bytes.length == RamUsageEstimator.NUM_BYTES_INT;
    return NumericUtils.bytesToInt(bytes.bytes, bytes.offset);
  }
}

View File

@ -0,0 +1,86 @@
package org.apache.lucene.document;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
/** A long field that is indexed dimensionally such that finding
 *  all documents within an N-dimensional shape or range at search time is
 *  efficient. Multiple values for the same field in one document
 *  are allowed. */
public final class DimensionalLongField extends Field {

  /** Builds a frozen {@link FieldType} configured for {@code numDims}
   *  dimensions of {@link RamUsageEstimator#NUM_BYTES_LONG} bytes each. */
  private static FieldType getType(int numDims) {
    final FieldType fieldType = new FieldType();
    fieldType.setDimensions(numDims, RamUsageEstimator.NUM_BYTES_LONG);
    fieldType.freeze();
    return fieldType;
  }

  /** Creates a new DimensionalLongField, indexing the
   *  provided N-dimensional long point.
   *
   *  @param name field name
   *  @param point long[] value
   *  @throws IllegalArgumentException if the field name or value is null.
   */
  public DimensionalLongField(String name, long... point) {
    super(name, pack(point), getType(point.length));
  }

  /** Encodes each dimension as big-endian sortable bytes and concatenates
   *  all dimensions into one {@link BytesRef}. */
  private static BytesRef pack(long... point) {
    if (point == null) {
      throw new IllegalArgumentException("point cannot be null");
    }
    if (point.length == 0) {
      throw new IllegalArgumentException("point cannot be 0 dimensions");
    }
    final byte[] packed = new byte[point.length * RamUsageEstimator.NUM_BYTES_LONG];
    int dim = 0;
    while (dim < point.length) {
      NumericUtils.longToBytes(point[dim], packed, dim);
      dim++;
    }
    return new BytesRef(packed);
  }

  /** Change the values of this field */
  public void setLongValues(long... point) {
    fieldsData = pack(point);
  }

  @Override
  public void setLongValue(long value) {
    setLongValues(value);
  }

  @Override
  public void setBytesValue(BytesRef bytes) {
    throw new IllegalArgumentException("cannot change value type from long to BytesRef");
  }

  @Override
  public Number numericValue() {
    // NOTE(review): decodes only the first dimension; the assert below would
    // trip for a multi-dimensional point, whose packed bytes are longer -- confirm
    // callers only use this for single-dimension fields.
    final BytesRef bytes = (BytesRef) fieldsData;
    assert bytes.length == RamUsageEstimator.NUM_BYTES_LONG;
    return NumericUtils.bytesToLong(bytes.bytes, bytes.offset);
  }
}

View File

@ -229,8 +229,8 @@ public final class Document implements IndexDocument {
* Returns an array of values of the field specified as the method parameter.
* This method returns an empty array when there are no
* matching fields. It never returns null.
* For {@link IntField}, {@link LongField}, {@link
* FloatField} and {@link DoubleField} it returns the string value of the number. If you want
* For {@link LegacyIntField}, {@link LegacyLongField}, {@link
* LegacyFloatField} and {@link LegacyDoubleField} it returns the string value of the number. If you want
* the actual numeric field instances back, use {@link #getFields}.
* @param name the name of the field
* @return a <code>String[]</code> of field values
@ -256,8 +256,8 @@ public final class Document implements IndexDocument {
* this document, or null. If multiple fields exist with this name, this
* method returns the first value added. If only binary fields with this name
* exist, returns null.
* For {@link IntField}, {@link LongField}, {@link
* FloatField} and {@link DoubleField} it returns the string value of the number. If you want
* For {@link LegacyIntField}, {@link LegacyLongField}, {@link
* LegacyFloatField} and {@link LegacyDoubleField} it returns the string value of the number. If you want
* the actual numeric field instance back, use {@link #getField}.
*/
public final String get(String name) {

View File

@ -21,12 +21,11 @@ import java.io.IOException;
import java.io.Reader;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.BytesTermAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.IndexableFieldType;
@ -36,8 +35,8 @@ import org.apache.lucene.util.BytesRef;
/**
* Expert: directly create a field for a document. Most
* users should use one of the sugar subclasses: {@link
* IntField}, {@link LongField}, {@link FloatField}, {@link
* DoubleField}, {@link BinaryDocValuesField}, {@link
* LegacyIntField}, {@link LegacyLongField}, {@link LegacyFloatField}, {@link
* LegacyDoubleField}, {@link BinaryDocValuesField}, {@link
* NumericDocValuesField}, {@link SortedDocValuesField}, {@link
* StringField}, {@link TextField}, {@link StoredField}.
*
@ -504,14 +503,14 @@ public class Field implements IndexableField, StorableField {
return null;
}
final NumericType numericType = fieldType().numericType();
final FieldType.LegacyNumericType numericType = fieldType().numericType();
if (numericType != null) {
if (!(reuse instanceof NumericTokenStream && ((NumericTokenStream)reuse).getPrecisionStep() == type.numericPrecisionStep())) {
if (!(reuse instanceof LegacyNumericTokenStream && ((LegacyNumericTokenStream)reuse).getPrecisionStep() == type.numericPrecisionStep())) {
// lazy init the TokenStream as it is heavy to instantiate
// (attributes,...) if not needed (stored field loading)
reuse = new NumericTokenStream(type.numericPrecisionStep());
reuse = new LegacyNumericTokenStream(type.numericPrecisionStep());
}
final NumericTokenStream nts = (NumericTokenStream) reuse;
final LegacyNumericTokenStream nts = (LegacyNumericTokenStream) reuse;
// initialize value in TokenStream
final Number val = (Number) fieldsData;
switch (numericType) {

View File

@ -21,8 +21,7 @@ import org.apache.lucene.analysis.Analyzer; // javadocs
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexableFieldType;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/**
* Describes the properties of a field.
@ -32,7 +31,7 @@ public class FieldType implements IndexableFieldType {
/** Data type of the numeric value
* @since 3.2
*/
public enum NumericType {
public enum LegacyNumericType {
/** 32-bit integer numeric type */
INT,
/** 64-bit long numeric type */
@ -51,9 +50,9 @@ public class FieldType implements IndexableFieldType {
private boolean storeTermVectorPayloads;
private boolean omitNorms;
private IndexOptions indexOptions = IndexOptions.NONE;
private NumericType numericType;
private LegacyNumericType numericType;
private boolean frozen;
private int numericPrecisionStep = NumericUtils.PRECISION_STEP_DEFAULT;
private int numericPrecisionStep = LegacyNumericUtils.PRECISION_STEP_DEFAULT;
private DocValuesType docValuesType = DocValuesType.NONE;
private int dimensionCount;
private int dimensionNumBytes;
@ -301,20 +300,20 @@ public class FieldType implements IndexableFieldType {
* future modifications.
* @see #numericType()
*/
public void setNumericType(NumericType type) {
public void setNumericType(LegacyNumericType type) {
checkIfFrozen();
numericType = type;
}
/**
* NumericType: if non-null then the field's value will be indexed
* numerically so that {@link NumericRangeQuery} can be used at
* LegacyNumericType: if non-null then the field's value will be indexed
* numerically so that {@link org.apache.lucene.search.LegacyNumericRangeQuery} can be used at
* search time.
* <p>
* The default is <code>null</code> (no numeric type)
* @see #setNumericType(NumericType)
* @see #setNumericType(org.apache.lucene.document.FieldType.LegacyNumericType)
*/
public NumericType numericType() {
public LegacyNumericType numericType() {
return numericType;
}
@ -339,7 +338,7 @@ public class FieldType implements IndexableFieldType {
* <p>
* This has no effect if {@link #numericType()} returns null.
* <p>
* The default is {@link NumericUtils#PRECISION_STEP_DEFAULT}
* The default is {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT}
* @see #setNumericPrecisionStep(int)
*/
public int numericPrecisionStep() {

View File

@ -17,10 +17,8 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
/**
* <p>
@ -28,14 +26,14 @@ import org.apache.lucene.util.NumericUtils;
* for efficient range filtering and sorting. Here's an example usage:
*
* <pre class="prettyprint">
* document.add(new DoubleField(name, 6.0, Field.Store.NO));
* document.add(new LegacyDoubleField(name, 6.0, Field.Store.NO));
* </pre>
*
* For optimal performance, re-use the <code>DoubleField</code> and
* For optimal performance, re-use the <code>LegacyDoubleField</code> and
* {@link Document} instance for more than one document:
*
* <pre class="prettyprint">
* DoubleField field = new DoubleField(name, 0.0, Field.Store.NO);
* LegacyDoubleField field = new LegacyDoubleField(name, 0.0, Field.Store.NO);
* Document document = new Document();
* document.add(field);
*
@ -47,24 +45,24 @@ import org.apache.lucene.util.NumericUtils;
* }
* </pre>
*
* See also {@link IntField}, {@link LongField}, {@link
* FloatField}.
* See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
* LegacyFloatField}.
*
* <p>To perform range querying or filtering against a
* <code>DoubleField</code>, use {@link NumericRangeQuery}.
* <code>LegacyDoubleField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
* To sort according to a
* <code>DoubleField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>DoubleField</code>
* <code>LegacyDoubleField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#DOUBLE}. <code>LegacyDoubleField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
*
* <p>You may add the same field name as an <code>DoubleField</code> to
* <p>You may add the same field name as an <code>LegacyDoubleField</code> to
* the same document more than once. Range querying and
* filtering will be the logical OR of all values; so a range query
* will hit all documents that have at least one value in
* the range. However sort behavior is not defined. If you need to sort,
* you should separately index a single-valued <code>DoubleField</code>.</p>
* you should separately index a single-valued <code>LegacyDoubleField</code>.</p>
*
* <p>A <code>DoubleField</code> will consume somewhat more disk space
* <p>A <code>LegacyDoubleField</code> will consume somewhat more disk space
* in the index than an ordinary single-valued field.
* However, for a typical index that includes substantial
* textual content per document, this increase will likely
@ -85,7 +83,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery}.
* org.apache.lucene.search.LegacyNumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one
@ -93,9 +91,9 @@ import org.apache.lucene.util.NumericUtils;
*
* <p>For more information on the internals of numeric trie
* indexing, including the <a
* href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link NumericRangeQuery}. The format of
* indexed values is described in {@link NumericUtils}.
* href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
* indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
*
* <p>If you only need to sort by numeric value, and never
* run range querying/filtering, you can index using a
@ -103,17 +101,20 @@ import org.apache.lucene.util.NumericUtils;
* This will minimize disk space consumed. </p>
*
* <p>More advanced users can instead use {@link
* NumericTokenStream} directly, when indexing numbers. This
* org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
* class is a wrapper around this token stream type for
* easier, more intuitive usage.</p>
*
* @deprecated Please use {@link DimensionalDoubleField} instead
*
* @since 2.9
*/
public final class DoubleField extends Field {
@Deprecated
public final class LegacyDoubleField extends Field {
/**
* Type for a DoubleField that is not stored:
* Type for a LegacyDoubleField that is not stored:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
@ -121,12 +122,12 @@ public final class DoubleField extends Field {
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_NOT_STORED.setNumericType(FieldType.NumericType.DOUBLE);
TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.DOUBLE);
TYPE_NOT_STORED.freeze();
}
/**
* Type for a stored DoubleField:
* Type for a stored LegacyDoubleField:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_STORED = new FieldType();
@ -134,20 +135,20 @@ public final class DoubleField extends Field {
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_STORED.setNumericType(FieldType.NumericType.DOUBLE);
TYPE_STORED.setNumericType(FieldType.LegacyNumericType.DOUBLE);
TYPE_STORED.setStored(true);
TYPE_STORED.freeze();
}
/** Creates a stored or un-stored DoubleField with the provided value
/** Creates a stored or un-stored LegacyDoubleField with the provided value
* and default <code>precisionStep</code> {@link
* NumericUtils#PRECISION_STEP_DEFAULT} (16).
* org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
* @param name field name
* @param value 64-bit double value
* @param stored Store.YES if the content should also be stored
* @throws IllegalArgumentException if the field name is null.
*/
public DoubleField(String name, double value, Store stored) {
public LegacyDoubleField(String name, double value, Store stored) {
super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
fieldsData = Double.valueOf(value);
}
@ -157,13 +158,13 @@ public final class DoubleField extends Field {
* @param name field name
* @param value 64-bit double value
* @param type customized field type: must have {@link FieldType#numericType()}
* of {@link FieldType.NumericType#DOUBLE}.
* of {@link org.apache.lucene.document.FieldType.LegacyNumericType#DOUBLE}.
* @throws IllegalArgumentException if the field name or type is null, or
* if the field type does not have a DOUBLE numericType()
*/
public DoubleField(String name, double value, FieldType type) {
public LegacyDoubleField(String name, double value, FieldType type) {
super(name, type);
if (type.numericType() != FieldType.NumericType.DOUBLE) {
if (type.numericType() != FieldType.LegacyNumericType.DOUBLE) {
throw new IllegalArgumentException("type.numericType() must be DOUBLE but got " + type.numericType());
}
fieldsData = Double.valueOf(value);

View File

@ -17,10 +17,8 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/**
* <p>
@ -28,14 +26,14 @@ import org.apache.lucene.util.NumericUtils;
* for efficient range filtering and sorting. Here's an example usage:
*
* <pre class="prettyprint">
* document.add(new FloatField(name, 6.0F, Field.Store.NO));
* document.add(new LegacyFloatField(name, 6.0F, Field.Store.NO));
* </pre>
*
* For optimal performance, re-use the <code>FloatField</code> and
* For optimal performance, re-use the <code>LegacyFloatField</code> and
* {@link Document} instance for more than one document:
*
* <pre class="prettyprint">
* FloatField field = new FloatField(name, 0.0F, Field.Store.NO);
* LegacyFloatField field = new LegacyFloatField(name, 0.0F, Field.Store.NO);
* Document document = new Document();
* document.add(field);
*
@ -47,24 +45,24 @@ import org.apache.lucene.util.NumericUtils;
* }
* </pre>
*
* See also {@link IntField}, {@link LongField}, {@link
* DoubleField}.
* See also {@link LegacyIntField}, {@link LegacyLongField}, {@link
* LegacyDoubleField}.
*
* <p>To perform range querying or filtering against a
* <code>FloatField</code>, use {@link NumericRangeQuery}.
* <code>LegacyFloatField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
* To sort according to a
* <code>FloatField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>FloatField</code>
* <code>LegacyFloatField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#FLOAT}. <code>LegacyFloatField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
*
* <p>You may add the same field name as an <code>FloatField</code> to
* <p>You may add the same field name as an <code>LegacyFloatField</code> to
* the same document more than once. Range querying and
* filtering will be the logical OR of all values; so a range query
* will hit all documents that have at least one value in
* the range. However sort behavior is not defined. If you need to sort,
* you should separately index a single-valued <code>FloatField</code>.</p>
* you should separately index a single-valued <code>LegacyFloatField</code>.</p>
*
* <p>A <code>FloatField</code> will consume somewhat more disk space
* <p>A <code>LegacyFloatField</code> will consume somewhat more disk space
* in the index than an ordinary single-valued field.
* However, for a typical index that includes substantial
* textual content per document, this increase will likely
@ -85,7 +83,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery}.
* org.apache.lucene.search.LegacyNumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one
@ -93,9 +91,9 @@ import org.apache.lucene.util.NumericUtils;
*
* <p>For more information on the internals of numeric trie
* indexing, including the <a
* href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link NumericRangeQuery}. The format of
* indexed values is described in {@link NumericUtils}.
* href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
* indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
*
* <p>If you only need to sort by numeric value, and never
* run range querying/filtering, you can index using a
@ -103,17 +101,20 @@ import org.apache.lucene.util.NumericUtils;
* This will minimize disk space consumed. </p>
*
* <p>More advanced users can instead use {@link
* NumericTokenStream} directly, when indexing numbers. This
* org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
* class is a wrapper around this token stream type for
* easier, more intuitive usage.</p>
*
* @deprecated Please use {@link DimensionalFloatField} instead
*
* @since 2.9
*/
public final class FloatField extends Field {
@Deprecated
public final class LegacyFloatField extends Field {
/**
* Type for a FloatField that is not stored:
* Type for a LegacyFloatField that is not stored:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
@ -121,13 +122,13 @@ public final class FloatField extends Field {
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_NOT_STORED.setNumericType(FieldType.NumericType.FLOAT);
TYPE_NOT_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.FLOAT);
TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_NOT_STORED.freeze();
}
/**
* Type for a stored FloatField:
* Type for a stored LegacyFloatField:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_STORED = new FieldType();
@ -135,21 +136,21 @@ public final class FloatField extends Field {
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_STORED.setNumericType(FieldType.NumericType.FLOAT);
TYPE_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_STORED.setNumericType(FieldType.LegacyNumericType.FLOAT);
TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_STORED.setStored(true);
TYPE_STORED.freeze();
}
/** Creates a stored or un-stored FloatField with the provided value
/** Creates a stored or un-stored LegacyFloatField with the provided value
* and default <code>precisionStep</code> {@link
* NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* @param name field name
* @param value 32-bit double value
* @param stored Store.YES if the content should also be stored
* @throws IllegalArgumentException if the field name is null.
*/
public FloatField(String name, float value, Store stored) {
public LegacyFloatField(String name, float value, Store stored) {
super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
fieldsData = Float.valueOf(value);
}
@ -159,13 +160,13 @@ public final class FloatField extends Field {
* @param name field name
* @param value 32-bit float value
* @param type customized field type: must have {@link FieldType#numericType()}
* of {@link FieldType.NumericType#FLOAT}.
* of {@link org.apache.lucene.document.FieldType.LegacyNumericType#FLOAT}.
* @throws IllegalArgumentException if the field name or type is null, or
* if the field type does not have a FLOAT numericType()
*/
public FloatField(String name, float value, FieldType type) {
public LegacyFloatField(String name, float value, FieldType type) {
super(name, type);
if (type.numericType() != FieldType.NumericType.FLOAT) {
if (type.numericType() != FieldType.LegacyNumericType.FLOAT) {
throw new IllegalArgumentException("type.numericType() must be FLOAT but got " + type.numericType());
}
fieldsData = Float.valueOf(value);

View File

@ -17,10 +17,8 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/**
* <p>
@ -28,14 +26,14 @@ import org.apache.lucene.util.NumericUtils;
* for efficient range filtering and sorting. Here's an example usage:
*
* <pre class="prettyprint">
* document.add(new IntField(name, 6, Field.Store.NO));
* document.add(new LegacyIntField(name, 6, Field.Store.NO));
* </pre>
*
* For optimal performance, re-use the <code>IntField</code> and
* For optimal performance, re-use the <code>LegacyIntField</code> and
* {@link Document} instance for more than one document:
*
* <pre class="prettyprint">
* IntField field = new IntField(name, 6, Field.Store.NO);
* LegacyIntField field = new LegacyIntField(name, 6, Field.Store.NO);
* Document document = new Document();
* document.add(field);
*
@ -47,24 +45,24 @@ import org.apache.lucene.util.NumericUtils;
* }
* </pre>
*
* See also {@link LongField}, {@link FloatField}, {@link
* DoubleField}.
* See also {@link LegacyLongField}, {@link LegacyFloatField}, {@link
* LegacyDoubleField}.
*
* <p>To perform range querying or filtering against a
* <code>IntField</code>, use {@link NumericRangeQuery}.
* <code>LegacyIntField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
* To sort according to a
* <code>IntField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#INT}. <code>IntField</code>
* <code>LegacyIntField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#INT}. <code>LegacyIntField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.</p>
*
* <p>You may add the same field name as an <code>IntField</code> to
* <p>You may add the same field name as an <code>LegacyIntField</code> to
* the same document more than once. Range querying and
* filtering will be the logical OR of all values; so a range query
* will hit all documents that have at least one value in
* the range. However sort behavior is not defined. If you need to sort,
* you should separately index a single-valued <code>IntField</code>.</p>
* you should separately index a single-valued <code>LegacyIntField</code>.</p>
*
* <p>An <code>IntField</code> will consume somewhat more disk space
* <p>An <code>LegacyIntField</code> will consume somewhat more disk space
* in the index than an ordinary single-valued field.
* However, for a typical index that includes substantial
* textual content per document, this increase will likely
@ -85,7 +83,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery}.
* org.apache.lucene.search.LegacyNumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one
@ -93,9 +91,9 @@ import org.apache.lucene.util.NumericUtils;
*
* <p>For more information on the internals of numeric trie
* indexing, including the <a
* href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link NumericRangeQuery}. The format of
* indexed values is described in {@link NumericUtils}.
* href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
* indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
*
* <p>If you only need to sort by numeric value, and never
* run range querying/filtering, you can index using a
@ -103,17 +101,20 @@ import org.apache.lucene.util.NumericUtils;
* This will minimize disk space consumed. </p>
*
* <p>More advanced users can instead use {@link
* NumericTokenStream} directly, when indexing numbers. This
* org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
* class is a wrapper around this token stream type for
* easier, more intuitive usage.</p>
*
* @deprecated Please use {@link DimensionalIntField} instead
*
* @since 2.9
*/
public final class IntField extends Field {
@Deprecated
public final class LegacyIntField extends Field {
/**
* Type for an IntField that is not stored:
 * Type for a LegacyIntField that is not stored:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
@ -121,13 +122,13 @@ public final class IntField extends Field {
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_NOT_STORED.setNumericType(FieldType.NumericType.INT);
TYPE_NOT_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.INT);
TYPE_NOT_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_NOT_STORED.freeze();
}
/**
* Type for a stored IntField:
* Type for a stored LegacyIntField:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_STORED = new FieldType();
@ -135,21 +136,21 @@ public final class IntField extends Field {
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_STORED.setNumericType(FieldType.NumericType.INT);
TYPE_STORED.setNumericPrecisionStep(NumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_STORED.setNumericType(FieldType.LegacyNumericType.INT);
TYPE_STORED.setNumericPrecisionStep(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32);
TYPE_STORED.setStored(true);
TYPE_STORED.freeze();
}
/** Creates a stored or un-stored IntField with the provided value
/** Creates a stored or un-stored LegacyIntField with the provided value
* and default <code>precisionStep</code> {@link
* NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* @param name field name
* @param value 32-bit integer value
* @param stored Store.YES if the content should also be stored
* @throws IllegalArgumentException if the field name is null.
*/
public IntField(String name, int value, Store stored) {
public LegacyIntField(String name, int value, Store stored) {
super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
fieldsData = Integer.valueOf(value);
}
@ -159,13 +160,13 @@ public final class IntField extends Field {
* @param name field name
* @param value 32-bit integer value
* @param type customized field type: must have {@link FieldType#numericType()}
* of {@link FieldType.NumericType#INT}.
* of {@link org.apache.lucene.document.FieldType.LegacyNumericType#INT}.
* @throws IllegalArgumentException if the field name or type is null, or
* if the field type does not have a INT numericType()
*/
public IntField(String name, int value, FieldType type) {
public LegacyIntField(String name, int value, FieldType type) {
super(name, type);
if (type.numericType() != FieldType.NumericType.INT) {
if (type.numericType() != FieldType.LegacyNumericType.INT) {
throw new IllegalArgumentException("type.numericType() must be INT but got " + type.numericType());
}
fieldsData = Integer.valueOf(value);

View File

@ -17,10 +17,8 @@ package org.apache.lucene.document;
* limitations under the License.
*/
import org.apache.lucene.analysis.NumericTokenStream; // javadocs
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.search.NumericRangeQuery; // javadocs
import org.apache.lucene.util.NumericUtils;
/**
* <p>
@ -28,14 +26,14 @@ import org.apache.lucene.util.NumericUtils;
* for efficient range filtering and sorting. Here's an example usage:
*
* <pre class="prettyprint">
* document.add(new LongField(name, 6L, Field.Store.NO));
* document.add(new LegacyLongField(name, 6L, Field.Store.NO));
* </pre>
*
* For optimal performance, re-use the <code>LongField</code> and
* For optimal performance, re-use the <code>LegacyLongField</code> and
* {@link Document} instance for more than one document:
*
* <pre class="prettyprint">
* LongField field = new LongField(name, 0L, Field.Store.NO);
* LegacyLongField field = new LegacyLongField(name, 0L, Field.Store.NO);
* Document document = new Document();
* document.add(field);
*
@ -47,8 +45,8 @@ import org.apache.lucene.util.NumericUtils;
* }
* </pre>
*
* See also {@link IntField}, {@link FloatField}, {@link
* DoubleField}.
* See also {@link LegacyIntField}, {@link LegacyFloatField}, {@link
* LegacyDoubleField}.
*
* Any type that can be converted to long can also be
* indexed. For example, date/time values represented by a
@ -61,20 +59,20 @@ import org.apache.lucene.util.NumericUtils;
* <code>long</code> value.
*
* <p>To perform range querying or filtering against a
* <code>LongField</code>, use {@link NumericRangeQuery}.
* <code>LegacyLongField</code>, use {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
* To sort according to a
* <code>LongField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LongField</code>
* <code>LegacyLongField</code>, use the normal numeric sort types, eg
* {@link org.apache.lucene.search.SortField.Type#LONG}. <code>LegacyLongField</code>
* values can also be loaded directly from {@link org.apache.lucene.index.LeafReader#getNumericDocValues}.
*
* <p>You may add the same field name as an <code>LongField</code> to
 * <p>You may add the same field name as a <code>LegacyLongField</code> to
* the same document more than once. Range querying and
* filtering will be the logical OR of all values; so a range query
* will hit all documents that have at least one value in
* the range. However sort behavior is not defined. If you need to sort,
* you should separately index a single-valued <code>LongField</code>.
* you should separately index a single-valued <code>LegacyLongField</code>.
*
* <p>A <code>LongField</code> will consume somewhat more disk space
* <p>A <code>LegacyLongField</code> will consume somewhat more disk space
* in the index than an ordinary single-valued field.
* However, for a typical index that includes substantial
* textual content per document, this increase will likely
@ -95,7 +93,7 @@ import org.apache.lucene.util.NumericUtils;
* FieldType#setNumericPrecisionStep} method if you'd
* like to change the value. Note that you must also
* specify a congruent value when creating {@link
* NumericRangeQuery}.
* org.apache.lucene.search.LegacyNumericRangeQuery}.
* For low cardinality fields larger precision steps are good.
* If the cardinality is &lt; 100, it is fair
* to use {@link Integer#MAX_VALUE}, which produces one
@ -103,9 +101,9 @@ import org.apache.lucene.util.NumericUtils;
*
* <p>For more information on the internals of numeric trie
* indexing, including the <a
* href="../search/NumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link NumericRangeQuery}. The format of
* indexed values is described in {@link NumericUtils}.
* href="../search/LegacyNumericRangeQuery.html#precisionStepDesc"><code>precisionStep</code></a>
* configuration, see {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The format of
* indexed values is described in {@link org.apache.lucene.util.LegacyNumericUtils}.
*
* <p>If you only need to sort by numeric value, and never
* run range querying/filtering, you can index using a
@ -113,17 +111,20 @@ import org.apache.lucene.util.NumericUtils;
* This will minimize disk space consumed.
*
* <p>More advanced users can instead use {@link
* NumericTokenStream} directly, when indexing numbers. This
* org.apache.lucene.analysis.LegacyNumericTokenStream} directly, when indexing numbers. This
* class is a wrapper around this token stream type for
* easier, more intuitive usage.</p>
*
* @deprecated Please use {@link DimensionalLongField} instead
*
* @since 2.9
*/
public final class LongField extends Field {
@Deprecated
public final class LegacyLongField extends Field {
/**
* Type for a LongField that is not stored:
* Type for a LegacyLongField that is not stored:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_NOT_STORED = new FieldType();
@ -131,12 +132,12 @@ public final class LongField extends Field {
TYPE_NOT_STORED.setTokenized(true);
TYPE_NOT_STORED.setOmitNorms(true);
TYPE_NOT_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_NOT_STORED.setNumericType(FieldType.NumericType.LONG);
TYPE_NOT_STORED.setNumericType(FieldType.LegacyNumericType.LONG);
TYPE_NOT_STORED.freeze();
}
/**
* Type for a stored LongField:
* Type for a stored LegacyLongField:
* normalization factors, frequencies, and positions are omitted.
*/
public static final FieldType TYPE_STORED = new FieldType();
@ -144,20 +145,20 @@ public final class LongField extends Field {
TYPE_STORED.setTokenized(true);
TYPE_STORED.setOmitNorms(true);
TYPE_STORED.setIndexOptions(IndexOptions.DOCS);
TYPE_STORED.setNumericType(FieldType.NumericType.LONG);
TYPE_STORED.setNumericType(FieldType.LegacyNumericType.LONG);
TYPE_STORED.setStored(true);
TYPE_STORED.freeze();
}
/** Creates a stored or un-stored LongField with the provided value
/** Creates a stored or un-stored LegacyLongField with the provided value
* and default <code>precisionStep</code> {@link
* NumericUtils#PRECISION_STEP_DEFAULT} (16).
* org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
* @param name field name
* @param value 64-bit long value
* @param stored Store.YES if the content should also be stored
* @throws IllegalArgumentException if the field name is null.
*/
public LongField(String name, long value, Store stored) {
public LegacyLongField(String name, long value, Store stored) {
super(name, stored == Store.YES ? TYPE_STORED : TYPE_NOT_STORED);
fieldsData = Long.valueOf(value);
}
@ -167,13 +168,13 @@ public final class LongField extends Field {
* @param name field name
* @param value 64-bit long value
* @param type customized field type: must have {@link FieldType#numericType()}
* of {@link FieldType.NumericType#LONG}.
* of {@link org.apache.lucene.document.FieldType.LegacyNumericType#LONG}.
* @throws IllegalArgumentException if the field name or type is null, or
* if the field type does not have a LONG numericType()
*/
public LongField(String name, long value, FieldType type) {
public LegacyLongField(String name, long value, FieldType type) {
super(name, type);
if (type.numericType() != FieldType.NumericType.LONG) {
if (type.numericType() != FieldType.LegacyNumericType.LONG) {
throw new IllegalArgumentException("type.numericType() must be LONG but got " + type.numericType());
}
fieldsData = Long.valueOf(value);

View File

@ -18,7 +18,6 @@ package org.apache.lucene.document;
*/
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.util.NumericUtils;
/**
* <p>
@ -32,10 +31,10 @@ import org.apache.lucene.util.NumericUtils;
*
* <p>
* Note that if you want to encode doubles or floats with proper sort order,
* you will need to encode them with {@link NumericUtils}:
* you will need to encode them with {@link org.apache.lucene.util.LegacyNumericUtils}:
*
* <pre class="prettyprint">
* document.add(new SortedNumericDocValuesField(name, NumericUtils.floatToSortableInt(-5.3f)));
* document.add(new SortedNumericDocValuesField(name, LegacyNumericUtils.floatToSortableInt(-5.3f)));
* </pre>
*
* <p>

View File

@ -34,10 +34,9 @@
* How this is done is completely up to you. That being said, there are many tools available in other projects that can make
* the process of taking a file and converting it into a Lucene {@link org.apache.lucene.document.Document}.
* </p>
* <p>The {@link org.apache.lucene.document.DateTools} is a utility class to make dates and times searchable
* (remember, Lucene only searches text). {@link org.apache.lucene.document.IntField}, {@link org.apache.lucene.document.LongField},
* {@link org.apache.lucene.document.FloatField} and {@link org.apache.lucene.document.DoubleField} are a special helper class
* to simplify indexing of numeric values (and also dates) for fast range range queries with {@link org.apache.lucene.search.NumericRangeQuery}
* (using a special sortable string representation of numeric values).</p>
* <p>The {@link org.apache.lucene.document.DateTools} is a utility class to make dates and times searchable. {@link
* org.apache.lucene.document.DimensionalIntField}, {@link org.apache.lucene.document.DimensionalLongField},
* {@link org.apache.lucene.document.DimensionalFloatField} and {@link org.apache.lucene.document.DimensionalDoubleField} enable indexing
 * of numeric values (and also dates) for fast range queries using {@link org.apache.lucene.search.DimensionalRangeQuery}.</p>
*/
package org.apache.lucene.document;

View File

@ -2,6 +2,13 @@ package org.apache.lucene.index;
import java.io.IOException;
import org.apache.lucene.document.DimensionalBinaryField;
import org.apache.lucene.document.DimensionalDoubleField;
import org.apache.lucene.document.DimensionalFloatField;
import org.apache.lucene.document.DimensionalIntField;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.util.bkd.BKDWriter;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@ -19,11 +26,19 @@ import java.io.IOException;
* limitations under the License.
*/
/** Allows recursively visiting indexed dimensional values
/** Allows recursively visiting dimensional values indexed with {@link DimensionalIntField},
* {@link DimensionalFloatField}, {@link DimensionalLongField}, {@link DimensionalDoubleField}
* or {@link DimensionalBinaryField}.
*
* @lucene.experimental */
public abstract class DimensionalValues {
/** Maximum number of bytes for each dimension */
public static final int MAX_NUM_BYTES = 16;
/** Maximum number of dimensions */
public static final int MAX_DIMENSIONS = BKDWriter.MAX_DIMS;
  /** Default constructor */
protected DimensionalValues() {
}

View File

@ -171,9 +171,15 @@ public final class FieldInfo {
if (count <= 0) {
throw new IllegalArgumentException("dimension count must be >= 0; got " + count + " for field=\"" + name + "\"");
}
if (count > DimensionalValues.MAX_DIMENSIONS) {
throw new IllegalArgumentException("dimension count must be < DimensionalValues.MAX_DIMENSIONS (= " + DimensionalValues.MAX_DIMENSIONS + "); got " + count + " for field=\"" + name + "\"");
}
if (numBytes <= 0) {
throw new IllegalArgumentException("dimension numBytes must be >= 0; got " + numBytes + " for field=\"" + name + "\"");
}
if (numBytes > DimensionalValues.MAX_NUM_BYTES) {
throw new IllegalArgumentException("dimension numBytes must be <= DimensionalValues.MAX_NUM_BYTES (= " + DimensionalValues.MAX_NUM_BYTES + "); got " + numBytes + " for field=\"" + name + "\"");
}
if (dimensionCount != 0 && dimensionCount != count) {
throw new IllegalArgumentException("cannot change dimension count from " + dimensionCount + " to " + count + " for field=\"" + name + "\"");
}

View File

@ -337,6 +337,12 @@ public class FieldInfos implements Iterable<FieldInfo> {
}
synchronized void setDimensions(int number, String name, int dimensionCount, int dimensionNumBytes) {
if (dimensionNumBytes > DimensionalValues.MAX_NUM_BYTES) {
throw new IllegalArgumentException("dimension numBytes must be <= DimensionalValues.MAX_NUM_BYTES (= " + DimensionalValues.MAX_NUM_BYTES + "); got " + dimensionNumBytes + " for field=\"" + name + "\"");
}
if (dimensionCount > DimensionalValues.MAX_DIMENSIONS) {
throw new IllegalArgumentException("dimensionCount must be <= DimensionalValues.MAX_DIMENSIONS (= " + DimensionalValues.MAX_DIMENSIONS + "); got " + dimensionCount + " for field=\"" + name + "\"");
}
verifyConsistentDimensions(number, name, dimensionCount, dimensionNumBytes);
dimensions.put(name, new FieldDimensions(dimensionCount, dimensionNumBytes));
}

View File

@ -21,10 +21,6 @@ import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.BytesRef;
@ -151,8 +147,8 @@ public class StoredDocument implements Iterable<StorableField> {
* Returns an array of values of the field specified as the method parameter.
* This method returns an empty array when there are no
* matching fields. It never returns null.
* For {@link IntField}, {@link LongField}, {@link
* FloatField} and {@link DoubleField} it returns the string value of the number. If you want
* For {@link org.apache.lucene.document.LegacyIntField}, {@link org.apache.lucene.document.LegacyLongField}, {@link
* org.apache.lucene.document.LegacyFloatField} and {@link org.apache.lucene.document.LegacyDoubleField} it returns the string value of the number. If you want
* the actual numeric field instances back, use {@link #getFields}.
* @param name the name of the field
* @return a <code>String[]</code> of field values
@ -176,8 +172,8 @@ public class StoredDocument implements Iterable<StorableField> {
* this document, or null. If multiple fields exist with this name, this
* method returns the first value added. If only binary fields with this name
* exist, returns null.
* For {@link IntField}, {@link LongField}, {@link
* FloatField} and {@link DoubleField} it returns the string value of the number. If you want
* For {@link org.apache.lucene.document.LegacyIntField}, {@link org.apache.lucene.document.LegacyLongField}, {@link
* org.apache.lucene.document.LegacyFloatField} and {@link org.apache.lucene.document.LegacyDoubleField} it returns the string value of the number. If you want
* the actual numeric field instance back, use {@link #getField}.
*/
public final String get(String name) {

View File

@ -21,7 +21,11 @@ import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import org.apache.lucene.document.DimensionalField;
import org.apache.lucene.document.DimensionalBinaryField;
import org.apache.lucene.document.DimensionalDoubleField;
import org.apache.lucene.document.DimensionalFloatField;
import org.apache.lucene.document.DimensionalIntField;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.index.DimensionalValues;
import org.apache.lucene.index.DimensionalValues.IntersectVisitor;
import org.apache.lucene.index.DimensionalValues.Relation;
@ -29,12 +33,13 @@ import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.DocIdSetBuilder;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.bkd.BKDUtil;
/** Searches for ranges in fields previously indexed using {@link DimensionalField}. In
* a 1D field this is a simple range query; in a multi-dimensional field it's a box shape. */
/** Searches for ranges in fields previously indexed using dimensional
* fields, e.g. {@link DimensionalLongField}. In a 1D field this is
* a simple range query; in a multi-dimensional field it's a box shape. */
public class DimensionalRangeQuery extends Query {
final String field;
@ -91,14 +96,29 @@ public class DimensionalRangeQuery extends Query {
}
}
/** Sugar constructor: use in the 1D case when you indexed 1D long values using {@link DimensionalField} */
public DimensionalRangeQuery(String field, Long lowerValue, boolean lowerInclusive, Long upperValue, boolean upperInclusive) {
this(field, pack(lowerValue), new boolean[] {lowerInclusive}, pack(upperValue), new boolean[] {upperInclusive});
/** Use in the 1D case when you indexed 1D int values using {@link DimensionalIntField} */
public static DimensionalRangeQuery new1DIntRange(String field, Integer lowerValue, boolean lowerInclusive, Integer upperValue, boolean upperInclusive) {
return new DimensionalRangeQuery(field, pack(lowerValue), new boolean[] {lowerInclusive}, pack(upperValue), new boolean[] {upperInclusive});
}
/** Sugar constructor: use in the 1D case when you indexed binary values using {@link DimensionalField} */
public DimensionalRangeQuery(String field, byte[] lowerValue, boolean lowerInclusive, byte[] upperValue, boolean upperInclusive) {
this(field, new byte[][] {lowerValue}, new boolean[] {lowerInclusive}, new byte[][] {upperValue}, new boolean[] {upperInclusive});
/** Use in the 1D case when you indexed 1D long values using {@link DimensionalLongField} */
public static DimensionalRangeQuery new1DLongRange(String field, Long lowerValue, boolean lowerInclusive, Long upperValue, boolean upperInclusive) {
return new DimensionalRangeQuery(field, pack(lowerValue), new boolean[] {lowerInclusive}, pack(upperValue), new boolean[] {upperInclusive});
}
/** Use in the 1D case when you indexed 1D float values using {@link DimensionalFloatField} */
public static DimensionalRangeQuery new1DFloatRange(String field, Float lowerValue, boolean lowerInclusive, Float upperValue, boolean upperInclusive) {
return new DimensionalRangeQuery(field, pack(lowerValue), new boolean[] {lowerInclusive}, pack(upperValue), new boolean[] {upperInclusive});
}
/** Use in the 1D case when you indexed 1D double values using {@link DimensionalDoubleField} */
public static DimensionalRangeQuery new1DDoubleRange(String field, Double lowerValue, boolean lowerInclusive, Double upperValue, boolean upperInclusive) {
return new DimensionalRangeQuery(field, pack(lowerValue), new boolean[] {lowerInclusive}, pack(upperValue), new boolean[] {upperInclusive});
}
/** Use in the 1D case when you indexed binary values using {@link DimensionalBinaryField} */
public static DimensionalRangeQuery new1DBinaryRange(String field, byte[] lowerValue, boolean lowerInclusive, byte[] upperValue, boolean upperInclusive) {
return new DimensionalRangeQuery(field, new byte[][] {lowerValue}, new boolean[] {lowerInclusive}, new byte[][] {upperValue}, new boolean[] {upperInclusive});
}
private static byte[][] pack(Long value) {
@ -107,7 +127,37 @@ public class DimensionalRangeQuery extends Query {
return new byte[1][];
}
byte[][] result = new byte[][] {new byte[RamUsageEstimator.NUM_BYTES_LONG]};
BKDUtil.longToBytes(value, result[0], 0);
NumericUtils.longToBytes(value, result[0], 0);
return result;
}
private static byte[][] pack(Double value) {
if (value == null) {
// OK: open ended range
return new byte[1][];
}
byte[][] result = new byte[][] {new byte[RamUsageEstimator.NUM_BYTES_LONG]};
NumericUtils.longToBytesDirect(NumericUtils.doubleToSortableLong(value), result[0], 0);
return result;
}
private static byte[][] pack(Integer value) {
if (value == null) {
// OK: open ended range
return new byte[1][];
}
byte[][] result = new byte[][] {new byte[RamUsageEstimator.NUM_BYTES_INT]};
NumericUtils.intToBytes(value, result[0], 0);
return result;
}
private static byte[][] pack(Float value) {
if (value == null) {
// OK: open ended range
return new byte[1][];
}
byte[][] result = new byte[][] {new byte[RamUsageEstimator.NUM_BYTES_INT]};
NumericUtils.intToBytesDirect(NumericUtils.floatToSortableInt(value), result[0], 0);
return result;
}
@ -158,7 +208,7 @@ public class DimensionalRangeQuery extends Query {
return null;
} else {
byte[] value = new byte[bytesPerDim];
BKDUtil.add(bytesPerDim, 0, lowerPoint[dim], one, value);
NumericUtils.add(bytesPerDim, 0, lowerPoint[dim], one, value);
System.arraycopy(value, 0, packedLowerIncl, dim*bytesPerDim, bytesPerDim);
}
} else {
@ -174,7 +224,7 @@ public class DimensionalRangeQuery extends Query {
return null;
} else {
byte[] value = new byte[bytesPerDim];
BKDUtil.subtract(bytesPerDim, 0, upperPoint[dim], one, value);
NumericUtils.subtract(bytesPerDim, 0, upperPoint[dim], one, value);
System.arraycopy(value, 0, packedUpperIncl, dim*bytesPerDim, bytesPerDim);
}
} else {

View File

@ -21,39 +21,35 @@ import java.io.IOException;
import java.util.LinkedList;
import java.util.Objects;
import org.apache.lucene.analysis.NumericTokenStream; // for javadocs
import org.apache.lucene.document.DoubleField; // for javadocs
import org.apache.lucene.document.FloatField; // for javadocs
import org.apache.lucene.document.IntField; // for javadocs
import org.apache.lucene.document.LongField; // for javadocs
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.index.Term; // for javadocs
/**
* <p>A {@link Query} that matches numeric values within a
* specified range. To use this, you must first index the
* numeric values using {@link IntField}, {@link
* FloatField}, {@link LongField} or {@link DoubleField} (expert: {@link
* NumericTokenStream}). If your terms are instead textual,
* numeric values using {@link org.apache.lucene.document.LegacyIntField}, {@link
* org.apache.lucene.document.LegacyFloatField}, {@link org.apache.lucene.document.LegacyLongField} or {@link org.apache.lucene.document.LegacyDoubleField} (expert: {@link
* org.apache.lucene.analysis.LegacyNumericTokenStream}). If your terms are instead textual,
* you should use {@link TermRangeQuery}.</p>
*
* <p>You create a new NumericRangeQuery with the static
* <p>You create a new LegacyNumericRangeQuery with the static
* factory methods, eg:
*
* <pre class="prettyprint">
* Query q = NumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
* Query q = LegacyNumericRangeQuery.newFloatRange("weight", 0.03f, 0.10f, true, true);
* </pre>
*
* matches all documents whose float valued "weight" field
* ranges from 0.03 to 0.10, inclusive.
*
* <p>The performance of NumericRangeQuery is much better
* <p>The performance of LegacyNumericRangeQuery is much better
* than the corresponding {@link TermRangeQuery} because the
* number of terms that must be searched is usually far
* fewer, thanks to trie indexing, described below.</p>
@ -94,7 +90,7 @@ import org.apache.lucene.index.Term; // for javadocs
* (all numerical values like doubles, longs, floats, and ints are converted to
* lexicographic sortable string representations and stored with different precisions
* (for a more detailed description of how the values are stored,
* see {@link NumericUtils}). A range is then divided recursively into multiple intervals for searching:
* see {@link org.apache.lucene.util.LegacyNumericUtils}). A range is then divided recursively into multiple intervals for searching:
* The center of the range is searched only with the lowest possible precision in the <em>trie</em>,
* while the boundaries are matched more exactly. This reduces the number of terms dramatically.</p>
*
@ -110,7 +106,7 @@ import org.apache.lucene.index.Term; // for javadocs
* <h3><a name="precisionStepDesc">Precision Step</a></h3>
* <p>You can choose any <code>precisionStep</code> when encoding values.
* Lower step values mean more precisions and so more terms in index (and index gets larger). The number
* of indexed terms per value is (those are generated by {@link NumericTokenStream}):
* of indexed terms per value is (those are generated by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}):
* <p style="font-family:serif">
* &nbsp;&nbsp;indexedTermsPerValue = <b>ceil</b><big>(</big>bitsPerValue / precisionStep<big>)</big>
* </p>
@ -146,8 +142,8 @@ import org.apache.lucene.index.Term; // for javadocs
* <li>Steps <b>&ge;64</b> for <em>long/double</em> and <b>&ge;32</b> for <em>int/float</em> produces one token
* per value in the index and querying is as slow as a conventional {@link TermRangeQuery}. But it can be used
* to produce fields, that are solely used for sorting (in this case simply use {@link Integer#MAX_VALUE} as
* <code>precisionStep</code>). Using {@link IntField},
* {@link LongField}, {@link FloatField} or {@link DoubleField} for sorting
* <code>precisionStep</code>). Using {@link org.apache.lucene.document.LegacyIntField},
* {@link org.apache.lucene.document.LegacyLongField}, {@link org.apache.lucene.document.LegacyFloatField} or {@link org.apache.lucene.document.LegacyDoubleField} for sorting
* is ideal, because building the field cache is much faster than with text-only numbers.
* These fields have one term per value and therefore also work with term enumeration for building distinct lists
* (e.g. facets / preselected values to search for).
@ -161,17 +157,21 @@ import org.apache.lucene.index.Term; // for javadocs
* precision step). This query type was developed for a geographic portal, where the performance for
* e.g. bounding boxes or exact date/time stamps is important.</p>
*
* @deprecated Please use {@link DimensionalRangeQuery} instead
*
* @since 2.9
**/
public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
private NumericRangeQuery(final String field, final int precisionStep, final NumericType dataType,
T min, T max, final boolean minInclusive, final boolean maxInclusive) {
@Deprecated
public final class LegacyNumericRangeQuery<T extends Number> extends MultiTermQuery {
private LegacyNumericRangeQuery(final String field, final int precisionStep, final LegacyNumericType dataType,
T min, T max, final boolean minInclusive, final boolean maxInclusive) {
super(field);
if (precisionStep < 1)
throw new IllegalArgumentException("precisionStep must be >=1");
this.precisionStep = precisionStep;
this.dataType = Objects.requireNonNull(dataType, "NumericType must not be null");
this.dataType = Objects.requireNonNull(dataType, "LegacyNumericType must not be null");
this.min = min;
this.max = max;
this.minInclusive = minInclusive;
@ -179,59 +179,59 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>long</code>
* range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
public static LegacyNumericRangeQuery<Long> newLongRange(final String field, final int precisionStep,
Long min, Long max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, precisionStep, NumericType.LONG, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>long</code>
* range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>long</code>
* range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Long> newLongRange(final String field,
public static LegacyNumericRangeQuery<Long> newLongRange(final String field,
Long min, Long max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.LONG, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, FieldType.LegacyNumericType.LONG, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>int</code>
* range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
public static LegacyNumericRangeQuery<Integer> newIntRange(final String field, final int precisionStep,
Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, precisionStep, NumericType.INT, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>int</code>
* range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>int</code>
* range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Integer> newIntRange(final String field,
public static LegacyNumericRangeQuery<Integer> newIntRange(final String field,
Integer min, Integer max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT_32, NumericType.INT, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, FieldType.LegacyNumericType.INT, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>double</code>
* range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>.
@ -239,29 +239,29 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
* with {@code min == max == Double.NaN}. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field, final int precisionStep,
Double min, Double max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, precisionStep, NumericType.DOUBLE, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>double</code>
* range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT} (16).
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>double</code>
* range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT} (16).
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>.
* {@link Double#NaN} will never match a half-open range, to hit {@code NaN} use a query
* with {@code min == max == Double.NaN}. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Double> newDoubleRange(final String field,
public static LegacyNumericRangeQuery<Double> newDoubleRange(final String field,
Double min, Double max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT, NumericType.DOUBLE, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT, FieldType.LegacyNumericType.DOUBLE, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>float</code>
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>float</code>
* range using the given <a href="#precisionStepDesc"><code>precisionStep</code></a>.
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>.
@ -269,25 +269,25 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
* with {@code min == max == Float.NaN}. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
public static LegacyNumericRangeQuery<Float> newFloatRange(final String field, final int precisionStep,
Float min, Float max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, precisionStep, NumericType.FLOAT, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, precisionStep, FieldType.LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
}
/**
* Factory that creates a <code>NumericRangeQuery</code>, that queries a <code>float</code>
* range using the default <code>precisionStep</code> {@link NumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* Factory that creates a <code>LegacyNumericRangeQuery</code>, that queries a <code>float</code>
* range using the default <code>precisionStep</code> {@link org.apache.lucene.util.LegacyNumericUtils#PRECISION_STEP_DEFAULT_32} (8).
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting the min or max value to <code>null</code>.
* {@link Float#NaN} will never match a half-open range, to hit {@code NaN} use a query
* with {@code min == max == Float.NaN}. By setting inclusive to false, it will
* match all documents excluding the bounds, with inclusive on, the boundaries are hits, too.
*/
public static NumericRangeQuery<Float> newFloatRange(final String field,
public static LegacyNumericRangeQuery<Float> newFloatRange(final String field,
Float min, Float max, final boolean minInclusive, final boolean maxInclusive
) {
return new NumericRangeQuery<>(field, NumericUtils.PRECISION_STEP_DEFAULT_32, NumericType.FLOAT, min, max, minInclusive, maxInclusive);
return new LegacyNumericRangeQuery<>(field, LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, FieldType.LegacyNumericType.FLOAT, min, max, minInclusive, maxInclusive);
}
@Override @SuppressWarnings("unchecked")
@ -332,8 +332,8 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
if (o==this) return true;
if (!super.equals(o))
return false;
if (o instanceof NumericRangeQuery) {
final NumericRangeQuery q=(NumericRangeQuery)o;
if (o instanceof LegacyNumericRangeQuery) {
final LegacyNumericRangeQuery q=(LegacyNumericRangeQuery)o;
return (
(q.min == null ? min == null : q.min.equals(min)) &&
(q.max == null ? max == null : q.max.equals(max)) &&
@ -358,19 +358,19 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
// members (package private, to be also fast accessible by NumericRangeTermEnum)
final int precisionStep;
final NumericType dataType;
final FieldType.LegacyNumericType dataType;
final T min, max;
final boolean minInclusive,maxInclusive;
// used to handle float/double infinity correcty
static final long LONG_NEGATIVE_INFINITY =
NumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
LegacyNumericUtils.doubleToSortableLong(Double.NEGATIVE_INFINITY);
static final long LONG_POSITIVE_INFINITY =
NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
LegacyNumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
static final int INT_NEGATIVE_INFINITY =
NumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
LegacyNumericUtils.floatToSortableInt(Float.NEGATIVE_INFINITY);
static final int INT_POSITIVE_INFINITY =
NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
LegacyNumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
/**
* Subclass of FilteredTermsEnum for enumerating all terms that match the
@ -378,8 +378,8 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
* <p>
* WARNING: This term enumeration is not guaranteed to be always ordered by
* {@link Term#compareTo}.
* The ordering depends on how {@link NumericUtils#splitLongRange} and
* {@link NumericUtils#splitIntRange} generates the sub-ranges. For
* The ordering depends on how {@link org.apache.lucene.util.LegacyNumericUtils#splitLongRange} and
* {@link org.apache.lucene.util.LegacyNumericUtils#splitIntRange} generates the sub-ranges. For
* {@link MultiTermQuery} ordering is not relevant.
*/
private final class NumericRangeTermsEnum extends FilteredTermsEnum {
@ -395,12 +395,12 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
case DOUBLE: {
// lower
long minBound;
if (dataType == NumericType.LONG) {
if (dataType == FieldType.LegacyNumericType.LONG) {
minBound = (min == null) ? Long.MIN_VALUE : min.longValue();
} else {
assert dataType == NumericType.DOUBLE;
assert dataType == FieldType.LegacyNumericType.DOUBLE;
minBound = (min == null) ? LONG_NEGATIVE_INFINITY
: NumericUtils.doubleToSortableLong(min.doubleValue());
: LegacyNumericUtils.doubleToSortableLong(min.doubleValue());
}
if (!minInclusive && min != null) {
if (minBound == Long.MAX_VALUE) break;
@ -409,19 +409,19 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
// upper
long maxBound;
if (dataType == NumericType.LONG) {
if (dataType == FieldType.LegacyNumericType.LONG) {
maxBound = (max == null) ? Long.MAX_VALUE : max.longValue();
} else {
assert dataType == NumericType.DOUBLE;
assert dataType == FieldType.LegacyNumericType.DOUBLE;
maxBound = (max == null) ? LONG_POSITIVE_INFINITY
: NumericUtils.doubleToSortableLong(max.doubleValue());
: LegacyNumericUtils.doubleToSortableLong(max.doubleValue());
}
if (!maxInclusive && max != null) {
if (maxBound == Long.MIN_VALUE) break;
maxBound--;
}
NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
@Override
public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
rangeBounds.add(minPrefixCoded);
@ -435,12 +435,12 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
case FLOAT: {
// lower
int minBound;
if (dataType == NumericType.INT) {
if (dataType == FieldType.LegacyNumericType.INT) {
minBound = (min == null) ? Integer.MIN_VALUE : min.intValue();
} else {
assert dataType == NumericType.FLOAT;
assert dataType == FieldType.LegacyNumericType.FLOAT;
minBound = (min == null) ? INT_NEGATIVE_INFINITY
: NumericUtils.floatToSortableInt(min.floatValue());
: LegacyNumericUtils.floatToSortableInt(min.floatValue());
}
if (!minInclusive && min != null) {
if (minBound == Integer.MAX_VALUE) break;
@ -449,19 +449,19 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
// upper
int maxBound;
if (dataType == NumericType.INT) {
if (dataType == LegacyNumericType.INT) {
maxBound = (max == null) ? Integer.MAX_VALUE : max.intValue();
} else {
assert dataType == NumericType.FLOAT;
assert dataType == FieldType.LegacyNumericType.FLOAT;
maxBound = (max == null) ? INT_POSITIVE_INFINITY
: NumericUtils.floatToSortableInt(max.floatValue());
: LegacyNumericUtils.floatToSortableInt(max.floatValue());
}
if (!maxInclusive && max != null) {
if (maxBound == Integer.MIN_VALUE) break;
maxBound--;
}
NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
@Override
public final void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
rangeBounds.add(minPrefixCoded);
@ -473,7 +473,7 @@ public final class NumericRangeQuery<T extends Number> extends MultiTermQuery {
default:
// should never happen
throw new IllegalArgumentException("Invalid NumericType");
throw new IllegalArgumentException("Invalid LegacyNumericType");
}
}

View File

@ -33,7 +33,7 @@ import org.apache.lucene.index.IndexReader;
<li> {@link FuzzyQuery}
<li> {@link RegexpQuery}
<li> {@link TermRangeQuery}
<li> {@link NumericRangeQuery}
<li> {@link DimensionalRangeQuery}
<li> {@link ConstantScoreQuery}
<li> {@link DisjunctionMaxQuery}
<li> {@link MatchAllDocsQuery}

View File

@ -20,7 +20,7 @@ package org.apache.lucene.search;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/**
* Selects a value from the document's list to use as the representative value
@ -81,14 +81,14 @@ public class SortedNumericSelector {
return new NumericDocValues() {
@Override
public long get(int docID) {
return NumericUtils.sortableFloatBits((int) view.get(docID));
return LegacyNumericUtils.sortableFloatBits((int) view.get(docID));
}
};
case DOUBLE:
return new NumericDocValues() {
@Override
public long get(int docID) {
return NumericUtils.sortableDoubleBits(view.get(docID));
return LegacyNumericUtils.sortableDoubleBits(view.get(docID));
}
};
default:

View File

@ -22,7 +22,6 @@ import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.util.FrequencyTrackingRingBuffer;
/**
* A {@link QueryCachingPolicy} that tracks usage statistics of recently-used
* filters in order to decide on which filters are worth caching.
@ -43,7 +42,8 @@ public final class UsageTrackingQueryCachingPolicy implements QueryCachingPolicy
// already have the DocIdSetIterator#cost API) but the cost to build the
// DocIdSet in the first place
return query instanceof MultiTermQuery ||
query instanceof MultiTermQueryConstantScoreWrapper;
query instanceof MultiTermQueryConstantScoreWrapper ||
query instanceof DimensionalRangeQuery;
}
static boolean isCheap(Query query) {

View File

@ -160,22 +160,22 @@
* and an upper
* {@link org.apache.lucene.index.Term Term}
* according to {@link org.apache.lucene.util.BytesRef#compareTo BytesRef.compareTo()}. It is not intended
* for numerical ranges; use {@link org.apache.lucene.search.NumericRangeQuery NumericRangeQuery} instead.
* for numerical ranges; use {@link org.apache.lucene.search.DimensionalRangeQuery DimensionalRangeQuery} instead.
*
* For example, one could find all documents
* that have terms beginning with the letters <tt>a</tt> through <tt>c</tt>.
*
* <h3>
* {@link org.apache.lucene.search.NumericRangeQuery NumericRangeQuery}
* {@link org.apache.lucene.search.DimensionalRangeQuery DimensionalRangeQuery}
* </h3>
*
* <p>The
* {@link org.apache.lucene.search.NumericRangeQuery NumericRangeQuery}
* {@link org.apache.lucene.search.DimensionalRangeQuery DimensionalRangeQuery}
* matches all documents that occur in a numeric range.
* For NumericRangeQuery to work, you must index the values
* using a one of the numeric fields ({@link org.apache.lucene.document.IntField IntField},
* {@link org.apache.lucene.document.LongField LongField}, {@link org.apache.lucene.document.FloatField FloatField},
* or {@link org.apache.lucene.document.DoubleField DoubleField}).
* For DimensionalRangeQuery to work, you must index the values
* using one of the numeric fields ({@link org.apache.lucene.document.DimensionalIntField DimensionalIntField},
* {@link org.apache.lucene.document.DimensionalLongField DimensionalLongField}, {@link org.apache.lucene.document.DimensionalFloatField DimensionalFloatField},
* or {@link org.apache.lucene.document.DimensionalDoubleField DimensionalDoubleField}).
*
* <h3>
* {@link org.apache.lucene.search.PrefixQuery PrefixQuery},

View File

@ -0,0 +1,597 @@
package org.apache.lucene.util;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.index.DimensionalValues;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
/**
* This is a helper class to generate prefix-encoded representations for numerical values
* and supplies converters to represent float/double values as sortable integers/longs.
*
* <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
* into multiple intervals for searching: The center of the range is searched only with
* the lowest possible precision in the trie, while the boundaries are matched
* more exactly. This reduces the number of terms dramatically.
*
* <p>This class generates terms to achieve this: First the numerical integer values need to
* be converted to bytes. For that integer values (32 bit or 64 bit) are made unsigned
* and the bits are converted to ASCII chars with each 7 bit. The resulting byte[] is
* sortable like the original integer value (even using UTF-8 sort order). Each value is also
* prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
* during encoding.
*
* <p>To also index floating point numbers, this class supplies two methods to convert them
* to integer values by changing their bit layout: {@link #doubleToSortableLong},
* {@link #floatToSortableInt}. You will have no precision loss by
* converting floating point numbers to integers and back (only that the integer form
* is not usable). Other data types like dates can easily be converted to longs or ints (e.g.
* date to long: {@link java.util.Date#getTime}).
*
* <p>For easy usage, the trie algorithm is implemented for indexing inside
* {@link org.apache.lucene.analysis.LegacyNumericTokenStream} that can index <code>int</code>, <code>long</code>,
* <code>float</code>, and <code>double</code>. For querying,
* {@link org.apache.lucene.search.LegacyNumericRangeQuery} implements the query part
* for the same data types.
*
* <p>This class can also be used, to generate lexicographically sortable (according to
* {@link BytesRef#getUTF8SortedAsUTF16Comparator()}) representations of numeric data
* types for other usages (e.g. sorting).
*
* @lucene.internal
*
* @deprecated Please use {@link DimensionalValues} instead.
*
* @since 2.9, API changed non backwards-compliant in 4.0
*/
@Deprecated
public final class LegacyNumericUtils {
private LegacyNumericUtils() {} // no instance! (static utility holder only)

/**
 * The default precision step used by {@link org.apache.lucene.document.LegacyLongField},
 * {@link org.apache.lucene.document.LegacyDoubleField}, {@link org.apache.lucene.analysis.LegacyNumericTokenStream}, {@link
 * org.apache.lucene.search.LegacyNumericRangeQuery}.
 */
public static final int PRECISION_STEP_DEFAULT = 16;

/**
 * The default precision step used by {@link org.apache.lucene.document.LegacyIntField} and
 * {@link org.apache.lucene.document.LegacyFloatField}.
 */
public static final int PRECISION_STEP_DEFAULT_32 = 8;

/**
 * Longs are stored at lower precision by shifting off lower bits. The shift count is
 * stored as <code>SHIFT_START_LONG+shift</code> in the first byte
 */
public static final byte SHIFT_START_LONG = 0x20;

/**
 * The maximum term length (used for <code>byte[]</code> buffer size)
 * for encoding <code>long</code> values.
 * @see #longToPrefixCodedBytes
 */
public static final int BUF_SIZE_LONG = 63/7 + 2; // = 11: up to 10 payload bytes (7 value bits each) plus the leading shift byte

/**
 * Integers are stored at lower precision by shifting off lower bits. The shift count is
 * stored as <code>SHIFT_START_INT+shift</code> in the first byte
 */
public static final byte SHIFT_START_INT = 0x60;

/**
 * The maximum term length (used for <code>byte[]</code> buffer size)
 * for encoding <code>int</code> values.
 * @see #intToPrefixCodedBytes
 */
public static final int BUF_SIZE_INT = 31/7 + 2; // = 6: up to 5 payload bytes (7 value bits each) plus the leading shift byte
/**
 * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
 * This method is used by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
 * After encoding, {@code bytes.offset} will always be 0.
 * @param val the numeric value
 * @param shift how many bits to strip from the right
 * @param bytes will contain the encoded value
 */
public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
  // Pure delegation; kept as a separate entry point for API compatibility.
  longToPrefixCodedBytes(val, shift, bytes);
}
/**
 * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
 * This method is used by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
 * After encoding, {@code bytes.offset} will always be 0.
 * @param val the numeric value
 * @param shift how many bits to strip from the right
 * @param bytes will contain the encoded value
 */
public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
  // Pure delegation; kept as a separate entry point for API compatibility.
  intToPrefixCodedBytes(val, shift, bytes);
}
/**
 * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
 * This method is used by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
 * After encoding, {@code bytes.offset} will always be 0.
 * @param val the numeric value
 * @param shift how many bits to strip from the right
 * @param bytes will contain the encoded value
 */
public static void longToPrefixCodedBytes(final long val, final int shift, final BytesRefBuilder bytes) {
  if ((shift & ~0x3f) != 0) { // any bit outside 0..63 set → illegal
    throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
  }
  // Number of 7-bit payload bytes for the remaining 64-shift bits;
  // (i*37)>>8 computes i/7 for i in 0..63.
  final int payloadLength = (((63 - shift) * 37) >> 8) + 1;
  bytes.setLength(payloadLength + 1); // one extra byte carries the shift info
  bytes.grow(BUF_SIZE_LONG);
  bytes.setByteAt(0, (byte) (SHIFT_START_LONG + shift));
  // Flip the sign bit so the unsigned byte order matches signed long order,
  // then drop the reduced-precision low bits.
  long remaining = (val ^ 0x8000000000000000L) >>> shift;
  // Store 7 bits per byte (compatible with UTF-8 encoding of terms),
  // least-significant chunk at the highest index.
  for (int pos = payloadLength; pos >= 1; pos--) {
    bytes.setByteAt(pos, (byte) (remaining & 0x7f));
    remaining >>>= 7;
  }
}
/**
 * Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
 * This method is used by {@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
 * After encoding, {@code bytes.offset} will always be 0.
 * @param val the numeric value
 * @param shift how many bits to strip from the right
 * @param bytes will contain the encoded value
 */
public static void intToPrefixCodedBytes(final int val, final int shift, final BytesRefBuilder bytes) {
  if ((shift & ~0x1f) != 0) { // any bit outside 0..31 set → illegal
    throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
  }
  // Number of 7-bit payload bytes for the remaining 32-shift bits;
  // (i*37)>>8 computes i/7 for i in 0..63.
  final int payloadLength = (((31 - shift) * 37) >> 8) + 1;
  bytes.setLength(payloadLength + 1); // one extra byte carries the shift info
  bytes.grow(LegacyNumericUtils.BUF_SIZE_LONG); // use the long-sized max buffer
  bytes.setByteAt(0, (byte) (SHIFT_START_INT + shift));
  // Flip the sign bit so the unsigned byte order matches signed int order,
  // then drop the reduced-precision low bits.
  int remaining = (val ^ 0x80000000) >>> shift;
  // Store 7 bits per byte (compatible with UTF-8 encoding of terms),
  // least-significant chunk at the highest index.
  for (int pos = payloadLength; pos >= 1; pos--) {
    bytes.setByteAt(pos, (byte) (remaining & 0x7f));
    remaining >>>= 7;
  }
}
/**
 * Returns the shift value from a prefix encoded {@code long}.
 * @throws NumberFormatException if the supplied {@link BytesRef} is
 * not correctly prefix encoded.
 */
public static int getPrefixCodedLongShift(final BytesRef val) {
  // First byte stores SHIFT_START_LONG + shift, so a valid long term yields 0..63 here.
  final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
  // An int-encoded term starts at SHIFT_START_INT (0x60) and would land at >= 0x40
  // here — presumably the reason the message hints the value may really be an INT.
  if (shift > 63 || shift < 0)
    throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
  return shift;
}
/**
 * Returns the shift value from a prefix encoded {@code int}.
 * @throws NumberFormatException if the supplied {@link BytesRef} is
 * not correctly prefix encoded.
 */
public static int getPrefixCodedIntShift(final BytesRef val) {
  // First byte stores SHIFT_START_INT + shift, so a valid int term yields 0..31 here.
  final int shift = val.bytes[val.offset] - SHIFT_START_INT;
  if (shift > 31 || shift < 0) {
    // Include the offending shift value in the message, consistent with
    // getPrefixCodedLongShift's diagnostic.
    throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
  }
  return shift;
}
/**
 * Returns a long from prefixCoded bytes.
 * Rightmost bits will be zero for lower precision codes.
 * This method can be used to decode a term's value.
 * @throws NumberFormatException if the supplied {@link BytesRef} is
 * not correctly prefix encoded.
 * @see #longToPrefixCodedBytes
 */
public static long prefixCodedToLong(final BytesRef val) {
  // Accumulate 7 bits per payload byte (the first byte is the shift marker).
  final int limit = val.offset + val.length;
  long accumulated = 0L;
  int pos = val.offset + 1;
  while (pos < limit) {
    final byte current = val.bytes[pos];
    if (current < 0) { // high bit set — not a valid 7-bit payload byte
      throw new NumberFormatException(
        "Invalid prefixCoded numerical value representation (byte "+
        Integer.toHexString(current&0xff)+" at position "+(pos-val.offset)+" is invalid)"
      );
    }
    accumulated = (accumulated << 7) | current;
    pos++;
  }
  // Restore the stripped low bits as zeros and undo the sign-bit flip.
  return (accumulated << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
}
/**
 * Returns an int from prefixCoded bytes.
 * Rightmost bits will be zero for lower precision codes.
 * This method can be used to decode a term's value.
 * @throws NumberFormatException if the supplied {@link BytesRef} is
 * not correctly prefix encoded.
 * @see #intToPrefixCodedBytes
 */
public static int prefixCodedToInt(final BytesRef val) {
  // Accumulate 7 bits per payload byte (the first byte is the shift marker).
  final int limit = val.offset + val.length;
  int accumulated = 0;
  int pos = val.offset + 1;
  while (pos < limit) {
    final byte current = val.bytes[pos];
    if (current < 0) { // high bit set — not a valid 7-bit payload byte
      throw new NumberFormatException(
        "Invalid prefixCoded numerical value representation (byte "+
        Integer.toHexString(current&0xff)+" at position "+(pos-val.offset)+" is invalid)"
      );
    }
    accumulated = (accumulated << 7) | current;
    pos++;
  }
  // Restore the stripped low bits as zeros and undo the sign-bit flip.
  return (accumulated << getPrefixCodedIntShift(val)) ^ 0x80000000;
}
/**
 * Converts a <code>double</code> value to a sortable signed <code>long</code>.
 * The value is converted by getting their IEEE 754 floating-point &quot;double format&quot;
 * bit layout and then some bits are swapped, to be able to compare the result as long.
 * By this the precision is not reduced, but the value can easily used as a long.
 * The sort order (including {@link Double#NaN}) is defined by
 * {@link Double#compareTo}; {@code NaN} is greater than positive infinity.
 * @see #sortableLongToDouble
 */
public static long doubleToSortableLong(double val) {
  // Raw IEEE 754 bits, then remap so that signed-long order matches Double.compareTo.
  return sortableDoubleBits(Double.doubleToLongBits(val));
}
/**
 * Converts a sortable <code>long</code> back to a <code>double</code>.
 * @see #doubleToSortableLong
 */
public static double sortableLongToDouble(long val) {
  // sortableDoubleBits converts back to the original layout as well, so the
  // same transform decodes the sortable form.
  return Double.longBitsToDouble(sortableDoubleBits(val));
}
/**
 * Converts a <code>float</code> value to a sortable signed <code>int</code>.
 * The value is converted by getting their IEEE 754 floating-point &quot;float format&quot;
 * bit layout and then some bits are swapped, to be able to compare the result as int.
 * By this the precision is not reduced, but the value can easily used as an int.
 * The sort order (including {@link Float#NaN}) is defined by
 * {@link Float#compareTo}; {@code NaN} is greater than positive infinity.
 * @see #sortableIntToFloat
 */
public static int floatToSortableInt(float val) {
  // Raw IEEE 754 bits, then remap so that signed-int order matches Float.compareTo.
  return sortableFloatBits(Float.floatToIntBits(val));
}
/**
 * Converts a sortable <code>int</code> back to a <code>float</code>.
 * @see #floatToSortableInt
 */
public static float sortableIntToFloat(int val) {
  // sortableFloatBits converts back to the original layout as well, so the
  // same transform decodes the sortable form.
  return Float.intBitsToFloat(sortableFloatBits(val));
}
/** Converts IEEE 754 representation of a double to sortable order (or back to the original) */
public static long sortableDoubleBits(long bits) {
  // For negative values (sign bit set) flip all 63 non-sign bits so that
  // larger magnitudes sort lower; non-negative values pass through unchanged.
  final long flipMask = (bits >> 63) & 0x7fffffffffffffffL;
  return bits ^ flipMask;
}
/** Converts IEEE 754 representation of a float to sortable order (or back to the original) */
public static int sortableFloatBits(int bits) {
  // For negative values (sign bit set) flip all 31 non-sign bits so that
  // larger magnitudes sort lower; non-negative values pass through unchanged.
  final int flipMask = (bits >> 31) & 0x7fffffff;
  return bits ^ flipMask;
}
/**
 * Splits a long range recursively.
 * You may implement a builder that adds clauses to a
 * {@link org.apache.lucene.search.BooleanQuery} for each call to its
 * {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
 * method.
 * <p>This method is used by {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
 */
public static void splitLongRange(final LongRangeBuilder builder,
  final int precisionStep, final long minBound, final long maxBound
) {
  // valSize = 64: the number of value bits in a long
  splitRange(builder, 64, precisionStep, minBound, maxBound);
}
/**
 * Splits an int range recursively.
 * You may implement a builder that adds clauses to a
 * {@link org.apache.lucene.search.BooleanQuery} for each call to its
 * {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
 * method.
 * <p>This method is used by {@link org.apache.lucene.search.LegacyNumericRangeQuery}.
 */
public static void splitIntRange(final IntRangeBuilder builder,
  final int precisionStep, final int minBound, final int maxBound
) {
  // valSize = 32: the number of value bits in an int
  splitRange(builder, 32, precisionStep, minBound, maxBound);
}
/** This helper does the splitting for both 32 and 64 bit. */
private static void splitRange(
  final Object builder, final int valSize,
  final int precisionStep, long minBound, long maxBound
) {
  if (precisionStep < 1)
    throw new IllegalArgumentException("precisionStep must be >=1");
  if (minBound > maxBound) return; // empty range: emit nothing
  for (int shift=0; ; shift += precisionStep) {
    // calculate new bounds for inner precision
    // diff: the span of one full block at the NEXT (coarser) precision level;
    // mask: the precisionStep bits handled at THIS level.
    final long diff = 1L << (shift+precisionStep),
      mask = ((1L<<precisionStep) - 1L) << shift;
    // hasLower/hasUpper: bound is not aligned to a full block at this level,
    // so a partial edge range must be emitted before moving inward.
    final boolean
      hasLower = (minBound & mask) != 0L,
      hasUpper = (maxBound & mask) != mask;
    // Bounds for the next level, rounded inward to whole blocks.
    final long
      nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
      nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
    // Stepping inward must not wrap around the value space (overflow check).
    final boolean
      lowerWrapped = nextMinBound < minBound,
      upperWrapped = nextMaxBound > maxBound;
    if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
      // We are in the lowest precision or the next precision is not available.
      addRange(builder, valSize, minBound, maxBound, shift);
      // exit the split recursion loop
      break;
    }
    if (hasLower)
      addRange(builder, valSize, minBound, minBound | mask, shift);
    if (hasUpper)
      addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
    // recurse to next precision
    minBound = nextMinBound;
    maxBound = nextMaxBound;
  }
}
/** Helper that delegates to the range builder matching {@code valSize}. */
private static void addRange(
  final Object builder, final int valSize,
  long minBound, long maxBound,
  final int shift
) {
  // Fill the low `shift` bits of the upper bound so callers that reconstruct the
  // full range see them; prefix encoding strips those bits again, so no harm done.
  maxBound |= (1L << shift) - 1L;
  if (valSize == 64) {
    ((LongRangeBuilder) builder).addRange(minBound, maxBound, shift);
  } else if (valSize == 32) {
    ((IntRangeBuilder) builder).addRange((int) minBound, (int) maxBound, shift);
  } else {
    // Should not happen!
    throw new IllegalArgumentException("valSize must be 32 or 64.");
  }
}
/**
 * Callback for {@link #splitLongRange}.
 * You need to overwrite only one of the methods.
 * @lucene.internal
 * @since 2.9, API changed non backwards-compliant in 4.0
 */
public static abstract class LongRangeBuilder {
  /**
   * Overwrite this method, if you like to receive the already prefix encoded range bounds.
   * You can directly build classical (inclusive) range queries from them.
   */
  public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
    throw new UnsupportedOperationException();
  }
  /**
   * Overwrite this method, if you like to receive the raw long range bounds.
   * You can use this for e.g. debugging purposes (print out range bounds).
   */
  public void addRange(final long min, final long max, final int shift) {
    final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
    // Prefix-encode both bounds at the given shift, then hand off to the BytesRef overload.
    longToPrefixCodedBytes(min, shift, minBytes);
    longToPrefixCodedBytes(max, shift, maxBytes);
    addRange(minBytes.get(), maxBytes.get());
  }
}
/**
 * Callback for {@link #splitIntRange}.
 * You need to overwrite only one of the methods.
 * @lucene.internal
 * @since 2.9, API changed non backwards-compliant in 4.0
 */
public static abstract class IntRangeBuilder {
  /**
   * Overwrite this method, if you like to receive the already prefix encoded range bounds.
   * You can directly build classical range (inclusive) queries from them.
   */
  public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
    throw new UnsupportedOperationException();
  }
  /**
   * Overwrite this method, if you like to receive the raw int range bounds.
   * You can use this for e.g. debugging purposes (print out range bounds).
   */
  public void addRange(final int min, final int max, final int shift) {
    final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
    // Prefix-encode both bounds at the given shift, then hand off to the BytesRef overload.
    intToPrefixCodedBytes(min, shift, minBytes);
    intToPrefixCodedBytes(max, shift, maxBytes);
    addRange(minBytes.get(), maxBytes.get());
  }
}
/**
 * Filters the given {@link TermsEnum} so it only returns prefix coded 64 bit
 * terms whose shift value is <tt>0</tt> (i.e. full-precision terms).
 *
 * @param termsEnum the terms enum to filter
 * @return a filtered {@link TermsEnum} yielding only shift-0 64 bit terms
 */
public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
  return new SeekingNumericFilteredTermsEnum(termsEnum) {
    @Override
    protected AcceptStatus accept(BytesRef term) {
      // Shift-0 terms sort first; the first reduced-precision term ends the enum.
      if (LegacyNumericUtils.getPrefixCodedLongShift(term) == 0) {
        return AcceptStatus.YES;
      }
      return AcceptStatus.END;
    }
  };
}
/**
 * Filters the given {@link TermsEnum} so it only returns prefix coded 32 bit
 * terms whose shift value is <tt>0</tt> (i.e. full-precision terms).
 *
 * @param termsEnum the terms enum to filter
 * @return a filtered {@link TermsEnum} yielding only shift-0 32 bit terms
 */
public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
  return new SeekingNumericFilteredTermsEnum(termsEnum) {
    @Override
    protected AcceptStatus accept(BytesRef term) {
      // Shift-0 terms sort first; the first reduced-precision term ends the enum.
      if (LegacyNumericUtils.getPrefixCodedIntShift(term) == 0) {
        return AcceptStatus.YES;
      }
      return AcceptStatus.END;
    }
  };
}
/** Just like FilteredTermsEnum, except it adds a limited
 * seekCeil implementation that only works with {@link
 * #filterPrefixCodedInts} and {@link
 * #filterPrefixCodedLongs}. */
private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
  public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
    // false: do not use FilteredTermsEnum's default seeking; we provide seekCeil below.
    super(tenum, false);
  }

  /**
   * Limited seekCeil: assumes {@link #accept} only ever returns {@code YES} or
   * {@code END} (which holds for the two numeric filters above), so any
   * non-accepted ceiling term means the enum is exhausted.
   */
  // NOTE: removed a vestigial @SuppressWarnings("fallthrough") — the method
  // contains no switch statement, so the suppression had no effect.
  @Override
  public SeekStatus seekCeil(BytesRef term) throws IOException {
    SeekStatus status = tenum.seekCeil(term);
    if (status == SeekStatus.END) {
      return SeekStatus.END;
    }
    actualTerm = tenum.term();
    if (accept(actualTerm) == AcceptStatus.YES) {
      return status;
    } else {
      return SeekStatus.END;
    }
  }
}
/** Wraps {@code terms} so iteration only sees full-precision (shift 0) int terms. */
private static Terms intTerms(Terms terms) {
  return new FilterLeafReader.FilterTerms(terms) {
    @Override
    public TermsEnum iterator() throws IOException {
      TermsEnum raw = in.iterator();
      return filterPrefixCodedInts(raw);
    }
  };
}
/** Wraps {@code terms} so iteration only sees full-precision (shift 0) long terms. */
private static Terms longTerms(Terms terms) {
  return new FilterLeafReader.FilterTerms(terms) {
    @Override
    public TermsEnum iterator() throws IOException {
      TermsEnum raw = in.iterator();
      return filterPrefixCodedLongs(raw);
    }
  };
}
/**
 * Returns the minimum int value indexed into this
 * numeric field or null if no terms exist.
 */
public static Integer getMinInt(Terms terms) throws IOException {
  // Shift==0 (full precision) terms sort before all reduced-precision terms,
  // so the global minimum term already holds the minimum full-precision value.
  final BytesRef minTerm = terms.getMin();
  if (minTerm == null) {
    return null;
  }
  return LegacyNumericUtils.prefixCodedToInt(minTerm);
}
/**
 * Returns the maximum int value indexed into this
 * numeric field or null if no terms exist.
 */
public static Integer getMaxInt(Terms terms) throws IOException {
  // Filter to shift==0 terms first; the max reduced-precision term would not
  // decode to the true maximum value.
  final BytesRef maxTerm = intTerms(terms).getMax();
  if (maxTerm == null) {
    return null;
  }
  return LegacyNumericUtils.prefixCodedToInt(maxTerm);
}
/**
 * Returns the minimum long value indexed into this
 * numeric field or null if no terms exist.
 */
public static Long getMinLong(Terms terms) throws IOException {
  // Shift==0 (full precision) terms sort before all reduced-precision terms,
  // so the global minimum term already holds the minimum full-precision value.
  final BytesRef minTerm = terms.getMin();
  if (minTerm == null) {
    return null;
  }
  return LegacyNumericUtils.prefixCodedToLong(minTerm);
}
/**
 * Returns the maximum long value indexed into this
 * numeric field or null if no terms exist.
 */
public static Long getMaxLong(Terms terms) throws IOException {
  // Filter to shift==0 terms first; the max reduced-precision term would not
  // decode to the true maximum value.
  final BytesRef maxTerm = longTerms(terms).getMax();
  if (maxTerm == null) {
    return null;
  }
  return LegacyNumericUtils.prefixCodedToLong(maxTerm);
}
}

View File

@ -17,250 +17,18 @@ package org.apache.lucene.util;
* limitations under the License.
*/
import java.io.IOException;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.document.DoubleField; // javadocs
import org.apache.lucene.document.FloatField; // javadocs
import org.apache.lucene.document.IntField; // javadocs
import org.apache.lucene.document.LongField; // javadocs
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.FilteredTermsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.NumericRangeQuery; // for javadocs
import java.math.BigInteger;
import java.util.Arrays;
/**
* This is a helper class to generate prefix-encoded representations for numerical values
* and supplies converters to represent float/double values as sortable integers/longs.
*
* <p>To quickly execute range queries in Apache Lucene, a range is divided recursively
* into multiple intervals for searching: The center of the range is searched only with
* the lowest possible precision in the trie, while the boundaries are matched
* more exactly. This reduces the number of terms dramatically.
*
* <p>This class generates terms to achieve this: First the numerical integer values need to
* be converted to bytes. For that integer values (32 bit or 64 bit) are made unsigned
* and the bits are converted to ASCII chars with each 7 bit. The resulting byte[] is
* sortable like the original integer value (even using UTF-8 sort order). Each value is also
* prefixed (in the first char) by the <code>shift</code> value (number of bits removed) used
* during encoding.
*
* <p>To also index floating point numbers, this class supplies two methods to convert them
* to integer values by changing their bit layout: {@link #doubleToSortableLong},
* {@link #floatToSortableInt}. You will have no precision loss by
* converting floating point numbers to integers and back (only that the integer form
* is not usable). Other data types like dates can easily converted to longs or ints (e.g.
* date to long: {@link java.util.Date#getTime}).
*
* <p>For easy usage, the trie algorithm is implemented for indexing inside
* {@link NumericTokenStream} that can index <code>int</code>, <code>long</code>,
* <code>float</code>, and <code>double</code>. For querying,
* {@link NumericRangeQuery} implements the query part
* for the same data types.
*
* <p>This class can also be used, to generate lexicographically sortable (according to
* {@link BytesRef#getUTF8SortedAsUTF16Comparator()}) representations of numeric data
* types for other usages (e.g. sorting).
* Helper APIs to encode numeric values as sortable bytes and vice-versa.
*
* @lucene.internal
* @since 2.9, API changed non backwards-compliant in 4.0
*/
public final class NumericUtils {
private NumericUtils() {} // no instance!
/**
* The default precision step used by {@link LongField},
* {@link DoubleField}, {@link NumericTokenStream}, {@link
* NumericRangeQuery}.
*/
public static final int PRECISION_STEP_DEFAULT = 16;
/**
* The default precision step used by {@link IntField} and
* {@link FloatField}.
*/
public static final int PRECISION_STEP_DEFAULT_32 = 8;
/**
* Longs are stored at lower precision by shifting off lower bits. The shift count is
* stored as <code>SHIFT_START_LONG+shift</code> in the first byte
*/
public static final byte SHIFT_START_LONG = 0x20;
/**
* The maximum term length (used for <code>byte[]</code> buffer size)
* for encoding <code>long</code> values.
* @see #longToPrefixCodedBytes
*/
public static final int BUF_SIZE_LONG = 63/7 + 2;
/**
* Integers are stored at lower precision by shifting off lower bits. The shift count is
* stored as <code>SHIFT_START_INT+shift</code> in the first byte
*/
public static final byte SHIFT_START_INT = 0x60;
/**
* The maximum term length (used for <code>byte[]</code> buffer size)
* for encoding <code>int</code> values.
* @see #intToPrefixCodedBytes
*/
public static final int BUF_SIZE_INT = 31/7 + 2;
/**
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
* This is method is used by {@link NumericTokenStream}.
* After encoding, {@code bytes.offset} will always be 0.
* @param val the numeric value
* @param shift how many bits to strip from the right
* @param bytes will contain the encoded value
*/
public static void longToPrefixCoded(final long val, final int shift, final BytesRefBuilder bytes) {
longToPrefixCodedBytes(val, shift, bytes);
}
/**
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
* This is method is used by {@link NumericTokenStream}.
* After encoding, {@code bytes.offset} will always be 0.
* @param val the numeric value
* @param shift how many bits to strip from the right
* @param bytes will contain the encoded value
*/
public static void intToPrefixCoded(final int val, final int shift, final BytesRefBuilder bytes) {
intToPrefixCodedBytes(val, shift, bytes);
}
/**
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
* This is method is used by {@link NumericTokenStream}.
* After encoding, {@code bytes.offset} will always be 0.
* @param val the numeric value
* @param shift how many bits to strip from the right
* @param bytes will contain the encoded value
*/
public static void longToPrefixCodedBytes(final long val, final int shift, final BytesRefBuilder bytes) {
// ensure shift is 0..63
if ((shift & ~0x3f) != 0) {
throw new IllegalArgumentException("Illegal shift value, must be 0..63; got shift=" + shift);
}
int nChars = (((63-shift)*37)>>8) + 1; // i/7 is the same as (i*37)>>8 for i in 0..63
bytes.setLength(nChars+1); // one extra for the byte that contains the shift info
bytes.grow(BUF_SIZE_LONG);
bytes.setByteAt(0, (byte)(SHIFT_START_LONG + shift));
long sortableBits = val ^ 0x8000000000000000L;
sortableBits >>>= shift;
while (nChars > 0) {
// Store 7 bits per byte for compatibility
// with UTF-8 encoding of terms
bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
sortableBits >>>= 7;
}
}
/**
* Returns prefix coded bits after reducing the precision by <code>shift</code> bits.
* This is method is used by {@link NumericTokenStream}.
* After encoding, {@code bytes.offset} will always be 0.
* @param val the numeric value
* @param shift how many bits to strip from the right
* @param bytes will contain the encoded value
*/
public static void intToPrefixCodedBytes(final int val, final int shift, final BytesRefBuilder bytes) {
// ensure shift is 0..31
if ((shift & ~0x1f) != 0) {
throw new IllegalArgumentException("Illegal shift value, must be 0..31; got shift=" + shift);
}
int nChars = (((31-shift)*37)>>8) + 1; // i/7 is the same as (i*37)>>8 for i in 0..63
bytes.setLength(nChars+1); // one extra for the byte that contains the shift info
bytes.grow(NumericUtils.BUF_SIZE_LONG); // use the max
bytes.setByteAt(0, (byte)(SHIFT_START_INT + shift));
int sortableBits = val ^ 0x80000000;
sortableBits >>>= shift;
while (nChars > 0) {
// Store 7 bits per byte for compatibility
// with UTF-8 encoding of terms
bytes.setByteAt(nChars--, (byte)(sortableBits & 0x7f));
sortableBits >>>= 7;
}
}
/**
* Returns the shift value from a prefix encoded {@code long}.
* @throws NumberFormatException if the supplied {@link BytesRef} is
* not correctly prefix encoded.
*/
public static int getPrefixCodedLongShift(final BytesRef val) {
final int shift = val.bytes[val.offset] - SHIFT_START_LONG;
if (shift > 63 || shift < 0)
throw new NumberFormatException("Invalid shift value (" + shift + ") in prefixCoded bytes (is encoded value really an INT?)");
return shift;
}
/**
* Returns the shift value from a prefix encoded {@code int}.
* @throws NumberFormatException if the supplied {@link BytesRef} is
* not correctly prefix encoded.
*/
public static int getPrefixCodedIntShift(final BytesRef val) {
final int shift = val.bytes[val.offset] - SHIFT_START_INT;
if (shift > 31 || shift < 0)
throw new NumberFormatException("Invalid shift value in prefixCoded bytes (is encoded value really an INT?)");
return shift;
}
/**
* Returns a long from prefixCoded bytes.
* Rightmost bits will be zero for lower precision codes.
* This method can be used to decode a term's value.
* @throws NumberFormatException if the supplied {@link BytesRef} is
* not correctly prefix encoded.
* @see #longToPrefixCodedBytes
*/
public static long prefixCodedToLong(final BytesRef val) {
long sortableBits = 0L;
for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
sortableBits <<= 7;
final byte b = val.bytes[i];
if (b < 0) {
throw new NumberFormatException(
"Invalid prefixCoded numerical value representation (byte "+
Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
);
}
sortableBits |= b;
}
return (sortableBits << getPrefixCodedLongShift(val)) ^ 0x8000000000000000L;
}
/**
* Returns an int from prefixCoded bytes.
* Rightmost bits will be zero for lower precision codes.
* This method can be used to decode a term's value.
* @throws NumberFormatException if the supplied {@link BytesRef} is
* not correctly prefix encoded.
* @see #intToPrefixCodedBytes
*/
public static int prefixCodedToInt(final BytesRef val) {
int sortableBits = 0;
for (int i=val.offset+1, limit=val.offset+val.length; i<limit; i++) {
sortableBits <<= 7;
final byte b = val.bytes[i];
if (b < 0) {
throw new NumberFormatException(
"Invalid prefixCoded numerical value representation (byte "+
Integer.toHexString(b&0xff)+" at position "+(i-val.offset)+" is invalid)"
);
}
sortableBits |= b;
}
return (sortableBits << getPrefixCodedIntShift(val)) ^ 0x80000000;
}
/**
* Converts a <code>double</code> value to a sortable signed <code>long</code>.
* The value is converted by getting their IEEE 754 floating-point &quot;double format&quot;
@ -313,285 +81,178 @@ public final class NumericUtils {
return bits ^ (bits >> 31) & 0x7fffffff;
}
/**
* Splits a long range recursively.
* You may implement a builder that adds clauses to a
* {@link org.apache.lucene.search.BooleanQuery} for each call to its
* {@link LongRangeBuilder#addRange(BytesRef,BytesRef)}
* method.
* <p>This method is used by {@link NumericRangeQuery}.
*/
public static void splitLongRange(final LongRangeBuilder builder,
final int precisionStep, final long minBound, final long maxBound
) {
splitRange(builder, 64, precisionStep, minBound, maxBound);
}
/**
* Splits an int range recursively.
* You may implement a builder that adds clauses to a
* {@link org.apache.lucene.search.BooleanQuery} for each call to its
* {@link IntRangeBuilder#addRange(BytesRef,BytesRef)}
* method.
* <p>This method is used by {@link NumericRangeQuery}.
*/
public static void splitIntRange(final IntRangeBuilder builder,
final int precisionStep, final int minBound, final int maxBound
) {
splitRange(builder, 32, precisionStep, minBound, maxBound);
}
/** This helper does the splitting for both 32 and 64 bit. */
private static void splitRange(
final Object builder, final int valSize,
final int precisionStep, long minBound, long maxBound
) {
if (precisionStep < 1)
throw new IllegalArgumentException("precisionStep must be >=1");
if (minBound > maxBound) return;
for (int shift=0; ; shift += precisionStep) {
// calculate new bounds for inner precision
final long diff = 1L << (shift+precisionStep),
mask = ((1L<<precisionStep) - 1L) << shift;
final boolean
hasLower = (minBound & mask) != 0L,
hasUpper = (maxBound & mask) != mask;
final long
nextMinBound = (hasLower ? (minBound + diff) : minBound) & ~mask,
nextMaxBound = (hasUpper ? (maxBound - diff) : maxBound) & ~mask;
final boolean
lowerWrapped = nextMinBound < minBound,
upperWrapped = nextMaxBound > maxBound;
if (shift+precisionStep>=valSize || nextMinBound>nextMaxBound || lowerWrapped || upperWrapped) {
// We are in the lowest precision or the next precision is not available.
addRange(builder, valSize, minBound, maxBound, shift);
// exit the split recursion loop
break;
}
if (hasLower)
addRange(builder, valSize, minBound, minBound | mask, shift);
if (hasUpper)
addRange(builder, valSize, maxBound & ~mask, maxBound, shift);
// recurse to next precision
minBound = nextMinBound;
maxBound = nextMaxBound;
}
}
/** Helper that delegates to correct range builder */
private static void addRange(
final Object builder, final int valSize,
long minBound, long maxBound,
final int shift
) {
// for the max bound set all lower bits (that were shifted away):
// this is important for testing or other usages of the splitted range
// (e.g. to reconstruct the full range). The prefixEncoding will remove
// the bits anyway, so they do not hurt!
maxBound |= (1L << shift) - 1L;
// delegate to correct range builder
switch(valSize) {
case 64:
((LongRangeBuilder)builder).addRange(minBound, maxBound, shift);
break;
case 32:
((IntRangeBuilder)builder).addRange((int)minBound, (int)maxBound, shift);
break;
default:
// Should not happen!
throw new IllegalArgumentException("valSize must be 32 or 64.");
}
}
/**
* Callback for {@link #splitLongRange}.
* You need to overwrite only one of the methods.
* @lucene.internal
* @since 2.9, API changed non backwards-compliant in 4.0
*/
public static abstract class LongRangeBuilder {
/**
* Overwrite this method, if you like to receive the already prefix encoded range bounds.
* You can directly build classical (inclusive) range queries from them.
*/
public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
throw new UnsupportedOperationException();
}
/**
* Overwrite this method, if you like to receive the raw long range bounds.
* You can use this for e.g. debugging purposes (print out range bounds).
*/
public void addRange(final long min, final long max, final int shift) {
final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
longToPrefixCodedBytes(min, shift, minBytes);
longToPrefixCodedBytes(max, shift, maxBytes);
addRange(minBytes.get(), maxBytes.get());
}
}
/**
* Callback for {@link #splitIntRange}.
* You need to overwrite only one of the methods.
* @lucene.internal
* @since 2.9, API changed non backwards-compliant in 4.0
*/
public static abstract class IntRangeBuilder {
/**
* Overwrite this method, if you like to receive the already prefix encoded range bounds.
* You can directly build classical range (inclusive) queries from them.
*/
public void addRange(BytesRef minPrefixCoded, BytesRef maxPrefixCoded) {
throw new UnsupportedOperationException();
}
/**
* Overwrite this method, if you like to receive the raw int range bounds.
* You can use this for e.g. debugging purposes (print out range bounds).
*/
public void addRange(final int min, final int max, final int shift) {
final BytesRefBuilder minBytes = new BytesRefBuilder(), maxBytes = new BytesRefBuilder();
intToPrefixCodedBytes(min, shift, minBytes);
intToPrefixCodedBytes(max, shift, maxBytes);
addRange(minBytes.get(), maxBytes.get());
}
}
/**
* Filters the given {@link TermsEnum} by accepting only prefix coded 64 bit
* terms with a shift value of <tt>0</tt>.
*
* @param termsEnum
* the terms enum to filter
* @return a filtered {@link TermsEnum} that only returns prefix coded 64 bit
* terms with a shift value of <tt>0</tt>.
*/
public static TermsEnum filterPrefixCodedLongs(TermsEnum termsEnum) {
return new SeekingNumericFilteredTermsEnum(termsEnum) {
@Override
protected AcceptStatus accept(BytesRef term) {
return NumericUtils.getPrefixCodedLongShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
}
};
}
/**
* Filters the given {@link TermsEnum} by accepting only prefix coded 32 bit
* terms with a shift value of <tt>0</tt>.
*
* @param termsEnum
* the terms enum to filter
* @return a filtered {@link TermsEnum} that only returns prefix coded 32 bit
* terms with a shift value of <tt>0</tt>.
*/
public static TermsEnum filterPrefixCodedInts(TermsEnum termsEnum) {
return new SeekingNumericFilteredTermsEnum(termsEnum) {
@Override
protected AcceptStatus accept(BytesRef term) {
return NumericUtils.getPrefixCodedIntShift(term) == 0 ? AcceptStatus.YES : AcceptStatus.END;
}
};
}
/** Just like FilteredTermsEnum, except it adds a limited
* seekCeil implementation that only works with {@link
* #filterPrefixCodedInts} and {@link
* #filterPrefixCodedLongs}. */
private static abstract class SeekingNumericFilteredTermsEnum extends FilteredTermsEnum {
public SeekingNumericFilteredTermsEnum(final TermsEnum tenum) {
super(tenum, false);
}
@Override
@SuppressWarnings("fallthrough")
public SeekStatus seekCeil(BytesRef term) throws IOException {
// NOTE: This is not general!! It only handles YES
// and END, because that's all we need for the numeric
// case here
SeekStatus status = tenum.seekCeil(term);
if (status == SeekStatus.END) {
return SeekStatus.END;
}
actualTerm = tenum.term();
if (accept(actualTerm) == AcceptStatus.YES) {
return status;
/** Result = a - b, where a &gt;= b, else {@code IllegalArgumentException} is thrown. */
public static void subtract(int bytesPerDim, int dim, byte[] a, byte[] b, byte[] result) {
int start = dim * bytesPerDim;
int end = start + bytesPerDim;
int borrow = 0;
for(int i=end-1;i>=start;i--) {
int diff = (a[i]&0xff) - (b[i]&0xff) - borrow;
if (diff < 0) {
diff += 256;
borrow = 1;
} else {
return SeekStatus.END;
borrow = 0;
}
result[i-start] = (byte) diff;
}
if (borrow != 0) {
throw new IllegalArgumentException("a < b");
}
}
private static Terms intTerms(Terms terms) {
return new FilterLeafReader.FilterTerms(terms) {
@Override
public TermsEnum iterator() throws IOException {
return filterPrefixCodedInts(in.iterator());
}
};
/** Result = a + b, where a and b are unsigned. If there is an overflow, {@code IllegalArgumentException} is thrown. */
public static void add(int bytesPerDim, int dim, byte[] a, byte[] b, byte[] result) {
  final int start = dim * bytesPerDim;
  int carry = 0;
  // Ripple-carry addition over the big-endian unsigned bytes of dimension `dim`,
  // least-significant byte first; the sum is written to result[0..bytesPerDim).
  for (int i = start + bytesPerDim - 1; i >= start; i--) {
    final int sum = (a[i] & 0xff) + (b[i] & 0xff) + carry;
    carry = sum >> 8;            // 0 or 1
    result[i - start] = (byte) sum;  // keeps the low 8 bits
  }
  if (carry != 0) {
    throw new IllegalArgumentException("a + b overflows bytesPerDim=" + bytesPerDim);
  }
}
private static Terms longTerms(Terms terms) {
return new FilterLeafReader.FilterTerms(terms) {
@Override
public TermsEnum iterator() throws IOException {
return filterPrefixCodedLongs(in.iterator());
}
};
}
/**
* Returns the minimum int value indexed into this
* numeric field or null if no terms exist.
*/
public static Integer getMinInt(Terms terms) throws IOException {
// All shift=0 terms are sorted first, so we don't need
// to filter the incoming terms; we can just get the
// min:
BytesRef min = terms.getMin();
return (min != null) ? NumericUtils.prefixCodedToInt(min) : null;
/** Returns positive int if a &gt; b, negative int if a &lt; b and 0 if a == b */
public static int compare(int bytesPerDim, byte[] a, int aIndex, byte[] b, int bIndex) {
  assert aIndex >= 0;
  assert bIndex >= 0;
  final int aBase = aIndex * bytesPerDim;
  final int bBase = bIndex * bytesPerDim;
  // Big-endian unsigned comparison: the first differing byte decides.
  for (int i = 0; i < bytesPerDim; i++) {
    final int diff = (a[aBase + i] & 0xff) - (b[bBase + i] & 0xff);
    if (diff != 0) {
      return diff;
    }
  }
  return 0;
}
/**
* Returns the maximum int value indexed into this
* numeric field or null if no terms exist.
*/
public static Integer getMaxInt(Terms terms) throws IOException {
BytesRef max = intTerms(terms).getMax();
return (max != null) ? NumericUtils.prefixCodedToInt(max) : null;
/** Returns true if N-dim rect A contains N-dim rect B */
public static boolean contains(int bytesPerDim,
                               byte[] minPackedA, byte[] maxPackedA,
                               byte[] minPackedB, byte[] maxPackedB) {
  final int numDims = minPackedA.length / bytesPerDim;
  for (int dim = 0; dim < numDims; dim++) {
    // A contains B iff, on every dimension, A.min <= B.min and A.max >= B.max.
    final boolean minOk = compare(bytesPerDim, minPackedA, dim, minPackedB, dim) <= 0;
    final boolean maxOk = compare(bytesPerDim, maxPackedA, dim, maxPackedB, dim) >= 0;
    if (!minOk || !maxOk) {
      return false;
    }
  }
  return true;
}
/**
* Returns the minimum long value indexed into this
* numeric field or null if no terms exist.
*/
public static Long getMinLong(Terms terms) throws IOException {
// All shift=0 terms are sorted first, so we don't need
// to filter the incoming terms; we can just get the
// min:
BytesRef min = terms.getMin();
return (min != null) ? NumericUtils.prefixCodedToLong(min) : null;
/** Encodes {@code x} big-endian into {@code dest} at byte offset {@code index*4},
 *  sign-flipped so the resulting bytes sort (as unsigned) in numeric int order. */
public static void intToBytes(int x, byte[] dest, int index) {
  // Flip the sign bit, so negative ints sort before positive ints correctly:
  x ^= 0x80000000;
  intToBytesDirect(x, dest, index);
}
/**
* Returns the maximum long value indexed into this
* numeric field or null if no terms exist.
*/
public static Long getMaxLong(Terms terms) throws IOException {
BytesRef max = longTerms(terms).getMax();
return (max != null) ? NumericUtils.prefixCodedToLong(max) : null;
/** Encodes {@code x} big-endian into {@code dest} at byte offset {@code index*4}
 *  WITHOUT flipping the sign bit (raw two's-complement bytes).
 *  NOTE: the previous "Flip the sign bit" comment here was copied from
 *  {@link #intToBytes} and was wrong — this variant performs no flip. */
public static void intToBytesDirect(int x, byte[] dest, int index) {
  for(int i=0;i<4;i++) {
    dest[4*index+i] = (byte) (x >> 24-i*8);
  }
}
/** Decodes an int previously written with {@link #intToBytes}. */
public static int bytesToInt(byte[] src, int index) {
  int x = bytesToIntDirect(src, index);
  // Re-flip the sign bit to restore the original value:
  return x ^ 0x80000000;
}
/** Decodes a raw big-endian int previously written with {@link #intToBytesDirect}. */
public static int bytesToIntDirect(byte[] src, int index) {
  final int base = 4 * index;
  int value = 0;
  // Accumulate big-endian: shift previous bytes up and OR in the next one.
  for (int i = 0; i < 4; i++) {
    value = (value << 8) | (src[base + i] & 0xff);
  }
  return value;
}
/** Encodes {@code v} big-endian into {@code bytes} at byte offset {@code dim*8},
 *  sign-flipped so the resulting bytes sort (as unsigned) in numeric long order. */
public static void longToBytes(long v, byte[] bytes, int dim) {
  // Flip the sign bit so negative longs sort before positive longs:
  v ^= 0x8000000000000000L;
  longToBytesDirect(v, bytes, dim);
}
/** Encodes {@code v} big-endian into {@code bytes} at byte offset {@code dim*8}
 *  without flipping the sign bit (raw two's-complement bytes). */
public static void longToBytesDirect(long v, byte[] bytes, int dim) {
  final int base = 8 * dim;
  // Most significant byte first; the byte cast keeps only the low 8 bits.
  for (int i = 0; i < 8; i++) {
    bytes[base + i] = (byte) (v >>> (56 - 8 * i));
  }
}
/** Decodes a long previously written with {@link #longToBytes}. */
public static long bytesToLong(byte[] bytes, int index) {
  long v = bytesToLongDirect(bytes, index);
  // Flip the sign bit back
  v ^= 0x8000000000000000L;
  return v;
}
/** Decodes a raw big-endian long previously written with {@link #longToBytesDirect}. */
public static long bytesToLongDirect(byte[] bytes, int index) {
  final int base = 8 * index;
  long value = 0;
  // Accumulate big-endian: shift previous bytes up and OR in the next one.
  for (int i = 0; i < 8; i++) {
    value = (value << 8) | (bytes[base + i] & 0xffL);
  }
  return value;
}
/** Flips the sign bit of the first (most significant) byte, in place, so that
 *  big-endian two's-complement byte arrays sort as unsigned bytes in numeric
 *  order. The transform is an involution: applying it twice restores the input. */
public static void sortableBigIntBytes(byte[] bytes) {
  // Only the sign bit of the most significant byte needs flipping. The former
  // loop XOR-ing every remaining byte with 0 was a no-op and has been removed.
  bytes[0] ^= 0x80;
}
/** Packs {@code bigInt} as exactly {@code numBytesPerDim} sortable bytes into
 *  {@code result} at dimension {@code dim}. The value must fit in
 *  {@code numBytesPerDim} two's-complement bytes (asserted below). */
public static void bigIntToBytes(BigInteger bigInt, byte[] result, int dim, int numBytesPerDim) {
  byte[] bigIntBytes = bigInt.toByteArray();
  byte[] fullBigIntBytes;
  if (bigIntBytes.length < numBytesPerDim) {
    // Left-pad to the fixed width, sign-extending when the value is negative:
    fullBigIntBytes = new byte[numBytesPerDim];
    System.arraycopy(bigIntBytes, 0, fullBigIntBytes, numBytesPerDim-bigIntBytes.length, bigIntBytes.length);
    if ((bigIntBytes[0] & 0x80) != 0) {
      // sign extend
      Arrays.fill(fullBigIntBytes, 0, numBytesPerDim-bigIntBytes.length, (byte) 0xff);
    }
  } else {
    assert bigIntBytes.length == numBytesPerDim;
    fullBigIntBytes = bigIntBytes;
  }
  // Flip the sign bit so the packed bytes sort as unsigned in numeric order:
  sortableBigIntBytes(fullBigIntBytes);
  System.arraycopy(fullBigIntBytes, 0, result, dim * numBytesPerDim, numBytesPerDim);
  // Round-trip sanity check (assert-only):
  assert bytesToBigInt(result, dim, numBytesPerDim).equals(bigInt): "bigInt=" + bigInt + " converted=" + bytesToBigInt(result, dim, numBytesPerDim);
}
/** Unpacks the {@code numBytesPerDim} sortable bytes at dimension {@code dim}
 *  (written by {@link #bigIntToBytes}) back into a {@link BigInteger}. */
public static BigInteger bytesToBigInt(byte[] bytes, int dim, int numBytesPerDim) {
  final int from = dim * numBytesPerDim;
  final byte[] raw = Arrays.copyOfRange(bytes, from, from + numBytesPerDim);
  // Undo the sign-bit flip (sortableBigIntBytes is an involution):
  sortableBigIntBytes(raw);
  return new BigInteger(raw);
}
}

View File

@ -26,6 +26,7 @@ import org.apache.lucene.index.DimensionalValues.Relation;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.StringHelper;
@ -163,7 +164,7 @@ public class BKDReader implements Accountable {
// With only 1D, all values should always be in sorted order
if (lastPackedValue == null) {
lastPackedValue = Arrays.copyOf(packedValue, packedValue.length);
} else if (BKDUtil.compare(bytesPerDim, lastPackedValue, 0, packedValue, 0) > 0) {
} else if (NumericUtils.compare(bytesPerDim, lastPackedValue, 0, packedValue, 0) > 0) {
throw new RuntimeException("value=" + new BytesRef(packedValue) + " for docID=" + docID + " dim=0" + " sorts before last value=" + new BytesRef(lastPackedValue));
} else {
System.arraycopy(packedValue, 0, lastPackedValue, 0, bytesPerDim);

View File

@ -1,185 +0,0 @@
package org.apache.lucene.util.bkd;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.math.BigInteger;
import java.util.Arrays;
/** Utility methods to convert to/from N-dimensional packed byte[] as unsigned numbers */
public final class BKDUtil {
private BKDUtil() {
// No instance
}
/** Result = a - b, where a &gt;= b, else {@code IllegalArgumentException} is thrown. */
public static void subtract(int bytesPerDim, int dim, byte[] a, byte[] b, byte[] result) {
int start = dim * bytesPerDim;
int end = start + bytesPerDim;
int borrow = 0;
for(int i=end-1;i>=start;i--) {
int diff = (a[i]&0xff) - (b[i]&0xff) - borrow;
if (diff < 0) {
diff += 256;
borrow = 1;
} else {
borrow = 0;
}
result[i-start] = (byte) diff;
}
if (borrow != 0) {
throw new IllegalArgumentException("a < b");
}
}
/** Result = a + b, where a and b are unsigned. If there is an overflow, {@code IllegalArgumentException} is thrown. */
public static void add(int bytesPerDim, int dim, byte[] a, byte[] b, byte[] result) {
int start = dim * bytesPerDim;
int end = start + bytesPerDim;
int carry = 0;
for(int i=end-1;i>=start;i--) {
int digitSum = (a[i]&0xff) + (b[i]&0xff) + carry;
if (digitSum > 255) {
digitSum -= 256;
carry = 1;
} else {
carry = 0;
}
result[i-start] = (byte) digitSum;
}
if (carry != 0) {
throw new IllegalArgumentException("a + b overflows bytesPerDim=" + bytesPerDim);
}
}
/** Returns positive int if a &gt; b, negative int if a &lt; b and 0 if a == b */
public static int compare(int bytesPerDim, byte[] a, int aIndex, byte[] b, int bIndex) {
assert aIndex >= 0;
assert bIndex >= 0;
int aOffset = aIndex*bytesPerDim;
int bOffset = bIndex*bytesPerDim;
for(int i=0;i<bytesPerDim;i++) {
int cmp = (a[aOffset+i]&0xff) - (b[bOffset+i]&0xff);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/** Returns true if N-dim rect A contains N-dim rect B */
public static boolean contains(int bytesPerDim,
byte[] minPackedA, byte[] maxPackedA,
byte[] minPackedB, byte[] maxPackedB) {
int dims = minPackedA.length / bytesPerDim;
for(int dim=0;dim<dims;dim++) {
if (compare(bytesPerDim, minPackedA, dim, minPackedB, dim) > 0) {
return false;
}
if (compare(bytesPerDim, maxPackedA, dim, maxPackedB, dim) < 0) {
return false;
}
}
return true;
}
public static void intToBytes(int x, byte[] dest, int index) {
// Flip the sign bit, so negative ints sort before positive ints correctly:
x ^= 0x80000000;
for(int i=0;i<4;i++) {
dest[4*index+i] = (byte) (x >> 24-i*8);
}
}
public static int bytesToInt(byte[] src, int index) {
int x = 0;
for(int i=0;i<4;i++) {
x |= (src[4*index+i] & 0xff) << (24-i*8);
}
// Re-flip the sign bit to restore the original value:
return x ^ 0x80000000;
}
public static void longToBytes(long v, byte[] bytes, int dim) {
// Flip the sign bit so negative longs sort before positive longs:
v ^= 0x8000000000000000L;
int offset = 8 * dim;
bytes[offset] = (byte) (v >> 56);
bytes[offset+1] = (byte) (v >> 48);
bytes[offset+2] = (byte) (v >> 40);
bytes[offset+3] = (byte) (v >> 32);
bytes[offset+4] = (byte) (v >> 24);
bytes[offset+5] = (byte) (v >> 16);
bytes[offset+6] = (byte) (v >> 8);
bytes[offset+7] = (byte) v;
}
public static long bytesToLong(byte[] bytes, int index) {
int offset = 8 * index;
long v = ((bytes[offset] & 0xffL) << 56) |
((bytes[offset+1] & 0xffL) << 48) |
((bytes[offset+2] & 0xffL) << 40) |
((bytes[offset+3] & 0xffL) << 32) |
((bytes[offset+4] & 0xffL) << 24) |
((bytes[offset+5] & 0xffL) << 16) |
((bytes[offset+6] & 0xffL) << 8) |
(bytes[offset+7] & 0xffL);
// Flip the sign bit back
v ^= 0x8000000000000000L;
return v;
}
public static void sortableBigIntBytes(byte[] bytes) {
bytes[0] ^= 0x80;
for(int i=1;i<bytes.length;i++) {
bytes[i] ^= 0;
}
}
public static void bigIntToBytes(BigInteger bigInt, byte[] result, int dim, int numBytesPerDim) {
byte[] bigIntBytes = bigInt.toByteArray();
byte[] fullBigIntBytes;
if (bigIntBytes.length < numBytesPerDim) {
fullBigIntBytes = new byte[numBytesPerDim];
System.arraycopy(bigIntBytes, 0, fullBigIntBytes, numBytesPerDim-bigIntBytes.length, bigIntBytes.length);
if ((bigIntBytes[0] & 0x80) != 0) {
// sign extend
Arrays.fill(fullBigIntBytes, 0, numBytesPerDim-bigIntBytes.length, (byte) 0xff);
}
} else {
assert bigIntBytes.length == numBytesPerDim;
fullBigIntBytes = bigIntBytes;
}
sortableBigIntBytes(fullBigIntBytes);
System.arraycopy(fullBigIntBytes, 0, result, dim * numBytesPerDim, numBytesPerDim);
assert bytesToBigInt(result, dim, numBytesPerDim).equals(bigInt): "bigInt=" + bigInt + " converted=" + bytesToBigInt(result, dim, numBytesPerDim);
}
public static BigInteger bytesToBigInt(byte[] bytes, int dim, int numBytesPerDim) {
byte[] bigIntBytes = new byte[numBytesPerDim];
System.arraycopy(bytes, dim*numBytesPerDim, bigIntBytes, 0, numBytesPerDim);
sortableBigIntBytes(bigIntBytes);
return new BigInteger(bigIntBytes);
}
}

View File

@ -38,8 +38,9 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.IntroSorter;
import org.apache.lucene.util.LongBitSet;
import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.OfflineSorter;
import org.apache.lucene.util.OfflineSorter.ByteSequencesWriter;
import org.apache.lucene.util.PriorityQueue;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.StringHelper;
@ -91,7 +92,7 @@ public class BKDWriter implements Closeable {
public static final float DEFAULT_MAX_MB_SORT_IN_HEAP = 16.0f;
/** Maximum number of dimensions */
public static final int MAX_DIMS = 255;
public static final int MAX_DIMS = 8;
/** How many dimensions we are indexing */
protected final int numDims;
@ -534,7 +535,7 @@ public class BKDWriter implements Closeable {
int block = j / writer.valuesPerBlock;
int index = j % writer.valuesPerBlock;
assert index >= 0: "index=" + index + " j=" + j;
int cmp = BKDUtil.compare(bytesPerDim, pivotPackedValue, 0, writer.blocks.get(block), index*numDims+dim);
int cmp = NumericUtils.compare(bytesPerDim, pivotPackedValue, 0, writer.blocks.get(block), index*numDims+dim);
if (cmp != 0) {
return cmp;
}
@ -578,7 +579,7 @@ public class BKDWriter implements Closeable {
int dimI = i % writer.valuesPerBlock;
int blockJ = j / writer.valuesPerBlock;
int dimJ = j % writer.valuesPerBlock;
int cmp = BKDUtil.compare(bytesPerDim, writer.blocks.get(blockI), dimI*numDims+dim, writer.blocks.get(blockJ), dimJ*numDims+dim);
int cmp = NumericUtils.compare(bytesPerDim, writer.blocks.get(blockI), dimI*numDims+dim, writer.blocks.get(blockJ), dimJ*numDims+dim);
if (cmp != 0) {
return cmp;
}
@ -641,7 +642,7 @@ public class BKDWriter implements Closeable {
final int docIDB = reader.readVInt();
final long ordB = reader.readVLong();
int cmp = BKDUtil.compare(bytesPerDim, scratch1, dim, scratch2, dim);
int cmp = NumericUtils.compare(bytesPerDim, scratch1, dim, scratch2, dim);
if (cmp != 0) {
return cmp;
@ -919,10 +920,10 @@ public class BKDWriter implements Closeable {
/** Called only in assert */
private boolean valueInBounds(byte[] packedValue, byte[] minPackedValue, byte[] maxPackedValue) {
for(int dim=0;dim<numDims;dim++) {
if (BKDUtil.compare(bytesPerDim, packedValue, dim, minPackedValue, dim) < 0) {
if (NumericUtils.compare(bytesPerDim, packedValue, dim, minPackedValue, dim) < 0) {
return false;
}
if (BKDUtil.compare(bytesPerDim, packedValue, dim, maxPackedValue, dim) > 0) {
if (NumericUtils.compare(bytesPerDim, packedValue, dim, maxPackedValue, dim) > 0) {
return false;
}
}
@ -935,8 +936,8 @@ public class BKDWriter implements Closeable {
// Find which dim has the largest span so we can split on it:
int splitDim = -1;
for(int dim=0;dim<numDims;dim++) {
BKDUtil.subtract(bytesPerDim, dim, maxPackedValue, minPackedValue, scratchDiff);
if (splitDim == -1 || BKDUtil.compare(bytesPerDim, scratchDiff, 0, scratch1, 0) > 0) {
NumericUtils.subtract(bytesPerDim, dim, maxPackedValue, minPackedValue, scratchDiff);
if (splitDim == -1 || NumericUtils.compare(bytesPerDim, scratchDiff, 0, scratch1, 0) > 0) {
System.arraycopy(scratchDiff, 0, scratch1, 0, bytesPerDim);
splitDim = dim;
}
@ -1145,7 +1146,7 @@ public class BKDWriter implements Closeable {
// only called from assert
private boolean valueInOrder(long ord, byte[] lastPackedValue, byte[] packedValue) {
if (ord > 0 && BKDUtil.compare(bytesPerDim, lastPackedValue, 0, packedValue, 0) > 0) {
if (ord > 0 && NumericUtils.compare(bytesPerDim, lastPackedValue, 0, packedValue, 0) > 0) {
throw new AssertionError("values out of order: last value=" + new BytesRef(lastPackedValue) + " current value=" + new BytesRef(packedValue) + " ord=" + ord);
}
System.arraycopy(packedValue, 0, lastPackedValue, 0, bytesPerDim);

View File

@ -17,18 +17,18 @@ package org.apache.lucene;
* limitations under the License.
*/
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.io.PrintWriter;
import java.io.StringWriter;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.*;
import org.apache.lucene.document.*;
import org.apache.lucene.analysis.*;
import org.apache.lucene.document.*;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
/** JUnit adaptation of an older test case SearchTest. */
public class TestSearch extends LuceneTestCase {
@ -125,7 +125,6 @@ public class TestSearch extends LuceneTestCase {
for (int j = 0; j < docs.length; j++) {
Document d = new Document();
d.add(newTextField("contents", docs[j], Field.Store.YES));
d.add(new IntField("id", j, Field.Store.NO));
d.add(new NumericDocValuesField("id", j));
writer.addDocument(d);
}

View File

@ -26,8 +26,8 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
@ -98,7 +98,7 @@ public class TestSearchForDuplicates extends LuceneTestCase {
for (int j = 0; j < MAX_DOCS; j++) {
Document d = new Document();
d.add(newTextField(PRIORITY_FIELD, HIGH_PRIORITY, Field.Store.YES));
d.add(new IntField(ID_FIELD, j, Field.Store.YES));
d.add(new StoredField(ID_FIELD, j));
d.add(new NumericDocValuesField(ID_FIELD, j));
writer.addDocument(d);
}

View File

@ -17,8 +17,8 @@ package org.apache.lucene.analysis;
* limitations under the License.
*/
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttributeImpl;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
import org.apache.lucene.analysis.tokenattributes.TestCharTermAttributeImpl;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
@ -32,21 +32,21 @@ public class TestNumericTokenStream extends BaseTokenStreamTestCase {
public void testLongStream() throws Exception {
@SuppressWarnings("resource")
final NumericTokenStream stream=new NumericTokenStream().setLongValue(lvalue);
final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setLongValue(lvalue);
final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
assertNotNull(bytesAtt);
final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
assertNotNull(typeAtt);
final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
assertNotNull(numericAtt);
stream.reset();
assertEquals(64, numericAtt.getValueSize());
for (int shift=0; shift<64; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
for (int shift=0; shift<64; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
assertTrue("New token is available", stream.incrementToken());
assertEquals("Shift value wrong", shift, numericAtt.getShift());
assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), NumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
assertEquals("Term is incorrectly encoded", lvalue & ~((1L << shift) - 1L), LegacyNumericUtils.prefixCodedToLong(bytesAtt.getBytesRef()));
assertEquals("Term raw value is incorrectly encoded", lvalue & ~((1L << shift) - 1L), numericAtt.getRawValue());
assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
}
assertFalse("More tokens available", stream.incrementToken());
stream.end();
@ -55,21 +55,21 @@ public class TestNumericTokenStream extends BaseTokenStreamTestCase {
public void testIntStream() throws Exception {
@SuppressWarnings("resource")
final NumericTokenStream stream=new NumericTokenStream().setIntValue(ivalue);
final LegacyNumericTokenStream stream=new LegacyNumericTokenStream().setIntValue(ivalue);
final TermToBytesRefAttribute bytesAtt = stream.getAttribute(TermToBytesRefAttribute.class);
assertNotNull(bytesAtt);
final TypeAttribute typeAtt = stream.getAttribute(TypeAttribute.class);
assertNotNull(typeAtt);
final NumericTokenStream.NumericTermAttribute numericAtt = stream.getAttribute(NumericTokenStream.NumericTermAttribute.class);
final LegacyNumericTokenStream.LegacyNumericTermAttribute numericAtt = stream.getAttribute(LegacyNumericTokenStream.LegacyNumericTermAttribute.class);
assertNotNull(numericAtt);
stream.reset();
assertEquals(32, numericAtt.getValueSize());
for (int shift=0; shift<32; shift+=NumericUtils.PRECISION_STEP_DEFAULT) {
for (int shift=0; shift<32; shift+= LegacyNumericUtils.PRECISION_STEP_DEFAULT) {
assertTrue("New token is available", stream.incrementToken());
assertEquals("Shift value wrong", shift, numericAtt.getShift());
assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), NumericUtils.prefixCodedToInt(bytesAtt.getBytesRef()));
assertEquals("Term is incorrectly encoded", ivalue & ~((1 << shift) - 1), LegacyNumericUtils.prefixCodedToInt(bytesAtt.getBytesRef()));
assertEquals("Term raw value is incorrectly encoded", ((long) ivalue) & ~((1L << shift) - 1L), numericAtt.getRawValue());
assertEquals("Type incorrect", (shift == 0) ? NumericTokenStream.TOKEN_TYPE_FULL_PREC : NumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
assertEquals("Type incorrect", (shift == 0) ? LegacyNumericTokenStream.TOKEN_TYPE_FULL_PREC : LegacyNumericTokenStream.TOKEN_TYPE_LOWER_PREC, typeAtt.type());
}
assertFalse("More tokens available", stream.incrementToken());
stream.end();
@ -77,7 +77,7 @@ public class TestNumericTokenStream extends BaseTokenStreamTestCase {
}
public void testNotInitialized() throws Exception {
final NumericTokenStream stream=new NumericTokenStream();
final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
try {
stream.reset();
@ -100,28 +100,28 @@ public class TestNumericTokenStream extends BaseTokenStreamTestCase {
public static class TestAttributeImpl extends CharTermAttributeImpl implements TestAttribute {}
public void testCTA() throws Exception {
final NumericTokenStream stream=new NumericTokenStream();
final LegacyNumericTokenStream stream=new LegacyNumericTokenStream();
try {
stream.addAttribute(CharTermAttribute.class);
fail("Succeeded to add CharTermAttribute.");
} catch (IllegalArgumentException iae) {
assertTrue(iae.getMessage().startsWith("NumericTokenStream does not support"));
assertTrue(iae.getMessage().startsWith("LegacyNumericTokenStream does not support"));
}
try {
stream.addAttribute(TestAttribute.class);
fail("Succeeded to add TestAttribute.");
} catch (IllegalArgumentException iae) {
assertTrue(iae.getMessage().startsWith("NumericTokenStream does not support"));
assertTrue(iae.getMessage().startsWith("LegacyNumericTokenStream does not support"));
}
stream.close();
}
public void testAttributeClone() throws Exception {
NumericTermAttributeImpl att = new NumericTermAttributeImpl();
LegacyNumericTermAttributeImpl att = new LegacyNumericTermAttributeImpl();
att.init(1234L, 64, 8, 0); // set some value, to make getBytesRef() work
NumericTermAttributeImpl copy = TestCharTermAttributeImpl.assertCloneIsEqual(att);
LegacyNumericTermAttributeImpl copy = TestCharTermAttributeImpl.assertCloneIsEqual(att);
assertNotSame(att.getBytesRef(), copy.getBytesRef());
NumericTermAttributeImpl copy2 = TestCharTermAttributeImpl.assertCopyIsEqual(att);
LegacyNumericTermAttributeImpl copy2 = TestCharTermAttributeImpl.assertCopyIsEqual(att);
assertNotSame(att.getBytesRef(), copy2.getBytesRef());
}

View File

@ -360,10 +360,10 @@ public class TestDocument extends LuceneTestCase {
public void testNumericFieldAsString() throws Exception {
Document doc = new Document();
doc.add(new IntField("int", 5, Field.Store.YES));
doc.add(new LegacyIntField("int", 5, Field.Store.YES));
assertEquals("5", doc.get("int"));
assertNull(doc.get("somethingElse"));
doc.add(new IntField("int", 4, Field.Store.YES));
doc.add(new LegacyIntField("int", 4, Field.Store.YES));
assertArrayEquals(new String[] { "5", "4" }, doc.getValues("int"));
Directory dir = newDirectory();

View File

@ -38,8 +38,8 @@ public class TestField extends LuceneTestCase {
public void testDoubleField() throws Exception {
Field fields[] = new Field[] {
new DoubleField("foo", 5d, Field.Store.NO),
new DoubleField("foo", 5d, Field.Store.YES)
new LegacyDoubleField("foo", 5d, Field.Store.NO),
new LegacyDoubleField("foo", 5d, Field.Store.YES)
};
for (Field field : fields) {
@ -100,8 +100,8 @@ public class TestField extends LuceneTestCase {
public void testFloatField() throws Exception {
Field fields[] = new Field[] {
new FloatField("foo", 5f, Field.Store.NO),
new FloatField("foo", 5f, Field.Store.YES)
new LegacyFloatField("foo", 5f, Field.Store.NO),
new LegacyFloatField("foo", 5f, Field.Store.YES)
};
for (Field field : fields) {
@ -124,8 +124,8 @@ public class TestField extends LuceneTestCase {
public void testIntField() throws Exception {
Field fields[] = new Field[] {
new IntField("foo", 5, Field.Store.NO),
new IntField("foo", 5, Field.Store.YES)
new LegacyIntField("foo", 5, Field.Store.NO),
new LegacyIntField("foo", 5, Field.Store.YES)
};
for (Field field : fields) {
@ -167,8 +167,8 @@ public class TestField extends LuceneTestCase {
public void testLongField() throws Exception {
Field fields[] = new Field[] {
new LongField("foo", 5L, Field.Store.NO),
new LongField("foo", 5L, Field.Store.YES)
new LegacyLongField("foo", 5L, Field.Store.NO),
new LegacyLongField("foo", 5L, Field.Store.YES)
};
for (Field field : fields) {

View File

@ -20,7 +20,7 @@ package org.apache.lucene.document;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.util.LuceneTestCase;
@ -60,7 +60,7 @@ public class TestFieldType extends LuceneTestCase {
assertFalse(ft7.equals(ft));
FieldType ft8 = new FieldType();
ft8.setNumericType(NumericType.DOUBLE);
ft8.setNumericType(LegacyNumericType.DOUBLE);
assertFalse(ft8.equals(ft));
FieldType ft9 = new FieldType();

View File

@ -32,13 +32,13 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Pattern;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.search.DimensionalRangeQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@ -681,7 +681,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
Document newDoc = new Document();
long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
newDoc.add(new NumericDocValuesField("number", value));
newDoc.add(new LongField("number", value, Field.Store.NO));
newDoc.add(new DimensionalLongField("number", value));
w.addDocument(newDoc);
}
@ -737,7 +737,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
Document newDoc = new Document();
long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
newDoc.add(new NumericDocValuesField("number_" + newSchemaGen, value));
newDoc.add(new LongField("number", value, Field.Store.NO));
newDoc.add(new DimensionalLongField("number", value));
w.addDocument(newDoc);
}
} else {
@ -832,7 +832,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
Document newDoc = new Document();
long value = Long.parseLong(oldDoc.get("text").split(" ")[1]);
newDoc.add(new NumericDocValuesField("number", newSchemaGen*value));
newDoc.add(new LongField("number", value, Field.Store.NO));
newDoc.add(new DimensionalLongField("number", value));
w.addDocument(newDoc);
}
} else {
@ -1168,7 +1168,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
checkAllNumberDVs(r);
IndexSearcher s = newSearcher(r);
testNumericDVSort(s);
testNumericRangeQuery(s);
testDimensionalRangeQuery(s);
} finally {
reindexer.mgr.release(r);
}
@ -1190,7 +1190,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
checkAllNumberDVs(r);
IndexSearcher s = newSearcher(r);
testNumericDVSort(s);
testNumericRangeQuery(s);
testDimensionalRangeQuery(s);
} finally {
reindexer.mgr.release(r);
}
@ -1209,7 +1209,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
checkAllNumberDVs(r);
IndexSearcher s = newSearcher(r);
testNumericDVSort(s);
testNumericRangeQuery(s);
testDimensionalRangeQuery(s);
} finally {
reindexer.mgr.release(r);
}
@ -1261,7 +1261,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
checkAllNumberDVs(r);
IndexSearcher s = newSearcher(r);
testNumericDVSort(s);
testNumericRangeQuery(s);
testDimensionalRangeQuery(s);
} finally {
reindexer.mgr.release(r);
}
@ -1340,7 +1340,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
}
}
private static void testNumericRangeQuery(IndexSearcher s) throws IOException {
private static void testDimensionalRangeQuery(IndexSearcher s) throws IOException {
NumericDocValues numbers = MultiDocValues.getNumericValues(s.getIndexReader(), "number");
for(int i=0;i<100;i++) {
// Confirm we can range search by the new indexed (numeric) field:
@ -1352,7 +1352,7 @@ public class TestDemoParallelLeafReader extends LuceneTestCase {
max = x;
}
TopDocs hits = s.search(NumericRangeQuery.newLongRange("number", min, max, true, true), 100);
TopDocs hits = s.search(DimensionalRangeQuery.new1DLongRange("number", min, true, max, true), 100);
for(ScoreDoc scoreDoc : hits.scoreDocs) {
long value = Long.parseLong(s.doc(scoreDoc.doc).get("text").split(" ")[1]);
assertTrue(value >= min);

View File

@ -31,7 +31,8 @@ import org.apache.lucene.codecs.DimensionalWriter;
import org.apache.lucene.codecs.FilterCodec;
import org.apache.lucene.codecs.lucene60.Lucene60DimensionalReader;
import org.apache.lucene.codecs.lucene60.Lucene60DimensionalWriter;
import org.apache.lucene.document.DimensionalField;
import org.apache.lucene.document.DimensionalBinaryField;
import org.apache.lucene.document.DimensionalIntField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
@ -44,9 +45,8 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.bkd.BKDUtil;
import org.apache.lucene.util.bkd.BKDWriter;
// TODO: factor out a BaseTestDimensionFormat
@ -60,8 +60,8 @@ public class TestDimensionalValues extends LuceneTestCase {
byte[] point = new byte[4];
for(int i=0;i<20;i++) {
Document doc = new Document();
BKDUtil.intToBytes(i, point, 0);
doc.add(new DimensionalField("dim", point));
NumericUtils.intToBytes(i, point, 0);
doc.add(new DimensionalBinaryField("dim", point));
w.addDocument(doc);
}
w.forceMerge(1);
@ -84,7 +84,7 @@ public class TestDimensionalValues extends LuceneTestCase {
}
public void visit(int docID, byte[] packedValue) {
seen.set(docID);
assertEquals(docID, BKDUtil.bytesToInt(packedValue, 0));
assertEquals(docID, NumericUtils.bytesToInt(packedValue, 0));
}
});
assertEquals(20, seen.cardinality());
@ -99,8 +99,8 @@ public class TestDimensionalValues extends LuceneTestCase {
byte[] point = new byte[4];
for(int i=0;i<20;i++) {
Document doc = new Document();
BKDUtil.intToBytes(i, point, 0);
doc.add(new DimensionalField("dim", point));
NumericUtils.intToBytes(i, point, 0);
doc.add(new DimensionalBinaryField("dim", point));
w.addDocument(doc);
if (i == 10) {
w.commit();
@ -126,7 +126,7 @@ public class TestDimensionalValues extends LuceneTestCase {
}
public void visit(int docID, byte[] packedValue) {
seen.set(docID);
assertEquals(docID, BKDUtil.bytesToInt(packedValue, 0));
assertEquals(docID, NumericUtils.bytesToInt(packedValue, 0));
}
});
assertEquals(20, seen.cardinality());
@ -140,8 +140,8 @@ public class TestDimensionalValues extends LuceneTestCase {
byte[] point = new byte[4];
for(int i=0;i<10;i++) {
Document doc = new Document();
BKDUtil.intToBytes(i, point, 0);
doc.add(new DimensionalField("dim", point));
NumericUtils.intToBytes(i, point, 0);
doc.add(new DimensionalBinaryField("dim", point));
doc.add(new NumericDocValuesField("id", i));
doc.add(newStringField("x", "x", Field.Store.NO));
w.addDocument(doc);
@ -173,7 +173,7 @@ public class TestDimensionalValues extends LuceneTestCase {
if (liveDocs.get(docID)) {
seen.set(docID);
}
assertEquals(idValues.get(docID), BKDUtil.bytesToInt(packedValue, 0));
assertEquals(idValues.get(docID), NumericUtils.bytesToInt(packedValue, 0));
}
});
assertEquals(0, seen.cardinality());
@ -184,8 +184,8 @@ public class TestDimensionalValues extends LuceneTestCase {
/** Make sure we close open files, delete temp files, etc., on exception */
public void testWithExceptions() throws Exception {
int numDocs = atLeast(10000);
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
byte[][][] docValues = new byte[numDocs][][];
@ -244,8 +244,8 @@ public class TestDimensionalValues extends LuceneTestCase {
}
public void testMultiValued() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
int numDocs = atLeast(1000);
List<byte[][]> docValues = new ArrayList<>();
@ -274,8 +274,8 @@ public class TestDimensionalValues extends LuceneTestCase {
}
public void testAllEqual() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
int numDocs = atLeast(1000);
byte[][][] docValues = new byte[numDocs][][];
@ -297,8 +297,8 @@ public class TestDimensionalValues extends LuceneTestCase {
}
public void testOneDimEqual() throws Exception {
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
int numDocs = atLeast(1000);
int theEqualDim = random().nextInt(numDims);
@ -324,8 +324,8 @@ public class TestDimensionalValues extends LuceneTestCase {
int numDocs = atLeast(1000);
try (Directory dir = getDirectory(numDocs)) {
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
// We rely on docIDs not changing:
iwc.setMergePolicy(newLogMergePolicy());
@ -341,14 +341,14 @@ public class TestDimensionalValues extends LuceneTestCase {
for(int dim=0;dim<numDims;dim++) {
values[dim] = randomBigInt(numBytesPerDim);
bytes[dim] = new byte[numBytesPerDim];
BKDUtil.bigIntToBytes(values[dim], bytes[dim], 0, numBytesPerDim);
NumericUtils.bigIntToBytes(values[dim], bytes[dim], 0, numBytesPerDim);
if (VERBOSE) {
System.out.println(" " + dim + " -> " + values[dim]);
}
}
docs[docID] = values;
Document doc = new Document();
doc.add(new DimensionalField("field", bytes));
doc.add(new DimensionalBinaryField("field", bytes));
w.addDocument(doc);
}
@ -391,7 +391,7 @@ public class TestDimensionalValues extends LuceneTestCase {
public void visit(int docID, byte[] packedValue) {
//System.out.println("visit check docID=" + docID);
for(int dim=0;dim<numDims;dim++) {
BigInteger x = BKDUtil.bytesToBigInt(packedValue, dim, numBytesPerDim);
BigInteger x = NumericUtils.bytesToBigInt(packedValue, dim, numBytesPerDim);
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
//System.out.println(" no");
return;
@ -406,8 +406,8 @@ public class TestDimensionalValues extends LuceneTestCase {
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
for(int dim=0;dim<numDims;dim++) {
BigInteger min = BKDUtil.bytesToBigInt(minPacked, dim, numBytesPerDim);
BigInteger max = BKDUtil.bytesToBigInt(maxPacked, dim, numBytesPerDim);
BigInteger min = NumericUtils.bytesToBigInt(minPacked, dim, numBytesPerDim);
BigInteger max = NumericUtils.bytesToBigInt(maxPacked, dim, numBytesPerDim);
assert max.compareTo(min) >= 0;
if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
@ -469,7 +469,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = newIndexWriterConfig();
w = new IndexWriter(dir, iwc);
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.close();
dir.close();
}
@ -481,8 +481,8 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -498,10 +498,10 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -517,11 +517,11 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.commit();
doc = new Document();
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -537,13 +537,13 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -559,7 +559,7 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
@ -567,7 +567,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir2, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
w.addDocument(doc);
try {
w.addIndexes(new Directory[] {dir});
@ -582,7 +582,7 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
@ -590,7 +590,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir2, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(dir);
try {
@ -606,7 +606,7 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
@ -614,7 +614,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir2, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[4], new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4], new byte[4]));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(dir);
try {
@ -625,33 +625,13 @@ public class TestDimensionalValues extends LuceneTestCase {
IOUtils.close(r, w, dir, dir2);
}
public void testIllegalTooManyDimensions() throws Exception {
Directory dir = getDirectory(1);
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
byte[][] values = new byte[BKDWriter.MAX_DIMS+1][];
for(int i=0;i<values.length;i++) {
values[i] = new byte[4];
}
doc.add(new DimensionalField("dim", values));
w.addDocument(doc);
try {
w.close();
} catch (IllegalArgumentException iae) {
// expected
assertEquals("numDims must be 1 .. 255 (got: 256)", iae.getMessage());
}
dir.close();
}
public void testIllegalNumBytesChangeOneDoc() throws Exception {
Directory dir = getDirectory(1);
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -667,10 +647,10 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -686,11 +666,11 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.commit();
doc = new Document();
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -706,13 +686,13 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
try {
w.addDocument(doc);
} catch (IllegalArgumentException iae) {
@ -728,7 +708,7 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
@ -736,7 +716,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir2, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
w.addDocument(doc);
try {
w.addIndexes(new Directory[] {dir});
@ -751,7 +731,7 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
@ -759,7 +739,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir2, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(dir);
try {
@ -775,7 +755,7 @@ public class TestDimensionalValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("dim", new byte[4]));
doc.add(new DimensionalBinaryField("dim", new byte[4]));
w.addDocument(doc);
w.close();
@ -783,7 +763,7 @@ public class TestDimensionalValues extends LuceneTestCase {
iwc = new IndexWriterConfig(new MockAnalyzer(random()));
w = new IndexWriter(dir2, iwc);
doc = new Document();
doc.add(new DimensionalField("dim", new byte[6]));
doc.add(new DimensionalBinaryField("dim", new byte[6]));
w.addDocument(doc);
DirectoryReader r = DirectoryReader.open(dir);
try {
@ -794,10 +774,52 @@ public class TestDimensionalValues extends LuceneTestCase {
IOUtils.close(r, w, dir, dir2);
}
public void testIllegalTooManyBytes() throws Exception {
Directory dir = getDirectory(1);
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new DimensionalBinaryField("dim", new byte[DimensionalValues.MAX_NUM_BYTES+1]));
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (IllegalArgumentException iae) {
// expected
}
doc = new Document();
doc.add(new DimensionalIntField("dim", 17));
w.addDocument(doc);
w.close();
dir.close();
}
public void testIllegalTooManyDimensions() throws Exception {
Directory dir = getDirectory(1);
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
byte[][] values = new byte[DimensionalValues.MAX_DIMENSIONS+1][];
for(int i=0;i<values.length;i++) {
values[i] = new byte[4];
}
doc.add(new DimensionalBinaryField("dim", values));
try {
w.addDocument(doc);
fail("did not hit exception");
} catch (IllegalArgumentException iae) {
// expected
}
doc = new Document();
doc.add(new DimensionalIntField("dim", 17));
w.addDocument(doc);
w.close();
dir.close();
}
private void doTestRandomBinary(int count) throws Exception {
int numDocs = TestUtil.nextInt(random(), count, count*2);
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
byte[][][] docValues = new byte[numDocs][][];
@ -935,7 +957,7 @@ public class TestDimensionalValues extends LuceneTestCase {
doc = new Document();
doc.add(new NumericDocValuesField("id", id));
}
doc.add(new DimensionalField("field", docValues[ord]));
doc.add(new DimensionalBinaryField("field", docValues[ord]));
lastID = id;
if (random().nextInt(30) == 17) {
@ -953,7 +975,7 @@ public class TestDimensionalValues extends LuceneTestCase {
if (random().nextInt(30) == 17) {
// randomly index some documents with this field, but we will delete them:
Document xdoc = new Document();
xdoc.add(new DimensionalField("field", docValues[ord]));
xdoc.add(new DimensionalBinaryField("field", docValues[ord]));
xdoc.add(new StringField("nukeme", "yes", Field.Store.NO));
if (useRealWriter) {
w.w.addDocument(xdoc);
@ -1028,7 +1050,7 @@ public class TestDimensionalValues extends LuceneTestCase {
random().nextBytes(queryMin[dim]);
queryMax[dim] = new byte[numBytesPerDim];
random().nextBytes(queryMax[dim]);
if (BKDUtil.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
byte[] x = queryMin[dim];
queryMin[dim] = queryMax[dim];
queryMax[dim] = x;
@ -1060,8 +1082,8 @@ public class TestDimensionalValues extends LuceneTestCase {
//System.out.println("visit check docID=" + docID + " id=" + idValues.get(docID));
for(int dim=0;dim<numDims;dim++) {
//System.out.println(" dim=" + dim + " value=" + new BytesRef(packedValue, dim*numBytesPerDim, numBytesPerDim));
if (BKDUtil.compare(numBytesPerDim, packedValue, dim, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, packedValue, dim, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, packedValue, dim, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, packedValue, dim, queryMax[dim], 0) > 0) {
//System.out.println(" no");
return;
}
@ -1076,12 +1098,12 @@ public class TestDimensionalValues extends LuceneTestCase {
boolean crosses = false;
//System.out.println("compare");
for(int dim=0;dim<numDims;dim++) {
if (BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
//System.out.println(" query_outside_cell");
return Relation.CELL_OUTSIDE_QUERY;
} else if (BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
} else if (NumericUtils.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
crosses = true;
}
}
@ -1101,8 +1123,8 @@ public class TestDimensionalValues extends LuceneTestCase {
boolean matches = true;
for(int dim=0;dim<numDims;dim++) {
byte[] x = docValues[ord][dim];
if (BKDUtil.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
matches = false;
break;
}

View File

@ -23,15 +23,15 @@ import java.util.Collections;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.analysis.NumericTokenStream.NumericTermAttribute;
import org.apache.lucene.analysis.LegacyNumericTokenStream;
import org.apache.lucene.analysis.LegacyNumericTokenStream.LegacyNumericTermAttribute;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/** test tokenstream reuse by DefaultIndexingChain */
public class TestFieldReuse extends BaseTokenStreamTestCase {
@ -59,7 +59,7 @@ public class TestFieldReuse extends BaseTokenStreamTestCase {
// pass a bogus stream and ensure it's still ok
stringField = new StringField("foo", "beer", Field.Store.NO);
TokenStream bogus = new NumericTokenStream();
TokenStream bogus = new LegacyNumericTokenStream();
ts = stringField.tokenStream(null, bogus);
assertNotSame(ts, bogus);
assertTokenStreamContents(ts,
@ -70,32 +70,32 @@ public class TestFieldReuse extends BaseTokenStreamTestCase {
}
public void testNumericReuse() throws IOException {
IntField intField = new IntField("foo", 5, Field.Store.NO);
LegacyIntField legacyIntField = new LegacyIntField("foo", 5, Field.Store.NO);
// passing null
TokenStream ts = intField.tokenStream(null, null);
assertTrue(ts instanceof NumericTokenStream);
assertEquals(NumericUtils.PRECISION_STEP_DEFAULT_32, ((NumericTokenStream)ts).getPrecisionStep());
TokenStream ts = legacyIntField.tokenStream(null, null);
assertTrue(ts instanceof LegacyNumericTokenStream);
assertEquals(LegacyNumericUtils.PRECISION_STEP_DEFAULT_32, ((LegacyNumericTokenStream)ts).getPrecisionStep());
assertNumericContents(5, ts);
// now reuse previous stream
intField = new IntField("foo", 20, Field.Store.NO);
TokenStream ts2 = intField.tokenStream(null, ts);
legacyIntField = new LegacyIntField("foo", 20, Field.Store.NO);
TokenStream ts2 = legacyIntField.tokenStream(null, ts);
assertSame(ts, ts2);
assertNumericContents(20, ts);
// pass a bogus stream and ensure it's still ok
intField = new IntField("foo", 2343, Field.Store.NO);
legacyIntField = new LegacyIntField("foo", 2343, Field.Store.NO);
TokenStream bogus = new CannedTokenStream(new Token("bogus", 0, 5));
ts = intField.tokenStream(null, bogus);
ts = legacyIntField.tokenStream(null, bogus);
assertNotSame(bogus, ts);
assertNumericContents(2343, ts);
// pass another bogus stream (numeric, but different precision step!)
intField = new IntField("foo", 42, Field.Store.NO);
assert 3 != NumericUtils.PRECISION_STEP_DEFAULT;
bogus = new NumericTokenStream(3);
ts = intField.tokenStream(null, bogus);
legacyIntField = new LegacyIntField("foo", 42, Field.Store.NO);
assert 3 != LegacyNumericUtils.PRECISION_STEP_DEFAULT;
bogus = new LegacyNumericTokenStream(3);
ts = legacyIntField.tokenStream(null, bogus);
assertNotSame(bogus, ts);
assertNumericContents(42, ts);
}
@ -161,8 +161,8 @@ public class TestFieldReuse extends BaseTokenStreamTestCase {
}
private void assertNumericContents(int value, TokenStream ts) throws IOException {
assertTrue(ts instanceof NumericTokenStream);
NumericTermAttribute numericAtt = ts.getAttribute(NumericTermAttribute.class);
assertTrue(ts instanceof LegacyNumericTokenStream);
LegacyNumericTermAttribute numericAtt = ts.getAttribute(LegacyNumericTermAttribute.class);
ts.reset();
boolean seen = false;
while (ts.incrementToken()) {

View File

@ -33,7 +33,6 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
@ -239,7 +238,6 @@ public class TestPostingsOffsets extends LuceneTestCase {
for(int docCount=0;docCount<numDocs;docCount++) {
Document doc = new Document();
doc.add(new IntField("id", docCount, Field.Store.YES));
doc.add(new NumericDocValuesField("id", docCount));
List<Token> tokens = new ArrayList<>();
final int numTokens = atLeast(100);

View File

@ -17,20 +17,18 @@ package org.apache.lucene.index;
* limitations under the License.
*/
import java.util.*;
import org.apache.lucene.analysis.CannedBinaryTokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
public class TestTerms extends LuceneTestCase {
@ -92,8 +90,8 @@ public class TestTerms extends LuceneTestCase {
}
public void testEmptyIntFieldMinMax() throws Exception {
assertNull(NumericUtils.getMinInt(EMPTY_TERMS));
assertNull(NumericUtils.getMaxInt(EMPTY_TERMS));
assertNull(LegacyNumericUtils.getMinInt(EMPTY_TERMS));
assertNull(LegacyNumericUtils.getMaxInt(EMPTY_TERMS));
}
public void testIntFieldMinMax() throws Exception {
@ -107,14 +105,14 @@ public class TestTerms extends LuceneTestCase {
int num = random().nextInt();
minValue = Math.min(num, minValue);
maxValue = Math.max(num, maxValue);
doc.add(new IntField("field", num, Field.Store.NO));
doc.add(new LegacyIntField("field", num, Field.Store.NO));
w.addDocument(doc);
}
IndexReader r = w.getReader();
Terms terms = MultiFields.getTerms(r, "field");
assertEquals(new Integer(minValue), NumericUtils.getMinInt(terms));
assertEquals(new Integer(maxValue), NumericUtils.getMaxInt(terms));
assertEquals(new Integer(minValue), LegacyNumericUtils.getMinInt(terms));
assertEquals(new Integer(maxValue), LegacyNumericUtils.getMaxInt(terms));
r.close();
w.close();
@ -122,8 +120,8 @@ public class TestTerms extends LuceneTestCase {
}
public void testEmptyLongFieldMinMax() throws Exception {
assertNull(NumericUtils.getMinLong(EMPTY_TERMS));
assertNull(NumericUtils.getMaxLong(EMPTY_TERMS));
assertNull(LegacyNumericUtils.getMinLong(EMPTY_TERMS));
assertNull(LegacyNumericUtils.getMaxLong(EMPTY_TERMS));
}
public void testLongFieldMinMax() throws Exception {
@ -137,15 +135,15 @@ public class TestTerms extends LuceneTestCase {
long num = random().nextLong();
minValue = Math.min(num, minValue);
maxValue = Math.max(num, maxValue);
doc.add(new LongField("field", num, Field.Store.NO));
doc.add(new LegacyLongField("field", num, Field.Store.NO));
w.addDocument(doc);
}
IndexReader r = w.getReader();
Terms terms = MultiFields.getTerms(r, "field");
assertEquals(new Long(minValue), NumericUtils.getMinLong(terms));
assertEquals(new Long(maxValue), NumericUtils.getMaxLong(terms));
assertEquals(new Long(minValue), LegacyNumericUtils.getMinLong(terms));
assertEquals(new Long(maxValue), LegacyNumericUtils.getMaxLong(terms));
r.close();
w.close();
@ -163,14 +161,14 @@ public class TestTerms extends LuceneTestCase {
float num = random().nextFloat();
minValue = Math.min(num, minValue);
maxValue = Math.max(num, maxValue);
doc.add(new FloatField("field", num, Field.Store.NO));
doc.add(new LegacyFloatField("field", num, Field.Store.NO));
w.addDocument(doc);
}
IndexReader r = w.getReader();
Terms terms = MultiFields.getTerms(r, "field");
assertEquals(minValue, NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms)), 0.0f);
assertEquals(maxValue, NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms)), 0.0f);
assertEquals(minValue, LegacyNumericUtils.sortableIntToFloat(LegacyNumericUtils.getMinInt(terms)), 0.0f);
assertEquals(maxValue, LegacyNumericUtils.sortableIntToFloat(LegacyNumericUtils.getMaxInt(terms)), 0.0f);
r.close();
w.close();
@ -188,7 +186,7 @@ public class TestTerms extends LuceneTestCase {
double num = random().nextDouble();
minValue = Math.min(num, minValue);
maxValue = Math.max(num, maxValue);
doc.add(new DoubleField("field", num, Field.Store.NO));
doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
w.addDocument(doc);
}
@ -196,8 +194,8 @@ public class TestTerms extends LuceneTestCase {
Terms terms = MultiFields.getTerms(r, "field");
assertEquals(minValue, NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms)), 0.0);
assertEquals(maxValue, NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms)), 0.0);
assertEquals(minValue, LegacyNumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
assertEquals(maxValue, LegacyNumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);
r.close();
w.close();

View File

@ -23,7 +23,6 @@ import java.util.*;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.store.Directory;
@ -158,7 +157,6 @@ public class TestTermsEnum extends LuceneTestCase {
private void addDoc(RandomIndexWriter w, Collection<String> terms, Map<BytesRef,Integer> termToID, int id) throws IOException {
Document doc = new Document();
doc.add(new IntField("id", id, Field.Store.YES));
doc.add(new NumericDocValuesField("id", id));
if (VERBOSE) {
System.out.println("TEST: addDoc id:" + id + " terms=" + terms);

View File

@ -21,13 +21,16 @@ import java.io.IOException;
import java.util.Random;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.DimensionalDoubleField;
import org.apache.lucene.document.DimensionalFloatField;
import org.apache.lucene.document.DimensionalIntField;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.IndexReader;
@ -124,13 +127,13 @@ public class BaseTestRangeFilter extends LuceneTestCase {
Document doc = new Document();
Field idField = newStringField(random, "id", "", Field.Store.YES);
Field idDVField = new SortedDocValuesField("id", new BytesRef());
Field intIdField = new IntField("id_int", 0, Store.YES);
Field intIdField = new DimensionalIntField("id_int", 0);
Field intDVField = new NumericDocValuesField("id_int", 0);
Field floatIdField = new FloatField("id_float", 0, Store.YES);
Field floatIdField = new DimensionalFloatField("id_float", 0);
Field floatDVField = new NumericDocValuesField("id_float", 0);
Field longIdField = new LongField("id_long", 0, Store.YES);
Field longIdField = new DimensionalLongField("id_long", 0);
Field longDVField = new NumericDocValuesField("id_long", 0);
Field doubleIdField = new DoubleField("id_double", 0, Store.YES);
Field doubleIdField = new DimensionalDoubleField("id_double", 0);
Field doubleDVField = new NumericDocValuesField("id_double", 0);
Field randField = newStringField(random, "rand", "", Field.Store.YES);
Field randDVField = new SortedDocValuesField("rand", new BytesRef());

View File

@ -33,11 +33,13 @@ import org.apache.lucene.codecs.DimensionalWriter;
import org.apache.lucene.codecs.FilterCodec;
import org.apache.lucene.codecs.lucene60.Lucene60DimensionalReader;
import org.apache.lucene.codecs.lucene60.Lucene60DimensionalWriter;
import org.apache.lucene.document.DimensionalField;
import org.apache.lucene.document.DimensionalBinaryField;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.DimensionalValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
@ -52,8 +54,8 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.bkd.BKDUtil;
import org.junit.BeforeClass;
public class TestDimensionalRangeQuery extends LuceneTestCase {
@ -149,8 +151,8 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
for(int i=0;i<10000;i++) {
long v = random().nextLong();
byte[] tmp = new byte[8];
BKDUtil.longToBytes(v, tmp, 0);
long v2 = BKDUtil.bytesToLong(tmp, 0);
NumericUtils.longToBytes(v, tmp, 0);
long v2 = NumericUtils.bytesToLong(tmp, 0);
assertEquals("got bytes=" + Arrays.toString(tmp), v, v2);
}
}
@ -221,10 +223,10 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
}
if (missing.get(id) == false) {
doc.add(new DimensionalField("sn_value", values[id]));
doc.add(new DimensionalLongField("sn_value", values[id]));
byte[] bytes = new byte[8];
BKDUtil.longToBytes(values[id], bytes, 0);
doc.add(new DimensionalField("ss_value", bytes));
NumericUtils.longToBytes(values[id], bytes, 0);
doc.add(new DimensionalBinaryField("ss_value", bytes));
}
}
@ -287,33 +289,33 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
System.out.println("\n" + Thread.currentThread().getName() + ": TEST: iter=" + iter + " value=" + lower + " (inclusive?=" + includeLower + ") TO " + upper + " (inclusive?=" + includeUpper + ")");
byte[] tmp = new byte[8];
if (lower != null) {
BKDUtil.longToBytes(lower, tmp, 0);
NumericUtils.longToBytes(lower, tmp, 0);
System.out.println(" lower bytes=" + Arrays.toString(tmp));
}
if (upper != null) {
BKDUtil.longToBytes(upper, tmp, 0);
NumericUtils.longToBytes(upper, tmp, 0);
System.out.println(" upper bytes=" + Arrays.toString(tmp));
}
}
if (random().nextBoolean()) {
query = new DimensionalRangeQuery("sn_value", lower, includeLower, upper, includeUpper);
query = DimensionalRangeQuery.new1DLongRange("sn_value", lower, includeLower, upper, includeUpper);
} else {
byte[] lowerBytes;
if (lower == null) {
lowerBytes = null;
} else {
lowerBytes = new byte[8];
BKDUtil.longToBytes(lower, lowerBytes, 0);
NumericUtils.longToBytes(lower, lowerBytes, 0);
}
byte[] upperBytes;
if (upper == null) {
upperBytes = null;
} else {
upperBytes = new byte[8];
BKDUtil.longToBytes(upper, upperBytes, 0);
NumericUtils.longToBytes(upper, upperBytes, 0);
}
query = new DimensionalRangeQuery("ss_value", lowerBytes, includeLower, upperBytes, includeUpper);
query = DimensionalRangeQuery.new1DBinaryRange("ss_value", lowerBytes, includeLower, upperBytes, includeUpper);
}
if (VERBOSE) {
@ -382,8 +384,8 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
private void doTestRandomBinary(int count) throws Exception {
int numValues = TestUtil.nextInt(random(), count, count*2);
int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
int numDims = TestUtil.nextInt(random(), 1, 5);
int numBytesPerDim = TestUtil.nextInt(random(), 2, DimensionalValues.MAX_NUM_BYTES);
int numDims = TestUtil.nextInt(random(), 1, DimensionalValues.MAX_DIMENSIONS);
int sameValuePct = random().nextInt(100);
@ -485,7 +487,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
}
if (missing.get(id) == false) {
doc.add(new DimensionalField("value", docValues[ord]));
doc.add(new DimensionalBinaryField("value", docValues[ord]));
if (VERBOSE) {
System.out.println("id=" + id);
for(int dim=0;dim<numDims;dim++) {
@ -558,7 +560,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
// open-ended on the upper bound
}
if (lower[dim] != null && upper[dim] != null && BKDUtil.compare(bytesPerDim, lower[dim], 0, upper[dim], 0) > 0) {
if (lower[dim] != null && upper[dim] != null && NumericUtils.compare(bytesPerDim, lower[dim], 0, upper[dim], 0) > 0) {
byte[] x = lower[dim];
lower[dim] = upper[dim];
upper[dim] = x;
@ -677,7 +679,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
if (lower[dim] == null) {
cmp = 1;
} else {
cmp = BKDUtil.compare(bytesPerDim, value[dim], 0, lower[dim], 0);
cmp = NumericUtils.compare(bytesPerDim, value[dim], 0, lower[dim], 0);
}
if (cmp < 0 || (cmp == 0 && includeLower[dim] == false)) {
@ -688,7 +690,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
if (upper[dim] == null) {
cmp = -1;
} else {
cmp = BKDUtil.compare(bytesPerDim, value[dim], 0, upper[dim], 0);
cmp = NumericUtils.compare(bytesPerDim, value[dim], 0, upper[dim], 0);
}
if (cmp > 0 || (cmp == 0 && includeUpper[dim] == false)) {
@ -718,20 +720,20 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", Long.MIN_VALUE));
doc.add(new DimensionalLongField("value", Long.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("value", Long.MAX_VALUE));
doc.add(new DimensionalLongField("value", Long.MAX_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
assertEquals(1, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, true, 0L, true)));
assertEquals(1, s.count(new DimensionalRangeQuery("value", 0L, true, Long.MAX_VALUE, true)));
assertEquals(2, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, true, Long.MAX_VALUE, true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, true, 0L, true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DLongRange("value", 0L, true, Long.MAX_VALUE, true)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, true, Long.MAX_VALUE, true)));
IOUtils.close(r, w, dir);
}
@ -757,61 +759,61 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", toUTF8("abc")));
doc.add(new DimensionalBinaryField("value", toUTF8("abc")));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("value", toUTF8("def")));
doc.add(new DimensionalBinaryField("value", toUTF8("def")));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
assertEquals(1, s.count(new DimensionalRangeQuery("value",
toUTF8("aaa"),
true,
toUTF8("bbb"),
true)));
assertEquals(1, s.count(new DimensionalRangeQuery("value",
toUTF8("c", 3),
true,
toUTF8("e", 3),
true)));
assertEquals(2, s.count(new DimensionalRangeQuery("value",
toUTF8("a", 3),
true,
toUTF8("z", 3),
true)));
assertEquals(1, s.count(new DimensionalRangeQuery("value",
null,
true,
toUTF8("abc"),
true)));
assertEquals(1, s.count(new DimensionalRangeQuery("value",
toUTF8("a", 3),
true,
toUTF8("abc"),
true)));
assertEquals(0, s.count(new DimensionalRangeQuery("value",
toUTF8("a", 3),
true,
toUTF8("abc"),
false)));
assertEquals(1, s.count(new DimensionalRangeQuery("value",
toUTF8("def"),
true,
null,
false)));
assertEquals(1, s.count(new DimensionalRangeQuery("value",
toUTF8(("def")),
true,
toUTF8("z", 3),
true)));
assertEquals(0, s.count(new DimensionalRangeQuery("value",
toUTF8("def"),
false,
toUTF8("z", 3),
true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("aaa"),
true,
toUTF8("bbb"),
true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("c", 3),
true,
toUTF8("e", 3),
true)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("a", 3),
true,
toUTF8("z", 3),
true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
null,
true,
toUTF8("abc"),
true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("a", 3),
true,
toUTF8("abc"),
true)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("a", 3),
true,
toUTF8("abc"),
false)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("def"),
true,
null,
false)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8(("def")),
true,
toUTF8("z", 3),
true)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DBinaryRange("value",
toUTF8("def"),
false,
toUTF8("z", 3),
true)));
IOUtils.close(r, w, dir);
}
@ -822,22 +824,22 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", Long.MIN_VALUE));
doc.add(new DimensionalLongField("value", Long.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("value", Long.MAX_VALUE));
doc.add(new DimensionalLongField("value", Long.MAX_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
assertEquals(2, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, true, Long.MAX_VALUE, true)));
assertEquals(1, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, true, Long.MAX_VALUE, false)));
assertEquals(1, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, false, Long.MAX_VALUE, true)));
assertEquals(0, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, false, Long.MAX_VALUE, false)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, true, Long.MAX_VALUE, true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, true, Long.MAX_VALUE, false)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, false, Long.MAX_VALUE, true)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, false, Long.MAX_VALUE, false)));
assertEquals(2, s.count(new DimensionalRangeQuery("value", (byte[]) null, true, null, true)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DBinaryRange("value", (byte[]) null, true, null, true)));
IOUtils.close(r, w, dir);
}
@ -848,10 +850,10 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", Long.MIN_VALUE));
doc.add(new DimensionalLongField("value", Long.MIN_VALUE));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("value", Long.MAX_VALUE));
doc.add(new DimensionalLongField("value", Long.MAX_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
@ -859,12 +861,12 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
// We can't wrap with "exotic" readers because the query must see the RangeTreeDVFormat:
IndexSearcher s = newSearcher(r, false);
assertEquals(2, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, true, Long.MAX_VALUE, true)));
assertEquals(1, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, true, Long.MAX_VALUE, false)));
assertEquals(1, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, false, Long.MAX_VALUE, true)));
assertEquals(0, s.count(new DimensionalRangeQuery("value", Long.MIN_VALUE, false, Long.MAX_VALUE, false)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, true, Long.MAX_VALUE, true)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, true, Long.MAX_VALUE, false)));
assertEquals(1, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, false, Long.MAX_VALUE, true)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DLongRange("value", Long.MIN_VALUE, false, Long.MAX_VALUE, false)));
assertEquals(2, s.count(new DimensionalRangeQuery("value", (Long) null, true, null, true)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DLongRange("value", (Long) null, true, null, true)));
IOUtils.close(r, w, dir);
}
@ -875,18 +877,18 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", toUTF8("a")));
doc.add(new DimensionalBinaryField("value", toUTF8("a")));
w.addDocument(doc);
doc = new Document();
doc.add(new DimensionalField("value", toUTF8("z")));
doc.add(new DimensionalBinaryField("value", toUTF8("z")));
w.addDocument(doc);
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
assertEquals(0, s.count(new DimensionalRangeQuery("value", toUTF8("m"), true, toUTF8("n"), false)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DBinaryRange("value", toUTF8("m"), true, toUTF8("n"), false)));
assertEquals(2, s.count(new DimensionalRangeQuery("value", (byte[]) null, true, null, true)));
assertEquals(2, s.count(DimensionalRangeQuery.new1DBinaryRange("value", (byte[]) null, true, null, true)));
IOUtils.close(r, w, dir);
}
@ -906,7 +908,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
IndexReader r = w.getReader();
IndexSearcher s = new IndexSearcher(r);
assertEquals(0, s.count(new DimensionalRangeQuery("value", 17L, true, 13L, false)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DLongRange("value", 17L, true, 13L, false)));
IOUtils.close(r, w, dir);
}
@ -921,7 +923,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
IndexReader r = w.getReader();
IndexSearcher s = newSearcher(r);
assertEquals(0, s.count(new DimensionalRangeQuery("value", 17L, true, 13L, false)));
assertEquals(0, s.count(DimensionalRangeQuery.new1DLongRange("value", 17L, true, 13L, false)));
IOUtils.close(r, w, dir);
}
@ -932,7 +934,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", Long.MIN_VALUE));
doc.add(new DimensionalLongField("value", Long.MIN_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();
@ -955,7 +957,7 @@ public class TestDimensionalRangeQuery extends LuceneTestCase {
iwc.setCodec(getCodec());
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
doc.add(new DimensionalField("value", Long.MIN_VALUE));
doc.add(new DimensionalLongField("value", Long.MIN_VALUE));
w.addDocument(doc);
IndexReader r = w.getReader();

View File

@ -30,7 +30,7 @@ import java.util.concurrent.CountDownLatch;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
@ -109,7 +109,7 @@ public class TestLiveFieldValues extends LuceneTestCase {
String id = String.format(Locale.ROOT, "%d_%04x", threadID, threadRandom.nextInt(idCount));
Integer field = threadRandom.nextInt(Integer.MAX_VALUE);
doc.add(newStringField("id", new BytesRef(id), Field.Store.YES));
doc.add(new IntField("field", field.intValue(), Field.Store.YES));
doc.add(new StoredField("field", field.intValue()));
w.updateDocument(new Term("id", id), doc);
rt.add(id, field);
if (values.put(id, field) == null) {

View File

@ -24,7 +24,7 @@ import java.text.DecimalFormatSymbols;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
@ -33,9 +33,9 @@ import org.apache.lucene.util.TestUtil;
public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
/** Tests NumericRangeQuery on a multi-valued field (multiple numeric values per document).
/** Tests LegacyNumericRangeQuery on a multi-valued field (multiple numeric values per document).
* This test ensures, that a classical TermRangeQuery returns exactly the same document numbers as
* NumericRangeQuery (see SOLR-1322 for discussion) and the multiple precision terms per numeric value
* LegacyNumericRangeQuery (see SOLR-1322 for discussion) and the multiple precision terms per numeric value
* do not interfere with multiple numeric values.
*/
public void testMultiValuedNRQ() throws Exception {
@ -52,7 +52,7 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
for (int m=0, c=random().nextInt(10); m<=c; m++) {
int value = random().nextInt(Integer.MAX_VALUE);
doc.add(newStringField("asc", format.format(value), Field.Store.NO));
doc.add(new IntField("trie", value, Field.Store.NO));
doc.add(new LegacyIntField("trie", value, Field.Store.NO));
}
writer.addDocument(doc);
}
@ -68,10 +68,10 @@ public class TestMultiValuedNumericRangeQuery extends LuceneTestCase {
int a=lower; lower=upper; upper=a;
}
TermRangeQuery cq=TermRangeQuery.newStringRange("asc", format.format(lower), format.format(upper), true, true);
NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange("trie", lower, upper, true, true);
LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange("trie", lower, upper, true, true);
TopDocs trTopDocs = searcher.search(cq, 1);
TopDocs nrTopDocs = searcher.search(tq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", trTopDocs.totalHits, nrTopDocs.totalHits );
}
reader.close();
directory.close();

View File

@ -21,23 +21,21 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestNumericUtils; // NaN arrays
import org.apache.lucene.util.TestLegacyNumericUtils; // NaN arrays
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -65,7 +63,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
.setMergePolicy(newLogMergePolicy()));
final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED);
final FieldType storedInt = new FieldType(LegacyIntField.TYPE_NOT_STORED);
storedInt.setStored(true);
storedInt.freeze();
@ -81,7 +79,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
final FieldType storedIntNone = new FieldType(storedInt);
storedIntNone.setNumericPrecisionStep(Integer.MAX_VALUE);
final FieldType unstoredInt = IntField.TYPE_NOT_STORED;
final FieldType unstoredInt = LegacyIntField.TYPE_NOT_STORED;
final FieldType unstoredInt8 = new FieldType(unstoredInt);
unstoredInt8.setNumericPrecisionStep(8);
@ -92,14 +90,14 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
final FieldType unstoredInt2 = new FieldType(unstoredInt);
unstoredInt2.setNumericPrecisionStep(2);
IntField
field8 = new IntField("field8", 0, storedInt8),
field4 = new IntField("field4", 0, storedInt4),
field2 = new IntField("field2", 0, storedInt2),
fieldNoTrie = new IntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
ascfield8 = new IntField("ascfield8", 0, unstoredInt8),
ascfield4 = new IntField("ascfield4", 0, unstoredInt4),
ascfield2 = new IntField("ascfield2", 0, unstoredInt2);
LegacyIntField
field8 = new LegacyIntField("field8", 0, storedInt8),
field4 = new LegacyIntField("field4", 0, storedInt4),
field2 = new LegacyIntField("field2", 0, storedInt2),
fieldNoTrie = new LegacyIntField("field"+Integer.MAX_VALUE, 0, storedIntNone),
ascfield8 = new LegacyIntField("ascfield8", 0, unstoredInt8),
ascfield4 = new LegacyIntField("ascfield4", 0, unstoredInt4),
ascfield2 = new LegacyIntField("ascfield2", 0, unstoredInt2);
Document doc = new Document();
// add fields, that have a distance to test general functionality
@ -149,7 +147,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
String field="field"+precisionStep;
int count=3000;
int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
for (byte i=0; i<2; i++) {
TopDocs topDocs;
String type;
@ -194,7 +192,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
@Test
public void testOneMatchQuery() throws Exception {
NumericRangeQuery<Integer> q = NumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
LegacyNumericRangeQuery<Integer> q = LegacyNumericRangeQuery.newIntRange("ascfield8", 8, 1000, 1000, true, true);
TopDocs topDocs = searcher.search(q, noDocs);
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -205,7 +203,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
String field="field"+precisionStep;
int count=3000;
int upper=(count-1)*distance + (distance/3) + startOffset;
NumericRangeQuery<Integer> q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, true, true);
TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -215,7 +213,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
q=NumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, false, true);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -245,7 +243,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
String field="field"+precisionStep;
int count=3000;
int lower=(count-1)*distance + (distance/3) +startOffset;
NumericRangeQuery<Integer> q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
LegacyNumericRangeQuery<Integer> q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, true);
TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -255,7 +253,7 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().intValue());
q=NumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
q= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, null, true, false);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -287,23 +285,23 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new FloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
doc.add(new IntField("int", Integer.MIN_VALUE, Field.Store.NO));
doc.add(new LegacyFloatField("float", Float.NEGATIVE_INFINITY, Field.Store.NO));
doc.add(new LegacyIntField("int", Integer.MIN_VALUE, Field.Store.NO));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
doc.add(new IntField("int", Integer.MAX_VALUE, Field.Store.NO));
doc.add(new LegacyFloatField("float", Float.POSITIVE_INFINITY, Field.Store.NO));
doc.add(new LegacyIntField("int", Integer.MAX_VALUE, Field.Store.NO));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("float", 0.0f, Field.Store.NO));
doc.add(new IntField("int", 0, Field.Store.NO));
doc.add(new LegacyFloatField("float", 0.0f, Field.Store.NO));
doc.add(new LegacyIntField("int", 0, Field.Store.NO));
writer.addDocument(doc);
for (float f : TestNumericUtils.FLOAT_NANs) {
for (float f : TestLegacyNumericUtils.FLOAT_NANs) {
doc = new Document();
doc.add(new FloatField("float", f, Field.Store.NO));
doc.add(new LegacyFloatField("float", f, Field.Store.NO));
writer.addDocument(doc);
}
@ -312,41 +310,41 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
IndexSearcher s = newSearcher(r);
Query q=NumericRangeQuery.newIntRange("int", null, null, true, true);
Query q= LegacyNumericRangeQuery.newIntRange("int", null, null, true, true);
TopDocs topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newIntRange("int", null, null, false, false);
q= LegacyNumericRangeQuery.newIntRange("int", null, null, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
q= LegacyNumericRangeQuery.newIntRange("int", Integer.MIN_VALUE, Integer.MAX_VALUE, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 1, topDocs.scoreDocs.length );
q=NumericRangeQuery.newFloatRange("float", null, null, true, true);
q= LegacyNumericRangeQuery.newFloatRange("float", null, null, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newFloatRange("float", null, null, false, false);
q= LegacyNumericRangeQuery.newFloatRange("float", null, null, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
q= LegacyNumericRangeQuery.newFloatRange("float", Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 1, topDocs.scoreDocs.length );
q=NumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
q= LegacyNumericRangeQuery.newFloatRange("float", Float.NaN, Float.NaN, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", TestNumericUtils.FLOAT_NANs.length, topDocs.scoreDocs.length );
assertEquals("Score doc count", TestLegacyNumericUtils.FLOAT_NANs.length, topDocs.scoreDocs.length );
r.close();
dir.close();
@ -364,44 +362,44 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
}
final BytesRef lowerBytes, upperBytes;
BytesRefBuilder b = new BytesRefBuilder();
NumericUtils.intToPrefixCodedBytes(lower, 0, b);
LegacyNumericUtils.intToPrefixCodedBytes(lower, 0, b);
lowerBytes = b.toBytesRef();
NumericUtils.intToPrefixCodedBytes(upper, 0, b);
LegacyNumericUtils.intToPrefixCodedBytes(upper, 0, b);
upperBytes = b.toBytesRef();
// test inclusive range
NumericRangeQuery<Integer> tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
LegacyNumericRangeQuery<Integer> tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
TermRangeQuery cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
TopDocs cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
// test exclusive range
tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, false);
tTopDocs = searcher.search(tq, 1);
cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
// test left exclusive range
tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, true);
tTopDocs = searcher.search(tq, 1);
cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
// test right exclusive range
tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, false);
tTopDocs = searcher.search(tq, 1);
cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
@ -421,13 +419,13 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
int lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
// test empty enum
assert lower < upper;
assertTrue(0 < countTerms(NumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
assertEquals(0, countTerms(NumericRangeQuery.newIntRange("field4", 4, upper, lower, true, true)));
assertTrue(0 < countTerms(LegacyNumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
assertEquals(0, countTerms(LegacyNumericRangeQuery.newIntRange("field4", 4, upper, lower, true, true)));
// test empty enum outside of bounds
lower = distance*noDocs+startOffset;
upper = 2 * lower;
assert lower < upper;
assertEquals(0, countTerms(NumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
assertEquals(0, countTerms(LegacyNumericRangeQuery.newIntRange("field4", 4, lower, upper, true, true)));
}
private int countTerms(MultiTermQuery q) throws Exception {
@ -489,19 +487,19 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
int a=lower; lower=upper; upper=a;
}
// test inclusive range
Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
// test exclusive range
tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, false);
tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
// test left exclusive range
tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, false, true);
tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
// test right exclusive range
tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, false);
tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
}
@ -522,13 +520,13 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
testRangeSplit(2);
}
/** we fake a float test using int2float conversion of NumericUtils */
/** we fake a float test using int2float conversion of LegacyNumericUtils */
private void testFloatRange(int precisionStep) throws Exception {
final String field="ascfield"+precisionStep;
final int lower=-1000, upper=+2000;
Query tq=NumericRangeQuery.newFloatRange(field, precisionStep,
NumericUtils.sortableIntToFloat(lower), NumericUtils.sortableIntToFloat(upper), true, true);
Query tq= LegacyNumericRangeQuery.newFloatRange(field, precisionStep,
LegacyNumericUtils.sortableIntToFloat(lower), LegacyNumericUtils.sortableIntToFloat(upper), true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
}
@ -550,40 +548,40 @@ public class TestNumericRangeQuery32 extends LuceneTestCase {
@Test
public void testEqualsAndHash() throws Exception {
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test1", 4, 10, 20, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test2", 4, 10, 20, false, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test3", 4, 10, 20, true, false));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test4", 4, 10, 20, false, false));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test5", 4, 10, null, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test6", 4, null, 20, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newIntRange("test7", 4, null, null, true, true));
QueryUtils.checkEqual(
NumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true),
NumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true),
LegacyNumericRangeQuery.newIntRange("test8", 4, 10, 20, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true),
NumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
LegacyNumericRangeQuery.newIntRange("test9", 4, 10, 20, true, true),
LegacyNumericRangeQuery.newIntRange("test9", 8, 10, 20, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true),
NumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
LegacyNumericRangeQuery.newIntRange("test10a", 4, 10, 20, true, true),
LegacyNumericRangeQuery.newIntRange("test10b", 4, 10, 20, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true),
NumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
LegacyNumericRangeQuery.newIntRange("test11", 4, 10, 20, true, true),
LegacyNumericRangeQuery.newIntRange("test11", 4, 20, 10, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true),
NumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, true, true),
LegacyNumericRangeQuery.newIntRange("test12", 4, 10, 20, false, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true),
NumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
LegacyNumericRangeQuery.newIntRange("test13", 4, 10, 20, true, true),
LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
);
// the following produces a hash collision, because Long and Integer have the same hashcode, so only test equality:
Query q1 = NumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
Query q2 = NumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
Query q1 = LegacyNumericRangeQuery.newIntRange("test14", 4, 10, 20, true, true);
Query q2 = LegacyNumericRangeQuery.newLongRange("test14", 4, 10L, 20L, true, true);
assertFalse(q1.equals(q2));
assertFalse(q2.equals(q1));
}

View File

@ -19,25 +19,23 @@ package org.apache.lucene.search;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.LongField;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestNumericUtils; // NaN arrays
import org.apache.lucene.util.TestLegacyNumericUtils;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -65,7 +63,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
.setMergePolicy(newLogMergePolicy()));
final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED);
final FieldType storedLong = new FieldType(LegacyLongField.TYPE_NOT_STORED);
storedLong.setStored(true);
storedLong.freeze();
@ -84,7 +82,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
final FieldType storedLongNone = new FieldType(storedLong);
storedLongNone.setNumericPrecisionStep(Integer.MAX_VALUE);
final FieldType unstoredLong = LongField.TYPE_NOT_STORED;
final FieldType unstoredLong = LegacyLongField.TYPE_NOT_STORED;
final FieldType unstoredLong8 = new FieldType(unstoredLong);
unstoredLong8.setNumericPrecisionStep(8);
@ -98,16 +96,16 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
final FieldType unstoredLong2 = new FieldType(unstoredLong);
unstoredLong2.setNumericPrecisionStep(2);
LongField
field8 = new LongField("field8", 0L, storedLong8),
field6 = new LongField("field6", 0L, storedLong6),
field4 = new LongField("field4", 0L, storedLong4),
field2 = new LongField("field2", 0L, storedLong2),
fieldNoTrie = new LongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
ascfield8 = new LongField("ascfield8", 0L, unstoredLong8),
ascfield6 = new LongField("ascfield6", 0L, unstoredLong6),
ascfield4 = new LongField("ascfield4", 0L, unstoredLong4),
ascfield2 = new LongField("ascfield2", 0L, unstoredLong2);
LegacyLongField
field8 = new LegacyLongField("field8", 0L, storedLong8),
field6 = new LegacyLongField("field6", 0L, storedLong6),
field4 = new LegacyLongField("field4", 0L, storedLong4),
field2 = new LegacyLongField("field2", 0L, storedLong2),
fieldNoTrie = new LegacyLongField("field"+Integer.MAX_VALUE, 0L, storedLongNone),
ascfield8 = new LegacyLongField("ascfield8", 0L, unstoredLong8),
ascfield6 = new LegacyLongField("ascfield6", 0L, unstoredLong6),
ascfield4 = new LegacyLongField("ascfield4", 0L, unstoredLong4),
ascfield2 = new LegacyLongField("ascfield2", 0L, unstoredLong2);
Document doc = new Document();
// add fields, that have a distance to test general functionality
@ -158,7 +156,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
String field="field"+precisionStep;
int count=3000;
long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
for (byte i=0; i<2; i++) {
TopDocs topDocs;
String type;
@ -208,7 +206,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
@Test
public void testOneMatchQuery() throws Exception {
NumericRangeQuery<Long> q = NumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange("ascfield8", 8, 1000L, 1000L, true, true);
TopDocs topDocs = searcher.search(q, noDocs);
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -219,7 +217,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
String field="field"+precisionStep;
int count=3000;
long upper=(count-1)*distance + (distance/3) + startOffset;
NumericRangeQuery<Long> q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, true, true);
TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -229,7 +227,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc", (count-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
q=NumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, false, true);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -264,7 +262,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
String field="field"+precisionStep;
int count=3000;
long lower=(count-1)*distance + (distance/3) +startOffset;
NumericRangeQuery<Long> q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
LegacyNumericRangeQuery<Long> q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, true);
TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
ScoreDoc[] sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -274,7 +272,7 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
doc=searcher.doc(sd[sd.length-1].doc);
assertEquals("Last doc", (noDocs-1)*distance+startOffset, doc.getField(field).numericValue().longValue() );
q=NumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
q= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, false);
topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
sd = topDocs.scoreDocs;
assertNotNull(sd);
@ -311,23 +309,23 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(random(), dir,
newIndexWriterConfig(new MockAnalyzer(random())));
Document doc = new Document();
doc.add(new DoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
doc.add(new LongField("long", Long.MIN_VALUE, Field.Store.NO));
doc.add(new LegacyDoubleField("double", Double.NEGATIVE_INFINITY, Field.Store.NO));
doc.add(new LegacyLongField("long", Long.MIN_VALUE, Field.Store.NO));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
doc.add(new LongField("long", Long.MAX_VALUE, Field.Store.NO));
doc.add(new LegacyDoubleField("double", Double.POSITIVE_INFINITY, Field.Store.NO));
doc.add(new LegacyLongField("long", Long.MAX_VALUE, Field.Store.NO));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("double", 0.0, Field.Store.NO));
doc.add(new LongField("long", 0L, Field.Store.NO));
doc.add(new LegacyDoubleField("double", 0.0, Field.Store.NO));
doc.add(new LegacyLongField("long", 0L, Field.Store.NO));
writer.addDocument(doc);
for (double d : TestNumericUtils.DOUBLE_NANs) {
for (double d : TestLegacyNumericUtils.DOUBLE_NANs) {
doc = new Document();
doc.add(new DoubleField("double", d, Field.Store.NO));
doc.add(new LegacyDoubleField("double", d, Field.Store.NO));
writer.addDocument(doc);
}
@ -336,41 +334,41 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
IndexReader r = DirectoryReader.open(dir);
IndexSearcher s = newSearcher(r);
Query q=NumericRangeQuery.newLongRange("long", null, null, true, true);
Query q= LegacyNumericRangeQuery.newLongRange("long", null, null, true, true);
TopDocs topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newLongRange("long", null, null, false, false);
q= LegacyNumericRangeQuery.newLongRange("long", null, null, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
q= LegacyNumericRangeQuery.newLongRange("long", Long.MIN_VALUE, Long.MAX_VALUE, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 1, topDocs.scoreDocs.length );
q=NumericRangeQuery.newDoubleRange("double", null, null, true, true);
q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newDoubleRange("double", null, null, false, false);
q= LegacyNumericRangeQuery.newDoubleRange("double", null, null, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 3, topDocs.scoreDocs.length );
q=NumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, false, false);
topDocs = s.search(q, 10);
assertEquals("Score doc count", 1, topDocs.scoreDocs.length );
q=NumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
q= LegacyNumericRangeQuery.newDoubleRange("double", Double.NaN, Double.NaN, true, true);
topDocs = s.search(q, 10);
assertEquals("Score doc count", TestNumericUtils.DOUBLE_NANs.length, topDocs.scoreDocs.length );
assertEquals("Score doc count", TestLegacyNumericUtils.DOUBLE_NANs.length, topDocs.scoreDocs.length );
r.close();
dir.close();
@ -388,44 +386,44 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
}
final BytesRef lowerBytes, upperBytes;
BytesRefBuilder b = new BytesRefBuilder();
NumericUtils.longToPrefixCodedBytes(lower, 0, b);
LegacyNumericUtils.longToPrefixCodedBytes(lower, 0, b);
lowerBytes = b.toBytesRef();
NumericUtils.longToPrefixCodedBytes(upper, 0, b);
LegacyNumericUtils.longToPrefixCodedBytes(upper, 0, b);
upperBytes = b.toBytesRef();
// test inclusive range
NumericRangeQuery<Long> tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
LegacyNumericRangeQuery<Long> tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
TermRangeQuery cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
TopDocs cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
// test exclusive range
tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, false);
tTopDocs = searcher.search(tq, 1);
cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
// test left exclusive range
tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
cq=new TermRangeQuery(field, lowerBytes, upperBytes, false, true);
tTopDocs = searcher.search(tq, 1);
cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
// test right exclusive range
tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
cq=new TermRangeQuery(field, lowerBytes, upperBytes, true, false);
tTopDocs = searcher.search(tq, 1);
cTopDocs = searcher.search(cq, 1);
assertEquals("Returned count for NumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
assertEquals("Returned count for LegacyNumericRangeQuery and TermRangeQuery must be equal", cTopDocs.totalHits, tTopDocs.totalHits );
totalTermCountT += termCountT = countTerms(tq);
totalTermCountC += termCountC = countTerms(cq);
checkTermCounts(precisionStep, termCountT, termCountC);
@ -445,13 +443,13 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
// test empty enum
assert lower < upper;
assertTrue(0 < countTerms(NumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
assertEquals(0, countTerms(NumericRangeQuery.newLongRange("field4", 4, upper, lower, true, true)));
assertTrue(0 < countTerms(LegacyNumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
assertEquals(0, countTerms(LegacyNumericRangeQuery.newLongRange("field4", 4, upper, lower, true, true)));
// test empty enum outside of bounds
lower = distance*noDocs+startOffset;
upper = 2L * lower;
assert lower < upper;
assertEquals(0, countTerms(NumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
assertEquals(0, countTerms(LegacyNumericRangeQuery.newLongRange("field4", 4, lower, upper, true, true)));
}
private int countTerms(MultiTermQuery q) throws Exception {
@ -518,19 +516,19 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
long a=lower; lower=upper; upper=a;
}
// test inclusive range
Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
// test exclusive range
tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, false);
tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to exclusive range length", Math.max(upper-lower-1, 0), tTopDocs.totalHits );
// test left exclusive range
tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, false, true);
tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
// test right exclusive range
tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, false);
tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to half exclusive range length", upper-lower, tTopDocs.totalHits );
}
@ -556,13 +554,13 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
testRangeSplit(2);
}
/** we fake a double test using long2double conversion of NumericUtils */
/** we fake a double test using long2double conversion of LegacyNumericUtils */
private void testDoubleRange(int precisionStep) throws Exception {
final String field="ascfield"+precisionStep;
final long lower=-1000L, upper=+2000L;
Query tq=NumericRangeQuery.newDoubleRange(field, precisionStep,
NumericUtils.sortableLongToDouble(lower), NumericUtils.sortableLongToDouble(upper), true, true);
Query tq= LegacyNumericRangeQuery.newDoubleRange(field, precisionStep,
LegacyNumericUtils.sortableLongToDouble(lower), LegacyNumericUtils.sortableLongToDouble(upper), true, true);
TopDocs tTopDocs = searcher.search(tq, 1);
assertEquals("Returned count of range query must be equal to inclusive range length", upper-lower+1, tTopDocs.totalHits );
}
@ -589,36 +587,36 @@ public class TestNumericRangeQuery64 extends LuceneTestCase {
@Test
public void testEqualsAndHash() throws Exception {
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
QueryUtils.checkHashEquals(NumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test1", 4, 10L, 20L, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test2", 4, 10L, 20L, false, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test3", 4, 10L, 20L, true, false));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test4", 4, 10L, 20L, false, false));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test5", 4, 10L, null, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test6", 4, null, 20L, true, true));
QueryUtils.checkHashEquals(LegacyNumericRangeQuery.newLongRange("test7", 4, null, null, true, true));
QueryUtils.checkEqual(
NumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true),
NumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true),
LegacyNumericRangeQuery.newLongRange("test8", 4, 10L, 20L, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true),
NumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
LegacyNumericRangeQuery.newLongRange("test9", 4, 10L, 20L, true, true),
LegacyNumericRangeQuery.newLongRange("test9", 8, 10L, 20L, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true),
NumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
LegacyNumericRangeQuery.newLongRange("test10a", 4, 10L, 20L, true, true),
LegacyNumericRangeQuery.newLongRange("test10b", 4, 10L, 20L, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true),
NumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
LegacyNumericRangeQuery.newLongRange("test11", 4, 10L, 20L, true, true),
LegacyNumericRangeQuery.newLongRange("test11", 4, 20L, 10L, true, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true),
NumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, true, true),
LegacyNumericRangeQuery.newLongRange("test12", 4, 10L, 20L, false, true)
);
QueryUtils.checkUnequal(
NumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true),
NumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
LegacyNumericRangeQuery.newLongRange("test13", 4, 10L, 20L, true, true),
LegacyNumericRangeQuery.newFloatRange("test13", 4, 10f, 20f, true, true)
);
// difference to int range is tested in TestNumericRangeQuery32
}

View File

@ -25,8 +25,8 @@ import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
/** Simple tests for SortedNumericSortField */
public class TestSortedNumericSortField extends LuceneTestCase {
@ -223,12 +223,12 @@ public class TestSortedNumericSortField extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("value", NumericUtils.floatToSortableInt(-3f)));
doc.add(new SortedNumericDocValuesField("value", LegacyNumericUtils.floatToSortableInt(-3f)));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("value", NumericUtils.floatToSortableInt(-5f)));
doc.add(new SortedNumericDocValuesField("value", NumericUtils.floatToSortableInt(7f)));
doc.add(new SortedNumericDocValuesField("value", LegacyNumericUtils.floatToSortableInt(-5f)));
doc.add(new SortedNumericDocValuesField("value", LegacyNumericUtils.floatToSortableInt(7f)));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();
@ -251,12 +251,12 @@ public class TestSortedNumericSortField extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new SortedNumericDocValuesField("value", NumericUtils.doubleToSortableLong(-3d)));
doc.add(new SortedNumericDocValuesField("value", LegacyNumericUtils.doubleToSortableLong(-3d)));
doc.add(newStringField("id", "2", Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new SortedNumericDocValuesField("value", NumericUtils.doubleToSortableLong(-5d)));
doc.add(new SortedNumericDocValuesField("value", NumericUtils.doubleToSortableLong(7d)));
doc.add(new SortedNumericDocValuesField("value", LegacyNumericUtils.doubleToSortableLong(-5d)));
doc.add(new SortedNumericDocValuesField("value", LegacyNumericUtils.doubleToSortableLong(7d)));
doc.add(newStringField("id", "1", Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = writer.getReader();

View File

@ -26,7 +26,7 @@ public class TestUsageTrackingFilterCachingPolicy extends LuceneTestCase {
public void testCostlyFilter() {
assertTrue(UsageTrackingQueryCachingPolicy.isCostly(new PrefixQuery(new Term("field", "prefix"))));
assertTrue(UsageTrackingQueryCachingPolicy.isCostly(NumericRangeQuery.newIntRange("intField", 8, 1, 1000, true, true)));
assertTrue(UsageTrackingQueryCachingPolicy.isCostly(DimensionalRangeQuery.new1DIntRange("intField", 1, true, 1000, true)));
assertFalse(UsageTrackingQueryCachingPolicy.isCostly(new TermQuery(new Term("field", "value"))));
}

View File

@ -22,21 +22,21 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.Random;
public class TestNumericUtils extends LuceneTestCase {
public class TestLegacyNumericUtils extends LuceneTestCase {
public void testLongConversionAndOrdering() throws Exception {
// generate a series of encoded longs, each numerical one bigger than the one before
BytesRefBuilder last = new BytesRefBuilder();
BytesRefBuilder act = new BytesRefBuilder();
for (long l=-100000L; l<100000L; l++) {
NumericUtils.longToPrefixCodedBytes(l, 0, act);
LegacyNumericUtils.longToPrefixCodedBytes(l, 0, act);
if (last!=null) {
// test if smaller
assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
}
// test is back and forward conversion works
assertEquals("forward and back conversion should generate same long", l, NumericUtils.prefixCodedToLong(act.get()));
assertEquals("forward and back conversion should generate same long", l, LegacyNumericUtils.prefixCodedToLong(act.get()));
// next step
last.copyBytes(act);
}
@ -47,14 +47,14 @@ public class TestNumericUtils extends LuceneTestCase {
BytesRefBuilder act = new BytesRefBuilder();
BytesRefBuilder last = new BytesRefBuilder();
for (int i=-100000; i<100000; i++) {
NumericUtils.intToPrefixCodedBytes(i, 0, act);
LegacyNumericUtils.intToPrefixCodedBytes(i, 0, act);
if (last!=null) {
// test if smaller
assertTrue("actual bigger than last (BytesRef)", last.get().compareTo(act.get()) < 0 );
assertTrue("actual bigger than last (as String)", last.get().utf8ToString().compareTo(act.get().utf8ToString()) < 0 );
}
// test is back and forward conversion works
assertEquals("forward and back conversion should generate same int", i, NumericUtils.prefixCodedToInt(act.get()));
assertEquals("forward and back conversion should generate same int", i, LegacyNumericUtils.prefixCodedToInt(act.get()));
// next step
last.copyBytes(act.get());
}
@ -69,14 +69,14 @@ public class TestNumericUtils extends LuceneTestCase {
for (int i=0; i<vals.length; i++) {
prefixVals[i] = new BytesRefBuilder();
NumericUtils.longToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
LegacyNumericUtils.longToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
// check forward and back conversion
assertEquals( "forward and back conversion should generate same long", vals[i], NumericUtils.prefixCodedToLong(prefixVals[i].get()) );
assertEquals( "forward and back conversion should generate same long", vals[i], LegacyNumericUtils.prefixCodedToLong(prefixVals[i].get()) );
// test if decoding values as int fails correctly
try {
NumericUtils.prefixCodedToInt(prefixVals[i].get());
LegacyNumericUtils.prefixCodedToInt(prefixVals[i].get());
fail("decoding a prefix coded long value as int should fail");
} catch (NumberFormatException e) {
// worked
@ -92,8 +92,8 @@ public class TestNumericUtils extends LuceneTestCase {
final BytesRefBuilder ref = new BytesRefBuilder();
for (int i=0; i<vals.length; i++) {
for (int j=0; j<64; j++) {
NumericUtils.longToPrefixCodedBytes(vals[i], j, ref);
long prefixVal=NumericUtils.prefixCodedToLong(ref.get());
LegacyNumericUtils.longToPrefixCodedBytes(vals[i], j, ref);
long prefixVal= LegacyNumericUtils.prefixCodedToLong(ref.get());
long mask=(1L << j) - 1L;
assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
}
@ -109,14 +109,14 @@ public class TestNumericUtils extends LuceneTestCase {
for (int i=0; i<vals.length; i++) {
prefixVals[i] = new BytesRefBuilder();
NumericUtils.intToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
LegacyNumericUtils.intToPrefixCodedBytes(vals[i], 0, prefixVals[i]);
// check forward and back conversion
assertEquals( "forward and back conversion should generate same int", vals[i], NumericUtils.prefixCodedToInt(prefixVals[i].get()) );
assertEquals( "forward and back conversion should generate same int", vals[i], LegacyNumericUtils.prefixCodedToInt(prefixVals[i].get()) );
// test if decoding values as long fails correctly
try {
NumericUtils.prefixCodedToLong(prefixVals[i].get());
LegacyNumericUtils.prefixCodedToLong(prefixVals[i].get());
fail("decoding a prefix coded int value as long should fail");
} catch (NumberFormatException e) {
// worked
@ -132,8 +132,8 @@ public class TestNumericUtils extends LuceneTestCase {
final BytesRefBuilder ref = new BytesRefBuilder();
for (int i=0; i<vals.length; i++) {
for (int j=0; j<32; j++) {
NumericUtils.intToPrefixCodedBytes(vals[i], j, ref);
int prefixVal=NumericUtils.prefixCodedToInt(ref.get());
LegacyNumericUtils.intToPrefixCodedBytes(vals[i], j, ref);
int prefixVal= LegacyNumericUtils.prefixCodedToInt(ref.get());
int mask=(1 << j) - 1;
assertEquals( "difference between prefix val and original value for "+vals[i]+" with shift="+j, vals[i] & mask, vals[i]-prefixVal );
}
@ -149,8 +149,8 @@ public class TestNumericUtils extends LuceneTestCase {
// check forward and back conversion
for (int i=0; i<vals.length; i++) {
longVals[i]=NumericUtils.doubleToSortableLong(vals[i]);
assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], NumericUtils.sortableLongToDouble(longVals[i]))==0 );
longVals[i]= LegacyNumericUtils.doubleToSortableLong(vals[i]);
assertTrue( "forward and back conversion should generate same double", Double.compare(vals[i], LegacyNumericUtils.sortableLongToDouble(longVals[i]))==0 );
}
// check sort order (prefixVals should be ascending)
@ -168,10 +168,10 @@ public class TestNumericUtils extends LuceneTestCase {
};
public void testSortableDoubleNaN() {
final long plusInf = NumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
final long plusInf = LegacyNumericUtils.doubleToSortableLong(Double.POSITIVE_INFINITY);
for (double nan : DOUBLE_NANs) {
assertTrue(Double.isNaN(nan));
final long sortable = NumericUtils.doubleToSortableLong(nan);
final long sortable = LegacyNumericUtils.doubleToSortableLong(nan);
assertTrue("Double not sorted correctly: " + nan + ", long repr: "
+ sortable + ", positive inf.: " + plusInf, sortable > plusInf);
}
@ -186,8 +186,8 @@ public class TestNumericUtils extends LuceneTestCase {
// check forward and back conversion
for (int i=0; i<vals.length; i++) {
intVals[i]=NumericUtils.floatToSortableInt(vals[i]);
assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], NumericUtils.sortableIntToFloat(intVals[i]))==0 );
intVals[i]= LegacyNumericUtils.floatToSortableInt(vals[i]);
assertTrue( "forward and back conversion should generate same double", Float.compare(vals[i], LegacyNumericUtils.sortableIntToFloat(intVals[i]))==0 );
}
// check sort order (prefixVals should be ascending)
@ -205,10 +205,10 @@ public class TestNumericUtils extends LuceneTestCase {
};
public void testSortableFloatNaN() {
final int plusInf = NumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
final int plusInf = LegacyNumericUtils.floatToSortableInt(Float.POSITIVE_INFINITY);
for (float nan : FLOAT_NANs) {
assertTrue(Float.isNaN(nan));
final int sortable = NumericUtils.floatToSortableInt(nan);
final int sortable = LegacyNumericUtils.floatToSortableInt(nan);
assertTrue("Float not sorted correctly: " + nan + ", int repr: "
+ sortable + ", positive inf.: " + plusInf, sortable > plusInf);
}
@ -225,12 +225,12 @@ public class TestNumericUtils extends LuceneTestCase {
final Iterator<Long> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
NumericUtils.splitLongRange(new NumericUtils.LongRangeBuilder() {
LegacyNumericUtils.splitLongRange(new LegacyNumericUtils.LongRangeBuilder() {
@Override
public void addRange(long min, long max, int shift) {
assertTrue("min, max should be inside bounds", min>=lower && min<=upper && max>=lower && max<=upper);
if (useBitSet) for (long l=min; l<=max; l++) {
assertFalse("ranges should not overlap", bits.getAndSet(l-lower) );
assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
if (useBitSet) for (long l = min; l <= max; l++) {
assertFalse("ranges should not overlap", bits.getAndSet(l - lower));
// extra exit condition to prevent overflow on MAX_VALUE
if (l == max) break;
}
@ -240,9 +240,9 @@ public class TestNumericUtils extends LuceneTestCase {
min ^= 0x8000000000000000L;
max ^= 0x8000000000000000L;
//System.out.println("0x"+Long.toHexString(min>>>shift)+"L,0x"+Long.toHexString(max>>>shift)+"L)/*shift="+shift+"*/,");
assertEquals( "shift", neededShifts.next().intValue(), shift);
assertEquals( "inner min bound", neededBounds.next().longValue(), min>>>shift);
assertEquals( "inner max bound", neededBounds.next().longValue(), max>>>shift);
assertEquals("shift", neededShifts.next().intValue(), shift);
assertEquals("inner min bound", neededBounds.next().longValue(), min >>> shift);
assertEquals("inner max bound", neededBounds.next().longValue(), max >>> shift);
}
}, precisionStep, lower, upper);
@ -253,7 +253,7 @@ public class TestNumericUtils extends LuceneTestCase {
}
}
/** LUCENE-2541: NumericRangeQuery errors with endpoints near long min and max values */
/** LUCENE-2541: LegacyNumericRangeQuery errors with endpoints near long min and max values */
public void testLongExtremeValues() throws Exception {
// upper end extremes
assertLongRangeSplit(Long.MAX_VALUE, Long.MAX_VALUE, 1, true, Arrays.asList(
@ -465,12 +465,12 @@ public class TestNumericUtils extends LuceneTestCase {
final Iterator<Integer> neededBounds = (expectedBounds == null) ? null : expectedBounds.iterator();
final Iterator<Integer> neededShifts = (expectedShifts == null) ? null : expectedShifts.iterator();
NumericUtils.splitIntRange(new NumericUtils.IntRangeBuilder() {
LegacyNumericUtils.splitIntRange(new LegacyNumericUtils.IntRangeBuilder() {
@Override
public void addRange(int min, int max, int shift) {
assertTrue("min, max should be inside bounds", min>=lower && min<=upper && max>=lower && max<=upper);
if (useBitSet) for (int i=min; i<=max; i++) {
assertFalse("ranges should not overlap", bits.getAndSet(i-lower) );
assertTrue("min, max should be inside bounds", min >= lower && min <= upper && max >= lower && max <= upper);
if (useBitSet) for (int i = min; i <= max; i++) {
assertFalse("ranges should not overlap", bits.getAndSet(i - lower));
// extra exit condition to prevent overflow on MAX_VALUE
if (i == max) break;
}
@ -480,9 +480,9 @@ public class TestNumericUtils extends LuceneTestCase {
min ^= 0x80000000;
max ^= 0x80000000;
//System.out.println("0x"+Integer.toHexString(min>>>shift)+",0x"+Integer.toHexString(max>>>shift)+")/*shift="+shift+"*/,");
assertEquals( "shift", neededShifts.next().intValue(), shift);
assertEquals( "inner min bound", neededBounds.next().intValue(), min>>>shift);
assertEquals( "inner max bound", neededBounds.next().intValue(), max>>>shift);
assertEquals("shift", neededShifts.next().intValue(), shift);
assertEquals("inner min bound", neededBounds.next().intValue(), min >>> shift);
assertEquals("inner max bound", neededBounds.next().intValue(), max >>> shift);
}
}, precisionStep, lower, upper);

View File

@ -34,6 +34,7 @@ import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
public class TestBKD extends LuceneTestCase {
@ -43,7 +44,7 @@ public class TestBKD extends LuceneTestCase {
BKDWriter w = new BKDWriter(dir, "tmp", 1, 4, 2, 1.0f);
byte[] scratch = new byte[4];
for(int docID=0;docID<100;docID++) {
BKDUtil.intToBytes(docID, scratch, 0);
NumericUtils.intToBytes(docID, scratch, 0);
w.add(scratch, docID);
}
@ -72,7 +73,7 @@ public class TestBKD extends LuceneTestCase {
@Override
public void visit(int docID, byte[] packedValue) {
int x = BKDUtil.bytesToInt(packedValue, 0);
int x = NumericUtils.bytesToInt(packedValue, 0);
if (VERBOSE) {
System.out.println("visit docID=" + docID + " x=" + x);
}
@ -83,8 +84,8 @@ public class TestBKD extends LuceneTestCase {
@Override
public Relation compare(byte[] minPacked, byte[] maxPacked) {
int min = BKDUtil.bytesToInt(minPacked, 0);
int max = BKDUtil.bytesToInt(maxPacked, 0);
int min = NumericUtils.bytesToInt(minPacked, 0);
int max = NumericUtils.bytesToInt(maxPacked, 0);
assert max >= min;
if (VERBOSE) {
System.out.println("compare: min=" + min + " max=" + max + " vs queryMin=" + queryMin + " queryMax=" + queryMax);
@ -129,7 +130,7 @@ public class TestBKD extends LuceneTestCase {
}
for(int dim=0;dim<numDims;dim++) {
values[dim] = random().nextInt();
BKDUtil.intToBytes(values[dim], scratch, dim);
NumericUtils.intToBytes(values[dim], scratch, dim);
if (VERBOSE) {
System.out.println(" " + dim + " -> " + values[dim]);
}
@ -178,7 +179,7 @@ public class TestBKD extends LuceneTestCase {
public void visit(int docID, byte[] packedValue) {
//System.out.println("visit check docID=" + docID);
for(int dim=0;dim<numDims;dim++) {
int x = BKDUtil.bytesToInt(packedValue, dim);
int x = NumericUtils.bytesToInt(packedValue, dim);
if (x < queryMin[dim] || x > queryMax[dim]) {
//System.out.println(" no");
return;
@ -193,8 +194,8 @@ public class TestBKD extends LuceneTestCase {
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
for(int dim=0;dim<numDims;dim++) {
int min = BKDUtil.bytesToInt(minPacked, dim);
int max = BKDUtil.bytesToInt(maxPacked, dim);
int min = NumericUtils.bytesToInt(minPacked, dim);
int max = NumericUtils.bytesToInt(maxPacked, dim);
assert max >= min;
if (max < queryMin[dim] || min > queryMax[dim]) {
@ -250,7 +251,7 @@ public class TestBKD extends LuceneTestCase {
}
for(int dim=0;dim<numDims;dim++) {
values[dim] = randomBigInt(numBytesPerDim);
BKDUtil.bigIntToBytes(values[dim], scratch, dim, numBytesPerDim);
NumericUtils.bigIntToBytes(values[dim], scratch, dim, numBytesPerDim);
if (VERBOSE) {
System.out.println(" " + dim + " -> " + values[dim]);
}
@ -299,7 +300,7 @@ public class TestBKD extends LuceneTestCase {
public void visit(int docID, byte[] packedValue) {
//System.out.println("visit check docID=" + docID);
for(int dim=0;dim<numDims;dim++) {
BigInteger x = BKDUtil.bytesToBigInt(packedValue, dim, numBytesPerDim);
BigInteger x = NumericUtils.bytesToBigInt(packedValue, dim, numBytesPerDim);
if (x.compareTo(queryMin[dim]) < 0 || x.compareTo(queryMax[dim]) > 0) {
//System.out.println(" no");
return;
@ -314,8 +315,8 @@ public class TestBKD extends LuceneTestCase {
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
for(int dim=0;dim<numDims;dim++) {
BigInteger min = BKDUtil.bytesToBigInt(minPacked, dim, numBytesPerDim);
BigInteger max = BKDUtil.bytesToBigInt(maxPacked, dim, numBytesPerDim);
BigInteger min = NumericUtils.bytesToBigInt(minPacked, dim, numBytesPerDim);
BigInteger max = NumericUtils.bytesToBigInt(maxPacked, dim, numBytesPerDim);
assert max.compareTo(min) >= 0;
if (max.compareTo(queryMin[dim]) < 0 || min.compareTo(queryMax[dim]) > 0) {
@ -518,7 +519,7 @@ public class TestBKD extends LuceneTestCase {
verify(docValuesArray, docIDsArray, numDims, numBytesPerDim);
}
public void testBKDUtilAdd() throws Exception {
public void testNumericUtilsAdd() throws Exception {
int iters = atLeast(10000);
int numBytes = TestUtil.nextInt(random(), 1, 100);
for(int iter=0;iter<iters;iter++) {
@ -536,26 +537,26 @@ public class TestBKD extends LuceneTestCase {
System.arraycopy(v2RawBytes, 0, v2Bytes, v2Bytes.length-v2RawBytes.length, v2RawBytes.length);
byte[] result = new byte[numBytes];
BKDUtil.add(numBytes, 0, v1Bytes, v2Bytes, result);
NumericUtils.add(numBytes, 0, v1Bytes, v2Bytes, result);
BigInteger sum = v1.add(v2);
assertTrue("sum=" + sum + " v1=" + v1 + " v2=" + v2 + " but result=" + new BigInteger(1, result), sum.equals(new BigInteger(1, result)));
}
}
public void testIllegalBKDUtilAdd() throws Exception {
public void testIllegalNumericUtilsAdd() throws Exception {
byte[] bytes = new byte[4];
Arrays.fill(bytes, (byte) 0xff);
byte[] one = new byte[4];
one[3] = 1;
try {
BKDUtil.add(4, 0, bytes, one, new byte[4]);
NumericUtils.add(4, 0, bytes, one, new byte[4]);
} catch (IllegalArgumentException iae) {
assertEquals("a + b overflows bytesPerDim=4", iae.getMessage());
}
}
public void testBKDUtilSubtract() throws Exception {
public void testNumericUtilsSubtract() throws Exception {
int iters = atLeast(10000);
int numBytes = TestUtil.nextInt(random(), 1, 100);
for(int iter=0;iter<iters;iter++) {
@ -579,7 +580,7 @@ public class TestBKD extends LuceneTestCase {
System.arraycopy(v2RawBytes, 0, v2Bytes, v2Bytes.length-v2RawBytes.length, v2RawBytes.length);
byte[] result = new byte[numBytes];
BKDUtil.subtract(numBytes, 0, v1Bytes, v2Bytes, result);
NumericUtils.subtract(numBytes, 0, v1Bytes, v2Bytes, result);
BigInteger diff = v1.subtract(v2);
@ -587,13 +588,13 @@ public class TestBKD extends LuceneTestCase {
}
}
public void testIllegalBKDUtilSubtract() throws Exception {
public void testIllegalNumericUtilsSubtract() throws Exception {
byte[] v1 = new byte[4];
v1[3] = (byte) 0xf0;
byte[] v2 = new byte[4];
v2[3] = (byte) 0xf1;
try {
BKDUtil.subtract(4, 0, v1, v2, new byte[4]);
NumericUtils.subtract(4, 0, v1, v2, new byte[4]);
} catch (IllegalArgumentException iae) {
assertEquals("a < b", iae.getMessage());
}
@ -722,7 +723,7 @@ public class TestBKD extends LuceneTestCase {
random().nextBytes(queryMin[dim]);
queryMax[dim] = new byte[numBytesPerDim];
random().nextBytes(queryMax[dim]);
if (BKDUtil.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, queryMin[dim], 0, queryMax[dim], 0) > 0) {
byte[] x = queryMin[dim];
queryMin[dim] = queryMax[dim];
queryMax[dim] = x;
@ -741,8 +742,8 @@ public class TestBKD extends LuceneTestCase {
public void visit(int docID, byte[] packedValue) {
//System.out.println("visit check docID=" + docID);
for(int dim=0;dim<numDims;dim++) {
if (BKDUtil.compare(numBytesPerDim, packedValue, dim, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, packedValue, dim, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, packedValue, dim, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, packedValue, dim, queryMax[dim], 0) > 0) {
//System.out.println(" no");
return;
}
@ -756,11 +757,11 @@ public class TestBKD extends LuceneTestCase {
public Relation compare(byte[] minPacked, byte[] maxPacked) {
boolean crosses = false;
for(int dim=0;dim<numDims;dim++) {
if (BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, maxPacked, dim, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, minPacked, dim, queryMax[dim], 0) > 0) {
return Relation.CELL_OUTSIDE_QUERY;
} else if (BKDUtil.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
} else if (NumericUtils.compare(numBytesPerDim, minPacked, dim, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, maxPacked, dim, queryMax[dim], 0) > 0) {
crosses = true;
}
}
@ -778,8 +779,8 @@ public class TestBKD extends LuceneTestCase {
boolean matches = true;
for(int dim=0;dim<numDims;dim++) {
byte[] x = docValues[ord][dim];
if (BKDUtil.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 ||
BKDUtil.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
if (NumericUtils.compare(numBytesPerDim, x, 0, queryMin[dim], 0) < 0 ||
NumericUtils.compare(numBytesPerDim, x, 0, queryMax[dim], 0) > 0) {
matches = false;
break;
}

View File

@ -17,20 +17,6 @@ package org.apache.lucene.demo;
* limitations under the License.
*/
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
@ -44,6 +30,20 @@ import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.Date;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
/** Index all text files under a directory.
* <p>
* This is a command-line application demonstrating simple Lucene indexing.
@ -178,13 +178,13 @@ public class IndexFiles {
doc.add(pathField);
// Add the last modified date of the file a field named "modified".
// Use a LongField that is indexed (i.e. efficiently filterable with
// NumericRangeFilter). This indexes to milli-second resolution, which
// Use a DimensionalLongField that is indexed (i.e. efficiently filterable with
// DimensionalRangeQuery). This indexes to milli-second resolution, which
// is often too fine. You could instead create a number based on
// year/month/day/hour/minutes/seconds, down the resolution you require.
// For example the long value 2011021714 would mean
// February 17, 2011, 2-3 PM.
doc.add(new LongField("modified", lastModified, Field.Store.NO));
doc.add(new DimensionalLongField("modified", lastModified));
// Add the contents of the file to a field named "contents". Specify a Reader,
// so that the text of the file is tokenized and indexed, but not stored.

View File

@ -17,14 +17,9 @@ package org.apache.lucene.demo.facet;
* limitations under the License.
*/
import java.io.Closeable;
import java.io.IOException;
import java.text.ParseException;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.DimensionalDoubleField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.expressions.Expression;
import org.apache.lucene.expressions.SimpleBindings;
@ -45,9 +40,9 @@ import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DimensionalRangeQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
@ -55,6 +50,10 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.SloppyMath;
import java.io.Closeable;
import java.io.IOException;
import java.text.ParseException;
/** Shows simple usage of dynamic range faceting, using the
* expressions module to calculate distance. */
public class DistanceFacetsExample implements Closeable {
@ -92,25 +91,25 @@ public class DistanceFacetsExample implements Closeable {
// TODO: we could index in radians instead ... saves all the conversions in getBoundingBoxFilter
// Add documents with latitude/longitude location:
// we index these both as DoubleFields (for bounding box/ranges) and as NumericDocValuesFields (for scoring)
// we index these both as DimensionalDoubleFields (for bounding box/ranges) and as NumericDocValuesFields (for scoring)
Document doc = new Document();
doc.add(new DoubleField("latitude", 40.759011, Field.Store.NO));
doc.add(new DimensionalDoubleField("latitude", 40.759011));
doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.759011)));
doc.add(new DoubleField("longitude", -73.9844722, Field.Store.NO));
doc.add(new DimensionalDoubleField("longitude", -73.9844722));
doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-73.9844722)));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("latitude", 40.718266, Field.Store.NO));
doc.add(new DimensionalDoubleField("latitude", 40.718266));
doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.718266)));
doc.add(new DoubleField("longitude", -74.007819, Field.Store.NO));
doc.add(new DimensionalDoubleField("longitude", -74.007819));
doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.007819)));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("latitude", 40.7051157, Field.Store.NO));
doc.add(new DimensionalDoubleField("latitude", 40.7051157));
doc.add(new NumericDocValuesField("latitude", Double.doubleToRawLongBits(40.7051157)));
doc.add(new DoubleField("longitude", -74.0088305, Field.Store.NO));
doc.add(new DimensionalDoubleField("longitude", -74.0088305));
doc.add(new NumericDocValuesField("longitude", Double.doubleToRawLongBits(-74.0088305)));
writer.addDocument(doc);
@ -182,7 +181,7 @@ public class DistanceFacetsExample implements Closeable {
BooleanQuery.Builder f = new BooleanQuery.Builder();
// Add latitude range filter:
f.add(NumericRangeQuery.newDoubleRange("latitude", Math.toDegrees(minLat), Math.toDegrees(maxLat), true, true),
f.add(DimensionalRangeQuery.new1DDoubleRange("latitude", Math.toDegrees(minLat), true, Math.toDegrees(maxLat), true),
BooleanClause.Occur.FILTER);
// Add longitude range filter:
@ -190,13 +189,13 @@ public class DistanceFacetsExample implements Closeable {
// The bounding box crosses the international date
// line:
BooleanQuery.Builder lonF = new BooleanQuery.Builder();
lonF.add(NumericRangeQuery.newDoubleRange("longitude", Math.toDegrees(minLng), null, true, true),
lonF.add(DimensionalRangeQuery.new1DDoubleRange("longitude", Math.toDegrees(minLng), true, null, true),
BooleanClause.Occur.SHOULD);
lonF.add(NumericRangeQuery.newDoubleRange("longitude", null, Math.toDegrees(maxLng), true, true),
lonF.add(DimensionalRangeQuery.new1DDoubleRange("longitude", null, true, Math.toDegrees(maxLng), true),
BooleanClause.Occur.SHOULD);
f.add(lonF.build(), BooleanClause.Occur.MUST);
} else {
f.add(NumericRangeQuery.newDoubleRange("longitude", Math.toDegrees(minLng), Math.toDegrees(maxLng), true, true),
f.add(DimensionalRangeQuery.new1DDoubleRange("longitude", Math.toDegrees(minLng), true, Math.toDegrees(maxLng), true),
BooleanClause.Occur.FILTER);
}

View File

@ -17,13 +17,9 @@ package org.apache.lucene.demo.facet;
* limitations under the License.
*/
import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.facet.DrillDownQuery;
import org.apache.lucene.facet.FacetResult;
@ -36,13 +32,16 @@ import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.search.DimensionalRangeQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import java.io.Closeable;
import java.io.IOException;
/** Shows simple usage of dynamic range faceting. */
public class RangeFacetsExample implements Closeable {
@ -70,7 +69,7 @@ public class RangeFacetsExample implements Closeable {
// Add as doc values field, so we can compute range facets:
doc.add(new NumericDocValuesField("timestamp", then));
// Add as numeric field so we can drill-down:
doc.add(new LongField("timestamp", then, Field.Store.NO));
doc.add(new DimensionalLongField("timestamp", then));
indexWriter.addDocument(doc);
}
@ -108,7 +107,7 @@ public class RangeFacetsExample implements Closeable {
// documents ("browse only"):
DrillDownQuery q = new DrillDownQuery(getConfig());
q.add("timestamp", NumericRangeQuery.newLongRange("timestamp", range.min, range.max, range.minInclusive, range.maxInclusive));
q.add("timestamp", DimensionalRangeQuery.new1DLongRange("timestamp", range.min, range.minInclusive, range.max, range.maxInclusive));
return searcher.search(q, 10);
}

View File

@ -33,7 +33,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/** Represents a range over double values.
*
@ -63,7 +63,7 @@ public final class DoubleRange extends Range {
this.maxInclusive = maxInclusive;
// TODO: if DoubleDocValuesField used
// NumericUtils.doubleToSortableLong format (instead of
// LegacyNumericUtils.doubleToSortableLong format (instead of
// Double.doubleToRawLongBits) we could do comparisons
// in long space
@ -97,8 +97,8 @@ public final class DoubleRange extends Range {
LongRange toLongRange() {
return new LongRange(label,
NumericUtils.doubleToSortableLong(minIncl), true,
NumericUtils.doubleToSortableLong(maxIncl), true);
LegacyNumericUtils.doubleToSortableLong(minIncl), true,
LegacyNumericUtils.doubleToSortableLong(maxIncl), true);
}
@Override

View File

@ -38,7 +38,7 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/** {@link Facets} implementation that computes counts for
* dynamic double ranges from a provided {@link
@ -89,8 +89,8 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
for(int i=0;i<ranges.length;i++) {
DoubleRange range = ranges[i];
longRanges[i] = new LongRange(range.label,
NumericUtils.doubleToSortableLong(range.minIncl), true,
NumericUtils.doubleToSortableLong(range.maxIncl), true);
LegacyNumericUtils.doubleToSortableLong(range.minIncl), true,
LegacyNumericUtils.doubleToSortableLong(range.maxIncl), true);
}
LongRangeCounter counter = new LongRangeCounter(longRanges);
@ -131,7 +131,7 @@ public class DoubleRangeFacetCounts extends RangeFacetCounts {
}
// Skip missing docs:
if (fv.exists(doc)) {
counter.add(NumericUtils.doubleToSortableLong(fv.doubleVal(doc)));
counter.add(LegacyNumericUtils.doubleToSortableLong(fv.doubleVal(doc)));
} else {
missingCount++;
}

View File

@ -19,7 +19,7 @@ package org.apache.lucene.facet.range;
import org.apache.lucene.facet.DrillDownQuery; // javadocs
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.DimensionalRangeQuery; // javadocs
import org.apache.lucene.search.Query;
/** Base class for a single labeled range.
@ -46,7 +46,7 @@ public abstract class Range {
* or when intersected with another query that can lead the
* iteration. If the {@link ValueSource} is static, e.g. an
* indexed numeric field, then it may be more efficient to use
* {@link NumericRangeQuery}. The provided fastMatchQuery,
* {@link DimensionalRangeQuery}. The provided fastMatchQuery,
* if non-null, will first be consulted, and only if
* that is set for each document will the range then be
* checked. */
@ -60,7 +60,7 @@ public abstract class Range {
* or when intersected with another query that can lead the
* iteration. If the {@link ValueSource} is static, e.g. an
* indexed numeric field, then it may be more efficient to
* use {@link NumericRangeQuery}. */
* use {@link DimensionalRangeQuery}. */
public Query getQuery(ValueSource valueSource) {
return getQuery(null, valueSource);
}

View File

@ -23,13 +23,12 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.lucene.document.DimensionalDoubleField;
import org.apache.lucene.document.DimensionalFloatField;
import org.apache.lucene.document.DimensionalLongField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleDocValuesField;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.facet.DrillDownQuery;
import org.apache.lucene.facet.DrillSideways;
@ -56,10 +55,10 @@ import org.apache.lucene.queries.function.docvalues.DoubleDocValues;
import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
import org.apache.lucene.queries.function.valuesource.LongFieldSource;
import org.apache.lucene.search.DimensionalRangeQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
@ -67,7 +66,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
public class TestRangeFacetCounts extends FacetTestCase {
public void testBasicLong() throws Exception {
@ -221,7 +219,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// For computing range facet counts:
doc.add(new NumericDocValuesField("field", l));
// For drill down by numeric range:
doc.add(new LongField("field", l, Field.Store.NO));
doc.add(new DimensionalLongField("field", l));
if ((l&3) == 0) {
doc.add(new FacetField("dim", "a"));
@ -297,7 +295,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Third search, drill down on "less than or equal to 10":
ddq = new DrillDownQuery(config);
ddq.add("field", NumericRangeQuery.newLongRange("field", 0L, 10L, true, true));
ddq.add("field", DimensionalRangeQuery.new1DLongRange("field", 0L, true, 10L, true));
dsr = ds.search(null, ddq, 10);
assertEquals(11, dsr.hits.totalHits);
@ -385,7 +383,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
long v = random().nextLong();
values[i] = v;
doc.add(new NumericDocValuesField("field", v));
doc.add(new LongField("field", v, Field.Store.NO));
doc.add(new DimensionalLongField("field", v));
w.addDocument(doc);
minValue = Math.min(minValue, v);
maxValue = Math.max(maxValue, v);
@ -477,9 +475,9 @@ public class TestRangeFacetCounts extends FacetTestCase {
Query fastMatchQuery;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchQuery = NumericRangeQuery.newLongRange("field", minValue, maxValue, true, true);
fastMatchQuery = DimensionalRangeQuery.new1DLongRange("field", minValue, true, maxValue, true);
} else {
fastMatchQuery = NumericRangeQuery.newLongRange("field", minAcceptedValue, maxAcceptedValue, true, true);
fastMatchQuery = DimensionalRangeQuery.new1DLongRange("field", minAcceptedValue, true, maxAcceptedValue, true);
}
} else {
fastMatchQuery = null;
@ -501,7 +499,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Test drill-down:
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
ddq.add("field", NumericRangeQuery.newLongRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
ddq.add("field", DimensionalRangeQuery.new1DLongRange("field", range.min, range.minInclusive, range.max, range.maxInclusive));
} else {
ddq.add("field", range.getQuery(fastMatchQuery, vs));
}
@ -526,7 +524,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
float v = random().nextFloat();
values[i] = v;
doc.add(new FloatDocValuesField("field", v));
doc.add(new FloatField("field", v, Field.Store.NO));
doc.add(new DimensionalFloatField("field", v));
w.addDocument(doc);
minValue = Math.min(minValue, v);
maxValue = Math.max(maxValue, v);
@ -632,9 +630,9 @@ public class TestRangeFacetCounts extends FacetTestCase {
Query fastMatchQuery;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchQuery = NumericRangeQuery.newFloatRange("field", minValue, maxValue, true, true);
fastMatchQuery = DimensionalRangeQuery.new1DFloatRange("field", minValue, true, maxValue, true);
} else {
fastMatchQuery = NumericRangeQuery.newFloatRange("field", minAcceptedValue, maxAcceptedValue, true, true);
fastMatchQuery = DimensionalRangeQuery.new1DFloatRange("field", minAcceptedValue, true, maxAcceptedValue, true);
}
} else {
fastMatchQuery = null;
@ -656,7 +654,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Test drill-down:
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
ddq.add("field", NumericRangeQuery.newFloatRange("field", (float) range.min, (float) range.max, range.minInclusive, range.maxInclusive));
ddq.add("field", DimensionalRangeQuery.new1DFloatRange("field", (float) range.min, range.minInclusive, (float) range.max, range.maxInclusive));
} else {
ddq.add("field", range.getQuery(fastMatchQuery, vs));
}
@ -681,7 +679,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
double v = random().nextDouble();
values[i] = v;
doc.add(new DoubleDocValuesField("field", v));
doc.add(new DoubleField("field", v, Field.Store.NO));
doc.add(new DimensionalDoubleField("field", v));
w.addDocument(doc);
minValue = Math.min(minValue, v);
maxValue = Math.max(maxValue, v);
@ -771,9 +769,9 @@ public class TestRangeFacetCounts extends FacetTestCase {
Query fastMatchFilter;
if (random().nextBoolean()) {
if (random().nextBoolean()) {
fastMatchFilter = NumericRangeQuery.newDoubleRange("field", minValue, maxValue, true, true);
fastMatchFilter = DimensionalRangeQuery.new1DDoubleRange("field", minValue, true, maxValue, true);
} else {
fastMatchFilter = NumericRangeQuery.newDoubleRange("field", minAcceptedValue, maxAcceptedValue, true, true);
fastMatchFilter = DimensionalRangeQuery.new1DDoubleRange("field", minAcceptedValue, true, maxAcceptedValue, true);
}
} else {
fastMatchFilter = null;
@ -795,7 +793,7 @@ public class TestRangeFacetCounts extends FacetTestCase {
// Test drill-down:
DrillDownQuery ddq = new DrillDownQuery(config);
if (random().nextBoolean()) {
ddq.add("field", NumericRangeQuery.newDoubleRange("field", range.min, range.max, range.minInclusive, range.maxInclusive));
ddq.add("field", DimensionalRangeQuery.new1DDoubleRange("field", range.min, range.minInclusive, range.max, range.maxInclusive));
} else {
ddq.add("field", range.getQuery(fastMatchFilter, vs));
}

View File

@ -31,7 +31,6 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.DirectoryReader;
@ -237,9 +236,6 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
Field content = newTextField("content", "", Field.Store.NO);
doc.add(content);
docNoGroup.add(content);
IntField id = new IntField("id", 0, Field.Store.NO);
doc.add(id);
docNoGroup.add(id);
NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
doc.add(idDV);
docNoGroup.add(idDV);
@ -275,7 +271,6 @@ public class AllGroupHeadsCollectorTest extends LuceneTestCase {
sort2.setBytesValue(groupDoc.sort2);
sort3.setBytesValue(groupDoc.sort3);
content.setStringValue(groupDoc.content);
id.setIntValue(groupDoc.id);
idDV.setLongValue(groupDoc.id);
if (groupDoc.group == null) {
w.addDocument(docNoGroup);

View File

@ -33,7 +33,6 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
@ -597,7 +596,6 @@ public class TestGrouping extends LuceneTestCase {
doc.add(new SortedDocValuesField("sort1", BytesRef.deepCopyOf(groupValue.sort1)));
doc.add(newStringField("sort2", groupValue.sort2.utf8ToString(), Field.Store.NO));
doc.add(new SortedDocValuesField("sort2", BytesRef.deepCopyOf(groupValue.sort2)));
doc.add(new IntField("id", groupValue.id, Field.Store.NO));
doc.add(new NumericDocValuesField("id", groupValue.id));
doc.add(newTextField("content", groupValue.content, Field.Store.NO));
//System.out.println("TEST: doc content=" + groupValue.content + " group=" + (groupValue.group == null ? "null" : groupValue.group.utf8ToString()) + " sort1=" + groupValue.sort1.utf8ToString() + " id=" + groupValue.id);
@ -712,11 +710,8 @@ public class TestGrouping extends LuceneTestCase {
Field content = newTextField("content", "", Field.Store.NO);
doc.add(content);
docNoGroup.add(content);
IntField id = new IntField("id", 0, Field.Store.NO);
doc.add(id);
NumericDocValuesField idDV = new NumericDocValuesField("id", 0);
doc.add(idDV);
docNoGroup.add(id);
docNoGroup.add(idDV);
final GroupDoc[] groupDocs = new GroupDoc[numDocs];
for(int i=0;i<numDocs;i++) {
@ -751,7 +746,6 @@ public class TestGrouping extends LuceneTestCase {
sort1.setBytesValue(BytesRef.deepCopyOf(groupDoc.sort1));
sort2.setBytesValue(BytesRef.deepCopyOf(groupDoc.sort2));
content.setStringValue(groupDoc.content);
id.setIntValue(groupDoc.id);
idDV.setLongValue(groupDoc.id);
if (groupDoc.group == null) {
w.addDocument(docNoGroup);

View File

@ -17,8 +17,6 @@ package org.apache.lucene.search.highlight;
* limitations under the License.
*/
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
@ -31,6 +29,9 @@ import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.BaseTokenStreamTestCase;
import org.apache.lucene.analysis.CachingTokenFilter;
@ -44,10 +45,10 @@ import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.DimensionalIntField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
@ -59,14 +60,15 @@ import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.CommonTermsQuery;
import org.apache.lucene.queries.CustomScoreQuery;
import org.apache.lucene.queries.payloads.SpanPayloadCheckQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DimensionalRangeQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PhraseQuery.Builder;
import org.apache.lucene.search.PrefixQuery;
@ -82,7 +84,6 @@ import org.apache.lucene.search.join.QueryBitSetProducer;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.join.ToChildBlockJoinQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.queries.payloads.SpanPayloadCheckQuery;
import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanNotQuery;
@ -558,9 +559,9 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
numHighlights == 5);
}
public void testNumericRangeQuery() throws Exception {
public void testDimensionalRangeQuery() throws Exception {
// doesn't currently highlight, but make sure it doesn't cause exception either
query = NumericRangeQuery.newIntRange(NUMERIC_FIELD_NAME, 2, 6, true, true);
query = DimensionalRangeQuery.new1DIntRange(NUMERIC_FIELD_NAME, 2, true, 6, true);
searcher = newSearcher(reader);
hits = searcher.search(query, 100);
int maxNumFragmentsRequired = 2;
@ -2051,22 +2052,22 @@ public class HighlighterTest extends BaseTokenStreamTestCase implements Formatte
// a few tests need other docs...:
Document doc = new Document();
doc.add(new IntField(NUMERIC_FIELD_NAME, 1, Field.Store.NO));
doc.add(new DimensionalIntField(NUMERIC_FIELD_NAME, 1));
doc.add(new StoredField(NUMERIC_FIELD_NAME, 1));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField(NUMERIC_FIELD_NAME, 3, Field.Store.NO));
doc.add(new DimensionalIntField(NUMERIC_FIELD_NAME, 3));
doc.add(new StoredField(NUMERIC_FIELD_NAME, 3));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField(NUMERIC_FIELD_NAME, 5, Field.Store.NO));
doc.add(new DimensionalIntField(NUMERIC_FIELD_NAME, 5));
doc.add(new StoredField(NUMERIC_FIELD_NAME, 5));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField(NUMERIC_FIELD_NAME, 7, Field.Store.NO));
doc.add(new DimensionalIntField(NUMERIC_FIELD_NAME, 7));
doc.add(new StoredField(NUMERIC_FIELD_NAME, 7));
writer.addDocument(doc);

View File

@ -3,7 +3,8 @@ package org.apache.lucene.search.join;
import java.io.IOException;
import java.util.function.LongConsumer;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
@ -14,7 +15,7 @@ import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
@ -59,7 +60,7 @@ abstract class DocValuesTermsCollector<DV> extends SimpleCollector {
return (ctx) -> DocValues.getSortedSet(ctx, field);
}
static Function<BinaryDocValues> numericAsBinaryDocValues(String field, NumericType numTyp) {
static Function<BinaryDocValues> numericAsBinaryDocValues(String field, LegacyNumericType numTyp) {
return (ctx) -> {
final NumericDocValues numeric = DocValues.getNumeric(ctx, field);
final BytesRefBuilder bytes = new BytesRefBuilder();
@ -77,21 +78,21 @@ abstract class DocValuesTermsCollector<DV> extends SimpleCollector {
};
}
static LongConsumer coder(BytesRefBuilder bytes, NumericType type, String fieldName){
static LongConsumer coder(BytesRefBuilder bytes, LegacyNumericType type, String fieldName){
switch(type){
case INT:
return (l) -> NumericUtils.intToPrefixCoded((int)l, 0, bytes);
return (l) -> LegacyNumericUtils.intToPrefixCoded((int) l, 0, bytes);
case LONG:
return (l) -> NumericUtils.longToPrefixCoded(l, 0, bytes);
return (l) -> LegacyNumericUtils.longToPrefixCoded(l, 0, bytes);
default:
throw new IllegalArgumentException("Unsupported "+type+
". Only "+NumericType.INT+" and "+NumericType.LONG+" are supported."
". Only "+ LegacyNumericType.INT+" and "+ FieldType.LegacyNumericType.LONG+" are supported."
+ "Field "+fieldName );
}
}
/** this adapter is quite weird. ords are per doc index, don't use ords across different docs*/
static Function<SortedSetDocValues> sortedNumericAsSortedSetDocValues(String field, NumericType numTyp) {
static Function<SortedSetDocValues> sortedNumericAsSortedSetDocValues(String field, FieldType.LegacyNumericType numTyp) {
return (ctx) -> {
final SortedNumericDocValues numerics = DocValues.getSortedNumeric(ctx, field);
final BytesRefBuilder bytes = new BytesRefBuilder();

View File

@ -3,9 +3,7 @@ package org.apache.lucene.search.join;
import java.io.IOException;
import java.util.Locale;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
@ -106,8 +104,8 @@ public final class JoinUtil {
* @param multipleValuesPerDocument Whether the from field has multiple terms per document
* when true fromField might be {@link DocValuesType#SORTED_NUMERIC},
* otherwise fromField should be {@link DocValuesType#NUMERIC}
* @param toField The to field to join to, should be {@link IntField} or {@link LongField}
* @param numericType either {@link NumericType#INT} or {@link NumericType#LONG}, it should correspond to fromField and toField types
* @param toField The to field to join to, should be {@link org.apache.lucene.document.LegacyIntField} or {@link org.apache.lucene.document.LegacyLongField}
* @param numericType either {@link org.apache.lucene.document.FieldType.LegacyNumericType#INT} or {@link org.apache.lucene.document.FieldType.LegacyNumericType#LONG}, it should correspond to fromField and toField types
* @param fromQuery The query to match documents on the from side
* @param fromSearcher The searcher that executed the specified fromQuery
* @param scoreMode Instructs how scores from the fromQuery are mapped to the returned query
@ -118,7 +116,7 @@ public final class JoinUtil {
public static Query createJoinQuery(String fromField,
boolean multipleValuesPerDocument,
String toField, NumericType numericType,
String toField, LegacyNumericType numericType,
Query fromQuery,
IndexSearcher fromSearcher,
ScoreMode scoreMode) throws IOException {

View File

@ -38,7 +38,7 @@ import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefHash;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
class TermsIncludingScoreQuery extends Query {
@ -267,10 +267,10 @@ class TermsIncludingScoreQuery extends Query {
terms.get(ords[i], ref);
out.print(ref+" "+ref.utf8ToString()+" ");
try {
out.print(Long.toHexString(NumericUtils.prefixCodedToLong(ref))+"L");
out.print(Long.toHexString(LegacyNumericUtils.prefixCodedToLong(ref))+"L");
} catch (Exception e) {
try {
out.print(Integer.toHexString(NumericUtils.prefixCodedToInt(ref))+"i");
out.print(Integer.toHexString(LegacyNumericUtils.prefixCodedToInt(ref))+"i");
} catch (Exception ee) {
}
}

View File

@ -27,7 +27,7 @@ import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import java.io.IOException;
@ -199,7 +199,7 @@ public class ToParentBlockJoinSortField extends SortField {
return new NumericDocValues() {
@Override
public long get(int docID) {
return NumericUtils.sortableFloatBits((int) view.get(docID));
return LegacyNumericUtils.sortableFloatBits((int) view.get(docID));
}
};
}
@ -224,7 +224,7 @@ public class ToParentBlockJoinSortField extends SortField {
return new NumericDocValues() {
@Override
public long get(int docID) {
return NumericUtils.sortableDoubleBits(view.get(docID));
return LegacyNumericUtils.sortableDoubleBits(view.get(docID));
}
};
}

View File

@ -26,9 +26,9 @@ import java.util.Locale;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StoredField;
@ -46,18 +46,18 @@ import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.StoredDocument;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryUtils;
@ -76,8 +76,8 @@ import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
public class TestBlockJoin extends LuceneTestCase {
@ -95,7 +95,7 @@ public class TestBlockJoin extends LuceneTestCase {
private Document makeJob(String skill, int year) {
Document job = new Document();
job.add(newStringField("skill", skill, Field.Store.YES));
job.add(new IntField("year", year, Field.Store.NO));
job.add(new LegacyIntField("year", year, Field.Store.NO));
job.add(new StoredField("year", year));
return job;
}
@ -104,7 +104,7 @@ public class TestBlockJoin extends LuceneTestCase {
private Document makeQualification(String qualification, int year) {
Document job = new Document();
job.add(newStringField("qualification", qualification, Field.Store.YES));
job.add(new IntField("year", year, Field.Store.NO));
job.add(new LegacyIntField("year", year, Field.Store.NO));
return job;
}
@ -137,7 +137,7 @@ public class TestBlockJoin extends LuceneTestCase {
BooleanQuery.Builder childQuery = new BooleanQuery.Builder();
childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
childQuery.add(new BooleanClause(LegacyNumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
ToParentBlockJoinQuery childJoinQuery = new ToParentBlockJoinQuery(childQuery.build(), parentsFilter, ScoreMode.Avg);
@ -191,7 +191,7 @@ public class TestBlockJoin extends LuceneTestCase {
// Define child document criteria (finds an example of relevant work experience)
BooleanQuery.Builder childQuery = new BooleanQuery.Builder();
childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
childQuery.add(new BooleanClause(LegacyNumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
// Define parent document criteria (find a resident in the UK)
Query parentQuery = new TermQuery(new Term("country", "United Kingdom"));
@ -271,7 +271,7 @@ public class TestBlockJoin extends LuceneTestCase {
w.close();
IndexSearcher s = newSearcher(r);
MultiTermQuery qc = NumericRangeQuery.newIntRange("year", 2007, 2007, true, true);
MultiTermQuery qc = LegacyNumericRangeQuery.newIntRange("year", 2007, 2007, true, true);
// Hacky: this causes the query to need 2 rewrite
// iterations:
qc.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
@ -344,7 +344,7 @@ public class TestBlockJoin extends LuceneTestCase {
// Define child document criteria (finds an example of relevant work experience)
BooleanQuery.Builder childQuery = new BooleanQuery.Builder();
childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
childQuery.add(new BooleanClause(LegacyNumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
// Define parent document criteria (find a resident in the UK)
Query parentQuery = new TermQuery(new Term("country", "United Kingdom"));
@ -518,7 +518,7 @@ public class TestBlockJoin extends LuceneTestCase {
for(int parentDocID=0;parentDocID<numParentDocs;parentDocID++) {
Document parentDoc = new Document();
Document parentJoinDoc = new Document();
Field id = new IntField("parentID", parentDocID, Field.Store.YES);
Field id = new LegacyIntField("parentID", parentDocID, Field.Store.YES);
parentDoc.add(id);
parentJoinDoc.add(id);
parentJoinDoc.add(newStringField("isParent", "x", Field.Store.NO));
@ -540,8 +540,8 @@ public class TestBlockJoin extends LuceneTestCase {
}
if (doDeletes) {
parentDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
parentJoinDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
parentDoc.add(new LegacyIntField("blockID", parentDocID, Field.Store.NO));
parentJoinDoc.add(new LegacyIntField("blockID", parentDocID, Field.Store.NO));
}
final List<Document> joinDocs = new ArrayList<>();
@ -565,7 +565,7 @@ public class TestBlockJoin extends LuceneTestCase {
Document joinChildDoc = new Document();
joinDocs.add(joinChildDoc);
Field childID = new IntField("childID", childDocID, Field.Store.YES);
Field childID = new LegacyIntField("childID", childDocID, Field.Store.YES);
childDoc.add(childID);
joinChildDoc.add(childID);
childID = new NumericDocValuesField("childID", childDocID);
@ -598,7 +598,7 @@ public class TestBlockJoin extends LuceneTestCase {
}
if (doDeletes) {
joinChildDoc.add(new IntField("blockID", parentDocID, Field.Store.NO));
joinChildDoc.add(new LegacyIntField("blockID", parentDocID, Field.Store.NO));
}
w.addDocument(childDoc);
@ -618,7 +618,7 @@ public class TestBlockJoin extends LuceneTestCase {
if (VERBOSE) {
System.out.println("DELETE parentID=" + deleteID);
}
NumericUtils.intToPrefixCodedBytes(deleteID, 0, term);
LegacyNumericUtils.intToPrefixCodedBytes(deleteID, 0, term);
w.deleteDocuments(new Term("blockID", term.toBytesRef()));
joinW.deleteDocuments(new Term("blockID", term.toBytesRef()));
}
@ -1063,11 +1063,11 @@ public class TestBlockJoin extends LuceneTestCase {
// Define child document criteria (finds an example of relevant work experience)
BooleanQuery.Builder childJobQuery = new BooleanQuery.Builder();
childJobQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
childJobQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
childJobQuery.add(new BooleanClause(LegacyNumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
BooleanQuery.Builder childQualificationQuery = new BooleanQuery.Builder();
childQualificationQuery.add(new BooleanClause(new TermQuery(new Term("qualification", "maths")), Occur.MUST));
childQualificationQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 1980, 2000, true, true), Occur.MUST));
childQualificationQuery.add(new BooleanClause(LegacyNumericRangeQuery.newIntRange("year", 1980, 2000, true, true), Occur.MUST));
// Define parent document criteria (find a resident in the UK)
@ -1212,7 +1212,7 @@ public class TestBlockJoin extends LuceneTestCase {
// Define child document criteria (finds an example of relevant work experience)
BooleanQuery.Builder childQuery = new BooleanQuery.Builder();
childQuery.add(new BooleanClause(new TermQuery(new Term("skill", "java")), Occur.MUST));
childQuery.add(new BooleanClause(NumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
childQuery.add(new BooleanClause(LegacyNumericRangeQuery.newIntRange("year", 2006, 2011, true, true), Occur.MUST));
// Wrap the child document query to 'join' any matches
// up to corresponding parent:
@ -1711,7 +1711,7 @@ public class TestBlockJoin extends LuceneTestCase {
Query resumeQuery = new ToChildBlockJoinQuery(new TermQuery(new Term("country","rv" + qrv)),
resumeFilter);
Query jobQuery = new ToChildBlockJoinQuery(NumericRangeQuery.newIntRange("year", qjv, qjv, true, true),
Query jobQuery = new ToChildBlockJoinQuery(LegacyNumericRangeQuery.newIntRange("year", qjv, qjv, true, true),
jobFilter);
BooleanQuery.Builder fullQuery = new BooleanQuery.Builder();

View File

@ -19,9 +19,9 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
@ -863,7 +863,7 @@ public class TestJoinUtil extends LuceneTestCase {
final String toField = from ? "to":"from";
if (random().nextBoolean()) { // numbers
final NumericType numType = random().nextBoolean() ? NumericType.INT: NumericType.LONG ;
final LegacyNumericType numType = random().nextBoolean() ? LegacyNumericType.INT: LegacyNumericType.LONG ;
joinQuery = JoinUtil.createJoinQuery(fromField+numType, muliValsQuery, toField+numType, numType, actualQuery, indexSearcher, scoreMode);
} else {
joinQuery = JoinUtil.createJoinQuery(fromField, muliValsQuery, toField, actualQuery, indexSearcher, scoreMode);
@ -1204,19 +1204,19 @@ public class TestJoinUtil extends LuceneTestCase {
document.add(newTextField(random, fieldName, linkValue, Field.Store.NO));
final int linkInt = Integer.parseUnsignedInt(linkValue,16);
document.add(new IntField(fieldName+NumericType.INT, linkInt, Field.Store.NO));
document.add(new LegacyIntField(fieldName+ LegacyNumericType.INT, linkInt, Field.Store.NO));
final long linkLong = linkInt<<32 | linkInt;
document.add(new LongField(fieldName+NumericType.LONG, linkLong, Field.Store.NO));
document.add(new LegacyLongField(fieldName+ LegacyNumericType.LONG, linkLong, Field.Store.NO));
if (multipleValuesPerDocument) {
document.add(new SortedSetDocValuesField(fieldName, new BytesRef(linkValue)));
document.add(new SortedNumericDocValuesField(fieldName+NumericType.INT, linkInt));
document.add(new SortedNumericDocValuesField(fieldName+NumericType.LONG, linkLong));
document.add(new SortedNumericDocValuesField(fieldName+ LegacyNumericType.INT, linkInt));
document.add(new SortedNumericDocValuesField(fieldName+ LegacyNumericType.LONG, linkLong));
} else {
document.add(new SortedDocValuesField(fieldName, new BytesRef(linkValue)));
document.add(new NumericDocValuesField(fieldName+NumericType.INT, linkInt));
document.add(new NumericDocValuesField(fieldName+NumericType.LONG, linkLong));
document.add(new NumericDocValuesField(fieldName+ LegacyNumericType.INT, linkInt));
document.add(new NumericDocValuesField(fieldName+ LegacyNumericType.LONG, linkLong));
}
if (globalOrdinalJoin) {
document.add(new SortedDocValuesField("join_field", new BytesRef(linkValue)));

View File

@ -20,11 +20,6 @@ package org.apache.lucene.uninverting;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.lucene.analysis.NumericTokenStream;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.BinaryDocValues;
@ -37,7 +32,7 @@ import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.RamUsageEstimator;
/**
@ -88,18 +83,18 @@ interface FieldCache {
public static FieldCache DEFAULT = new FieldCacheImpl();
/**
* A parser instance for int values encoded by {@link NumericUtils}, e.g. when indexed
* via {@link IntField}/{@link NumericTokenStream}.
* A parser instance for int values encoded by {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
* via {@link org.apache.lucene.document.LegacyIntField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
*/
public static final Parser NUMERIC_UTILS_INT_PARSER = new Parser() {
@Override
public long parseValue(BytesRef term) {
return NumericUtils.prefixCodedToInt(term);
return LegacyNumericUtils.prefixCodedToInt(term);
}
@Override
public TermsEnum termsEnum(Terms terms) throws IOException {
return NumericUtils.filterPrefixCodedInts(terms.iterator());
return LegacyNumericUtils.filterPrefixCodedInts(terms.iterator());
}
@Override
@ -109,13 +104,13 @@ interface FieldCache {
};
/**
* A parser instance for float values encoded with {@link NumericUtils}, e.g. when indexed
* via {@link FloatField}/{@link NumericTokenStream}.
* A parser instance for float values encoded with {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
* via {@link org.apache.lucene.document.LegacyFloatField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
*/
public static final Parser NUMERIC_UTILS_FLOAT_PARSER = new Parser() {
@Override
public long parseValue(BytesRef term) {
int val = NumericUtils.prefixCodedToInt(term);
int val = LegacyNumericUtils.prefixCodedToInt(term);
if (val<0) val ^= 0x7fffffff;
return val;
}
@ -127,18 +122,18 @@ interface FieldCache {
@Override
public TermsEnum termsEnum(Terms terms) throws IOException {
return NumericUtils.filterPrefixCodedInts(terms.iterator());
return LegacyNumericUtils.filterPrefixCodedInts(terms.iterator());
}
};
/**
* A parser instance for long values encoded by {@link NumericUtils}, e.g. when indexed
* via {@link LongField}/{@link NumericTokenStream}.
* A parser instance for long values encoded by {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
* via {@link org.apache.lucene.document.LegacyLongField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
*/
public static final Parser NUMERIC_UTILS_LONG_PARSER = new Parser() {
@Override
public long parseValue(BytesRef term) {
return NumericUtils.prefixCodedToLong(term);
return LegacyNumericUtils.prefixCodedToLong(term);
}
@Override
public String toString() {
@ -147,18 +142,18 @@ interface FieldCache {
@Override
public TermsEnum termsEnum(Terms terms) throws IOException {
return NumericUtils.filterPrefixCodedLongs(terms.iterator());
return LegacyNumericUtils.filterPrefixCodedLongs(terms.iterator());
}
};
/**
* A parser instance for double values encoded with {@link NumericUtils}, e.g. when indexed
* via {@link DoubleField}/{@link NumericTokenStream}.
* A parser instance for double values encoded with {@link org.apache.lucene.util.LegacyNumericUtils}, e.g. when indexed
* via {@link org.apache.lucene.document.LegacyDoubleField}/{@link org.apache.lucene.analysis.LegacyNumericTokenStream}.
*/
public static final Parser NUMERIC_UTILS_DOUBLE_PARSER = new Parser() {
@Override
public long parseValue(BytesRef term) {
long val = NumericUtils.prefixCodedToLong(term);
long val = LegacyNumericUtils.prefixCodedToLong(term);
if (val<0) val ^= 0x7fffffffffffffffL;
return val;
}
@ -169,7 +164,7 @@ interface FieldCache {
@Override
public TermsEnum termsEnum(Terms terms) throws IOException {
return NumericUtils.filterPrefixCodedLongs(terms.iterator());
return LegacyNumericUtils.filterPrefixCodedLongs(terms.iterator());
}
};
@ -196,7 +191,7 @@ interface FieldCache {
* @param parser
* Computes long for string values. May be {@code null} if the
* requested field was indexed as {@link NumericDocValuesField} or
* {@link LongField}.
* {@link org.apache.lucene.document.LegacyLongField}.
* @param setDocsWithField
* If true then {@link #getDocsWithField} will also be computed and
* stored in the FieldCache.
@ -247,9 +242,9 @@ interface FieldCache {
public SortedDocValues getTermsIndex(LeafReader reader, String field, float acceptableOverheadRatio) throws IOException;
/** Can be passed to {@link #getDocTermOrds} to filter for 32-bit numeric terms */
public static final BytesRef INT32_TERM_PREFIX = new BytesRef(new byte[] { NumericUtils.SHIFT_START_INT });
public static final BytesRef INT32_TERM_PREFIX = new BytesRef(new byte[] { LegacyNumericUtils.SHIFT_START_INT });
/** Can be passed to {@link #getDocTermOrds} to filter for 64-bit numeric terms */
public static final BytesRef INT64_TERM_PREFIX = new BytesRef(new byte[] { NumericUtils.SHIFT_START_LONG });
public static final BytesRef INT64_TERM_PREFIX = new BytesRef(new byte[] { LegacyNumericUtils.SHIFT_START_LONG });
/**
* Checks the internal cache for an appropriate entry, and if none is found, reads the term values

View File

@ -23,10 +23,6 @@ import java.util.Collections;
import java.util.Map;
import org.apache.lucene.document.BinaryDocValuesField; // javadocs
import org.apache.lucene.document.DoubleField; // javadocs
import org.apache.lucene.document.FloatField; // javadocs
import org.apache.lucene.document.IntField; // javadocs
import org.apache.lucene.document.LongField; // javadocs
import org.apache.lucene.document.NumericDocValuesField; // javadocs
import org.apache.lucene.document.SortedDocValuesField; // javadocs
import org.apache.lucene.document.SortedSetDocValuesField; // javadocs
@ -64,28 +60,28 @@ public class UninvertingReader extends FilterLeafReader {
*/
public static enum Type {
/**
* Single-valued Integer, (e.g. indexed with {@link IntField})
* Single-valued Integer, (e.g. indexed with {@link org.apache.lucene.document.LegacyIntField})
* <p>
* Fields with this type act as if they were indexed with
* {@link NumericDocValuesField}.
*/
INTEGER,
/**
* Single-valued Long, (e.g. indexed with {@link LongField})
* Single-valued Long, (e.g. indexed with {@link org.apache.lucene.document.LegacyLongField})
* <p>
* Fields with this type act as if they were indexed with
* {@link NumericDocValuesField}.
*/
LONG,
/**
* Single-valued Float, (e.g. indexed with {@link FloatField})
* Single-valued Float, (e.g. indexed with {@link org.apache.lucene.document.LegacyFloatField})
* <p>
* Fields with this type act as if they were indexed with
* {@link NumericDocValuesField}.
*/
FLOAT,
/**
* Single-valued Double, (e.g. indexed with {@link DoubleField})
* Single-valued Double, (e.g. indexed with {@link org.apache.lucene.document.LegacyDoubleField})
* <p>
* Fields with this type act as if they were indexed with
* {@link NumericDocValuesField}.
@ -113,28 +109,28 @@ public class UninvertingReader extends FilterLeafReader {
*/
SORTED_SET_BINARY,
/**
* Multi-valued Integer, (e.g. indexed with {@link IntField})
* Multi-valued Integer, (e.g. indexed with {@link org.apache.lucene.document.LegacyIntField})
* <p>
* Fields with this type act as if they were indexed with
* {@link SortedSetDocValuesField}.
*/
SORTED_SET_INTEGER,
/**
* Multi-valued Float, (e.g. indexed with {@link FloatField})
* Multi-valued Float, (e.g. indexed with {@link org.apache.lucene.document.LegacyFloatField})
* <p>
* Fields with this type act as if they were indexed with
* {@link SortedSetDocValuesField}.
*/
SORTED_SET_FLOAT,
/**
* Multi-valued Long, (e.g. indexed with {@link LongField})
* Multi-valued Long, (e.g. indexed with {@link org.apache.lucene.document.LegacyLongField})
* <p>
* Fields with this type act as if they were indexed with
* {@link SortedSetDocValuesField}.
*/
SORTED_SET_LONG,
/**
* Multi-valued Double, (e.g. indexed with {@link DoubleField})
* Multi-valued Double, (e.g. indexed with {@link org.apache.lucene.document.LegacyDoubleField})
* <p>
* Fields with this type act as if they were indexed with
* {@link SortedSetDocValuesField}.

View File

@ -30,10 +30,10 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.DimensionalField;
import org.apache.lucene.document.DimensionalBinaryField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
@ -54,8 +54,8 @@ import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
import org.apache.lucene.util.bkd.BKDUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -174,8 +174,8 @@ public abstract class SorterTestBase extends LuceneTestCase {
doc.add(new SortedNumericDocValuesField(SORTED_NUMERIC_DV_FIELD, id + 1));
doc.add(new Field(TERM_VECTORS_FIELD, Integer.toString(id), TERM_VECTORS_TYPE));
byte[] bytes = new byte[4];
BKDUtil.intToBytes(id, bytes, 0);
doc.add(new DimensionalField(DIMENSIONAL_FIELD, bytes));
NumericUtils.intToBytes(id, bytes, 0);
doc.add(new DimensionalBinaryField(DIMENSIONAL_FIELD, bytes));
return doc;
}
@ -390,7 +390,7 @@ public abstract class SorterTestBase extends LuceneTestCase {
@Override
public void visit(int docID, byte[] packedValues) {
assertEquals(sortedValues[docID].intValue(), BKDUtil.bytesToInt(packedValues, 0));
assertEquals(sortedValues[docID].intValue(), NumericUtils.bytesToInt(packedValues, 0));
}
@Override

View File

@ -25,7 +25,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FloatDocValuesField;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValues;
@ -334,7 +334,7 @@ public class TestDiversifiedTopDocsCollector extends LuceneTestCase {
new BytesRef(""));
Field weeksAtNumberOneField = new FloatDocValuesField("weeksAtNumberOne",
0.0F);
Field weeksStoredField = new FloatField("weeks", 0.0F, Store.YES);
Field weeksStoredField = new LegacyFloatField("weeks", 0.0F, Store.YES);
Field idField = newStringField("id", "", Field.Store.YES);
Field songField = newTextField("song", "", Field.Store.NO);
Field storedArtistField = newTextField("artistName", "", Field.Store.NO);

View File

@ -29,8 +29,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@ -51,7 +51,7 @@ import org.apache.lucene.index.TermsEnum.SeekStatus;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.TestUtil;
@ -171,7 +171,7 @@ public class TestDocTermOrds extends LuceneTestCase {
for(int id=0;id<NUM_DOCS;id++) {
Document doc = new Document();
doc.add(new IntField("id", id, Field.Store.YES));
doc.add(new LegacyIntField("id", id, Field.Store.YES));
final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
while(ordsForDocSet.size() < termCount) {
@ -269,7 +269,7 @@ public class TestDocTermOrds extends LuceneTestCase {
for(int id=0;id<NUM_DOCS;id++) {
Document doc = new Document();
doc.add(new IntField("id", id, Field.Store.YES));
doc.add(new LegacyIntField("id", id, Field.Store.YES));
final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
while(ordsForDocSet.size() < termCount) {
@ -462,12 +462,12 @@ public class TestDocTermOrds extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
Document doc = new Document();
doc.add(new IntField("foo", 5, Field.Store.NO));
doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new IntField("foo", 5, Field.Store.NO));
doc.add(new IntField("foo", -3, Field.Store.NO));
doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
doc.add(new LegacyIntField("foo", -3, Field.Store.NO));
iw.addDocument(doc);
iw.forceMerge(1);
@ -489,10 +489,10 @@ public class TestDocTermOrds extends LuceneTestCase {
assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
BytesRef value = v.lookupOrd(0);
assertEquals(-3, NumericUtils.prefixCodedToInt(value));
assertEquals(-3, LegacyNumericUtils.prefixCodedToInt(value));
value = v.lookupOrd(1);
assertEquals(5, NumericUtils.prefixCodedToInt(value));
assertEquals(5, LegacyNumericUtils.prefixCodedToInt(value));
ir.close();
dir.close();
@ -503,12 +503,12 @@ public class TestDocTermOrds extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
Document doc = new Document();
doc.add(new LongField("foo", 5, Field.Store.NO));
doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new LongField("foo", 5, Field.Store.NO));
doc.add(new LongField("foo", -3, Field.Store.NO));
doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
doc.add(new LegacyLongField("foo", -3, Field.Store.NO));
iw.addDocument(doc);
iw.forceMerge(1);
@ -530,10 +530,10 @@ public class TestDocTermOrds extends LuceneTestCase {
assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
BytesRef value = v.lookupOrd(0);
assertEquals(-3, NumericUtils.prefixCodedToLong(value));
assertEquals(-3, LegacyNumericUtils.prefixCodedToLong(value));
value = v.lookupOrd(1);
assertEquals(5, NumericUtils.prefixCodedToLong(value));
assertEquals(5, LegacyNumericUtils.prefixCodedToLong(value));
ir.close();
dir.close();

View File

@ -30,12 +30,12 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
@ -57,8 +57,8 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -88,16 +88,16 @@ public class TestFieldCache extends LuceneTestCase {
}
for (int i = 0; i < NUM_DOCS; i++){
Document doc = new Document();
doc.add(new LongField("theLong", theLong--, Field.Store.NO));
doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
doc.add(new IntField("theInt", theInt--, Field.Store.NO));
doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
doc.add(new LegacyLongField("theLong", theLong--, Field.Store.NO));
doc.add(new LegacyDoubleField("theDouble", theDouble--, Field.Store.NO));
doc.add(new LegacyIntField("theInt", theInt--, Field.Store.NO));
doc.add(new LegacyFloatField("theFloat", theFloat--, Field.Store.NO));
if (i%2 == 0) {
doc.add(new IntField("sparse", i, Field.Store.NO));
doc.add(new LegacyIntField("sparse", i, Field.Store.NO));
}
if (i%2 == 0) {
doc.add(new IntField("numInt", i, Field.Store.NO));
doc.add(new LegacyIntField("numInt", i, Field.Store.NO));
}
// sometimes skip the field:
@ -142,11 +142,11 @@ public class TestFieldCache extends LuceneTestCase {
cache.getNumerics(reader, "theDouble", new FieldCache.Parser() {
@Override
public TermsEnum termsEnum(Terms terms) throws IOException {
return NumericUtils.filterPrefixCodedLongs(terms.iterator());
return LegacyNumericUtils.filterPrefixCodedLongs(terms.iterator());
}
@Override
public long parseValue(BytesRef term) {
int val = (int) NumericUtils.prefixCodedToLong(term);
int val = (int) LegacyNumericUtils.prefixCodedToLong(term);
if (val<0) val ^= 0x7fffffff;
return val;
}
@ -672,7 +672,7 @@ public class TestFieldCache extends LuceneTestCase {
cfg.setMergePolicy(newLogMergePolicy());
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
Document doc = new Document();
LongField field = new LongField("f", 0L, Store.YES);
LegacyLongField field = new LegacyLongField("f", 0L, Store.YES);
doc.add(field);
final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
for (int i = 0; i < values.length; ++i) {
@ -718,7 +718,7 @@ public class TestFieldCache extends LuceneTestCase {
cfg.setMergePolicy(newLogMergePolicy());
RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
Document doc = new Document();
IntField field = new IntField("f", 0, Store.YES);
LegacyIntField field = new LegacyIntField("f", 0, Store.YES);
doc.add(field);
final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
for (int i = 0; i < values.length; ++i) {

View File

@ -20,7 +20,7 @@ package org.apache.lucene.uninverting;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
@ -43,7 +43,7 @@ public class TestFieldCacheReopen extends LuceneTestCase {
setMergePolicy(newLogMergePolicy(10))
);
Document doc = new Document();
doc.add(new IntField("number", 17, Field.Store.NO));
doc.add(new LegacyIntField("number", 17, Field.Store.NO));
writer.addDocument(doc);
writer.commit();

View File

@ -20,11 +20,11 @@ import java.io.IOException;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
@ -59,10 +59,10 @@ public class TestFieldCacheSanityChecker extends LuceneTestCase {
float theFloat = Float.MAX_VALUE;
for (int i = 0; i < NUM_DOCS; i++){
Document doc = new Document();
doc.add(new LongField("theLong", theLong--, Field.Store.NO));
doc.add(new DoubleField("theDouble", theDouble--, Field.Store.NO));
doc.add(new IntField("theInt", theInt--, Field.Store.NO));
doc.add(new FloatField("theFloat", theFloat--, Field.Store.NO));
doc.add(new LegacyLongField("theLong", theLong--, Field.Store.NO));
doc.add(new LegacyDoubleField("theDouble", theDouble--, Field.Store.NO));
doc.add(new LegacyIntField("theInt", theInt--, Field.Store.NO));
doc.add(new LegacyFloatField("theFloat", theFloat--, Field.Store.NO));
if (0 == i % 3) {
wA.addDocument(doc);
} else {

View File

@ -24,11 +24,11 @@ import java.util.Map;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
@ -450,13 +450,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new IntField("value", 300000, Field.Store.YES));
doc.add(new LegacyIntField("value", 300000, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", -1, Field.Store.YES));
doc.add(new LegacyIntField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", 4, Field.Store.YES));
doc.add(new LegacyIntField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.INTEGER));
@ -483,10 +483,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", -1, Field.Store.YES));
doc.add(new LegacyIntField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", 4, Field.Store.YES));
doc.add(new LegacyIntField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.INTEGER));
@ -513,10 +513,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", -1, Field.Store.YES));
doc.add(new LegacyIntField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", 4, Field.Store.YES));
doc.add(new LegacyIntField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.INTEGER));
@ -543,13 +543,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new IntField("value", 300000, Field.Store.YES));
doc.add(new LegacyIntField("value", 300000, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", -1, Field.Store.YES));
doc.add(new LegacyIntField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new IntField("value", 4, Field.Store.YES));
doc.add(new LegacyIntField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.INTEGER));
@ -574,13 +574,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new LongField("value", 3000000000L, Field.Store.YES));
doc.add(new LegacyLongField("value", 3000000000L, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", -1, Field.Store.YES));
doc.add(new LegacyLongField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", 4, Field.Store.YES));
doc.add(new LegacyLongField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.LONG));
@ -607,10 +607,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", -1, Field.Store.YES));
doc.add(new LegacyLongField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", 4, Field.Store.YES));
doc.add(new LegacyLongField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.LONG));
@ -637,10 +637,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", -1, Field.Store.YES));
doc.add(new LegacyLongField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", 4, Field.Store.YES));
doc.add(new LegacyLongField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.LONG));
@ -667,13 +667,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new LongField("value", 3000000000L, Field.Store.YES));
doc.add(new LegacyLongField("value", 3000000000L, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", -1, Field.Store.YES));
doc.add(new LegacyLongField("value", -1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new LongField("value", 4, Field.Store.YES));
doc.add(new LegacyLongField("value", 4, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.LONG));
@ -698,13 +698,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new FloatField("value", 30.1f, Field.Store.YES));
doc.add(new LegacyFloatField("value", 30.1f, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", -1.3f, Field.Store.YES));
doc.add(new LegacyFloatField("value", -1.3f, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", 4.2f, Field.Store.YES));
doc.add(new LegacyFloatField("value", 4.2f, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.FLOAT));
@ -731,10 +731,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", -1.3f, Field.Store.YES));
doc.add(new LegacyFloatField("value", -1.3f, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", 4.2f, Field.Store.YES));
doc.add(new LegacyFloatField("value", 4.2f, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.FLOAT));
@ -761,10 +761,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", -1.3f, Field.Store.YES));
doc.add(new LegacyFloatField("value", -1.3f, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", 4.2f, Field.Store.YES));
doc.add(new LegacyFloatField("value", 4.2f, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.FLOAT));
@ -791,13 +791,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new FloatField("value", 30.1f, Field.Store.YES));
doc.add(new LegacyFloatField("value", 30.1f, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", -1.3f, Field.Store.YES));
doc.add(new LegacyFloatField("value", -1.3f, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new FloatField("value", 4.2f, Field.Store.YES));
doc.add(new LegacyFloatField("value", 4.2f, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.FLOAT));
@ -822,16 +822,16 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleField("value", 30.1, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 30.1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", -1.3, Field.Store.YES));
doc.add(new LegacyDoubleField("value", -1.3, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333333, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333332, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.DOUBLE));
@ -857,10 +857,10 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleField("value", +0d, Field.Store.YES));
doc.add(new LegacyDoubleField("value", +0d, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", -0d, Field.Store.YES));
doc.add(new LegacyDoubleField("value", -0d, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
@ -892,13 +892,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", -1.3, Field.Store.YES));
doc.add(new LegacyDoubleField("value", -1.3, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333333, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333332, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.DOUBLE));
@ -926,13 +926,13 @@ public class TestFieldCacheSort extends LuceneTestCase {
Document doc = new Document();
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", -1.3, Field.Store.YES));
doc.add(new LegacyDoubleField("value", -1.3, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333333, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333332, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.DOUBLE));
@ -960,16 +960,16 @@ public class TestFieldCacheSort extends LuceneTestCase {
Directory dir = newDirectory();
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new DoubleField("value", 30.1, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 30.1, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", -1.3, Field.Store.YES));
doc.add(new LegacyDoubleField("value", -1.3, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333333, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333333, Field.Store.YES));
writer.addDocument(doc);
doc = new Document();
doc.add(new DoubleField("value", 4.2333333333332, Field.Store.YES));
doc.add(new LegacyDoubleField("value", 4.2333333333332, Field.Store.YES));
writer.addDocument(doc);
IndexReader ir = UninvertingReader.wrap(writer.getReader(),
Collections.singletonMap("value", Type.DOUBLE));
@ -1050,7 +1050,7 @@ public class TestFieldCacheSort extends LuceneTestCase {
for(int seg=0;seg<2;seg++) {
for(int docIDX=0;docIDX<10;docIDX++) {
Document doc = new Document();
doc.add(new IntField("id", docIDX, Field.Store.YES));
doc.add(new LegacyIntField("id", docIDX, Field.Store.YES));
StringBuilder sb = new StringBuilder();
for(int i=0;i<id;i++) {
sb.append(' ');

View File

@ -31,7 +31,7 @@ import java.util.Set;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
@ -119,7 +119,7 @@ public class TestFieldCacheSortRandom extends LuceneTestCase {
docValues.add(null);
}
doc.add(new IntField("id", numDocs, Field.Store.YES));
doc.add(new LegacyIntField("id", numDocs, Field.Store.YES));
writer.addDocument(doc);
numDocs++;

View File

@ -23,11 +23,11 @@ import java.util.Map;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
@ -63,7 +63,7 @@ public class TestNumericTerms32 extends LuceneTestCase {
.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
.setMergePolicy(newLogMergePolicy()));
final FieldType storedInt = new FieldType(IntField.TYPE_NOT_STORED);
final FieldType storedInt = new FieldType(LegacyIntField.TYPE_NOT_STORED);
storedInt.setStored(true);
storedInt.freeze();
@ -76,10 +76,10 @@ public class TestNumericTerms32 extends LuceneTestCase {
final FieldType storedInt2 = new FieldType(storedInt);
storedInt2.setNumericPrecisionStep(2);
IntField
field8 = new IntField("field8", 0, storedInt8),
field4 = new IntField("field4", 0, storedInt4),
field2 = new IntField("field2", 0, storedInt2);
LegacyIntField
field8 = new LegacyIntField("field8", 0, storedInt8),
field4 = new LegacyIntField("field4", 0, storedInt4),
field2 = new LegacyIntField("field2", 0, storedInt2);
Document doc = new Document();
// add fields, that have a distance to test general functionality
@ -126,7 +126,7 @@ public class TestNumericTerms32 extends LuceneTestCase {
if (lower>upper) {
int a=lower; lower=upper; upper=a;
}
Query tq=NumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
TopDocs topDocs = searcher.search(tq, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
if (topDocs.totalHits==0) continue;
ScoreDoc[] sd = topDocs.scoreDocs;

View File

@ -23,11 +23,11 @@ import java.util.Map;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
@ -63,7 +63,7 @@ public class TestNumericTerms64 extends LuceneTestCase {
.setMaxBufferedDocs(TestUtil.nextInt(random(), 100, 1000))
.setMergePolicy(newLogMergePolicy()));
final FieldType storedLong = new FieldType(LongField.TYPE_NOT_STORED);
final FieldType storedLong = new FieldType(LegacyLongField.TYPE_NOT_STORED);
storedLong.setStored(true);
storedLong.freeze();
@ -79,11 +79,11 @@ public class TestNumericTerms64 extends LuceneTestCase {
final FieldType storedLong2 = new FieldType(storedLong);
storedLong2.setNumericPrecisionStep(2);
LongField
field8 = new LongField("field8", 0L, storedLong8),
field6 = new LongField("field6", 0L, storedLong6),
field4 = new LongField("field4", 0L, storedLong4),
field2 = new LongField("field2", 0L, storedLong2);
LegacyLongField
field8 = new LegacyLongField("field8", 0L, storedLong8),
field6 = new LegacyLongField("field6", 0L, storedLong6),
field4 = new LegacyLongField("field4", 0L, storedLong4),
field2 = new LegacyLongField("field2", 0L, storedLong2);
Document doc = new Document();
// add fields, that have a distance to test general functionality
@ -131,7 +131,7 @@ public class TestNumericTerms64 extends LuceneTestCase {
if (lower>upper) {
long a=lower; lower=upper; upper=a;
}
Query tq=NumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
Query tq= LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
TopDocs topDocs = searcher.search(tq, noDocs, new Sort(new SortField(field, SortField.Type.LONG, true)));
if (topDocs.totalHits==0) continue;
ScoreDoc[] sd = topDocs.scoreDocs;

View File

@ -28,8 +28,8 @@ import java.util.Collections;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
@ -41,7 +41,7 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.uninverting.UninvertingReader.Type;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.util.TestUtil;
public class TestUninvertingReader extends LuceneTestCase {
@ -51,12 +51,12 @@ public class TestUninvertingReader extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
Document doc = new Document();
doc.add(new IntField("foo", 5, Field.Store.NO));
doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new IntField("foo", 5, Field.Store.NO));
doc.add(new IntField("foo", -3, Field.Store.NO));
doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
doc.add(new LegacyIntField("foo", -3, Field.Store.NO));
iw.addDocument(doc);
iw.forceMerge(1);
@ -78,10 +78,10 @@ public class TestUninvertingReader extends LuceneTestCase {
assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
BytesRef value = v.lookupOrd(0);
assertEquals(-3, NumericUtils.prefixCodedToInt(value));
assertEquals(-3, LegacyNumericUtils.prefixCodedToInt(value));
value = v.lookupOrd(1);
assertEquals(5, NumericUtils.prefixCodedToInt(value));
assertEquals(5, LegacyNumericUtils.prefixCodedToInt(value));
TestUtil.checkReader(ir);
ir.close();
dir.close();
@ -92,12 +92,12 @@ public class TestUninvertingReader extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
Document doc = new Document();
doc.add(new IntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new IntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
doc.add(new IntField("foo", Float.floatToRawIntBits(-3f), Field.Store.NO));
doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(-3f), Field.Store.NO));
iw.addDocument(doc);
iw.forceMerge(1);
@ -120,10 +120,10 @@ public class TestUninvertingReader extends LuceneTestCase {
assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
BytesRef value = v.lookupOrd(0);
assertEquals(Float.floatToRawIntBits(-3f), NumericUtils.prefixCodedToInt(value));
assertEquals(Float.floatToRawIntBits(-3f), LegacyNumericUtils.prefixCodedToInt(value));
value = v.lookupOrd(1);
assertEquals(Float.floatToRawIntBits(5f), NumericUtils.prefixCodedToInt(value));
assertEquals(Float.floatToRawIntBits(5f), LegacyNumericUtils.prefixCodedToInt(value));
TestUtil.checkReader(ir);
ir.close();
dir.close();
@ -134,12 +134,12 @@ public class TestUninvertingReader extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
Document doc = new Document();
doc.add(new LongField("foo", 5, Field.Store.NO));
doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new LongField("foo", 5, Field.Store.NO));
doc.add(new LongField("foo", -3, Field.Store.NO));
doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
doc.add(new LegacyLongField("foo", -3, Field.Store.NO));
iw.addDocument(doc);
iw.forceMerge(1);
@ -161,10 +161,10 @@ public class TestUninvertingReader extends LuceneTestCase {
assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
BytesRef value = v.lookupOrd(0);
assertEquals(-3, NumericUtils.prefixCodedToLong(value));
assertEquals(-3, LegacyNumericUtils.prefixCodedToLong(value));
value = v.lookupOrd(1);
assertEquals(5, NumericUtils.prefixCodedToLong(value));
assertEquals(5, LegacyNumericUtils.prefixCodedToLong(value));
TestUtil.checkReader(ir);
ir.close();
dir.close();
@ -175,12 +175,12 @@ public class TestUninvertingReader extends LuceneTestCase {
IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
Document doc = new Document();
doc.add(new LongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
doc.add(new LegacyLongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
iw.addDocument(doc);
doc = new Document();
doc.add(new LongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
doc.add(new LongField("foo", Double.doubleToRawLongBits(-3d), Field.Store.NO));
doc.add(new LegacyLongField("foo", Double.doubleToRawLongBits(5d), Field.Store.NO));
doc.add(new LegacyLongField("foo", Double.doubleToRawLongBits(-3d), Field.Store.NO));
iw.addDocument(doc);
iw.forceMerge(1);
@ -202,10 +202,10 @@ public class TestUninvertingReader extends LuceneTestCase {
assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());
BytesRef value = v.lookupOrd(0);
assertEquals(Double.doubleToRawLongBits(-3d), NumericUtils.prefixCodedToLong(value));
assertEquals(Double.doubleToRawLongBits(-3d), LegacyNumericUtils.prefixCodedToLong(value));
value = v.lookupOrd(1);
assertEquals(Double.doubleToRawLongBits(5d), NumericUtils.prefixCodedToLong(value));
assertEquals(Double.doubleToRawLongBits(5d), LegacyNumericUtils.prefixCodedToLong(value));
TestUtil.checkReader(ir);
ir.close();
dir.close();
@ -217,7 +217,7 @@ public class TestUninvertingReader extends LuceneTestCase {
final Directory dir = newDirectory();
final IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
final FieldType NO_TRIE_TYPE = new FieldType(IntField.TYPE_NOT_STORED);
final FieldType NO_TRIE_TYPE = new FieldType(LegacyIntField.TYPE_NOT_STORED);
NO_TRIE_TYPE.setNumericPrecisionStep(Integer.MAX_VALUE);
final Map<String,Type> UNINVERT_MAP = new LinkedHashMap<String,Type>();
@ -238,8 +238,8 @@ public class TestUninvertingReader extends LuceneTestCase {
{ // (at least) one doc should have every value, so that at least one segment has every value
final Document doc = new Document();
for (int i = MIN; i <= MAX; i++) {
doc.add(new IntField("trie_multi", i, Field.Store.NO));
doc.add(new IntField("notrie_multi", i, NO_TRIE_TYPE));
doc.add(new LegacyIntField("trie_multi", i, Field.Store.NO));
doc.add(new LegacyIntField("notrie_multi", i, NO_TRIE_TYPE));
}
iw.addDocument(doc);
}
@ -249,15 +249,15 @@ public class TestUninvertingReader extends LuceneTestCase {
final Document doc = new Document();
if (0 != TestUtil.nextInt(random(), 0, 9)) {
int val = TestUtil.nextInt(random(), MIN, MAX);
doc.add(new IntField("trie_single", val, Field.Store.NO));
doc.add(new IntField("notrie_single", val, NO_TRIE_TYPE));
doc.add(new LegacyIntField("trie_single", val, Field.Store.NO));
doc.add(new LegacyIntField("notrie_single", val, NO_TRIE_TYPE));
}
if (0 != TestUtil.nextInt(random(), 0, 9)) {
int numMulti = atLeast(1);
while (0 < numMulti--) {
int val = TestUtil.nextInt(random(), MIN, MAX);
doc.add(new IntField("trie_multi", val, Field.Store.NO));
doc.add(new IntField("notrie_multi", val, NO_TRIE_TYPE));
doc.add(new LegacyIntField("trie_multi", val, Field.Store.NO));
doc.add(new LegacyIntField("notrie_multi", val, NO_TRIE_TYPE));
}
}
iw.addDocument(doc);

View File

@ -5,8 +5,8 @@ import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.TextField;
@ -143,11 +143,11 @@ public abstract class FunctionTestSetup extends LuceneTestCase {
f = newField(TEXT_FIELD, "text of doc" + scoreAndID + textLine(i), customType2); // for regular search
d.add(f);
f = new IntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
f = new LegacyIntField(INT_FIELD, scoreAndID, Store.YES); // for function scoring
d.add(f);
d.add(new NumericDocValuesField(INT_FIELD, scoreAndID));
f = new FloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
f = new LegacyFloatField(FLOAT_FIELD, scoreAndID, Store.YES); // for function scoring
d.add(f);
d.add(new NumericDocValuesField(FLOAT_FIELD, Float.floatToRawIntBits(scoreAndID)));

View File

@ -21,7 +21,7 @@ import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriterConfig;
@ -103,7 +103,7 @@ public class TestFunctionQuerySort extends LuceneTestCase {
RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
Document doc = new Document();
Field field = new IntField("value", 0, Field.Store.YES);
Field field = new LegacyIntField("value", 0, Field.Store.YES);
Field dvField = new NumericDocValuesField("value", 0);
doc.add(field);
doc.add(dvField);

View File

@ -25,11 +25,11 @@ import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.StringField;
@ -121,19 +121,19 @@ public class TestValueSources extends LuceneTestCase {
document.add(idField);
Field idDVField = new SortedDocValuesField("id", new BytesRef());
document.add(idDVField);
Field doubleField = new DoubleField("double", 0d, Field.Store.NO);
Field doubleField = new LegacyDoubleField("double", 0d, Field.Store.NO);
document.add(doubleField);
Field doubleDVField = new NumericDocValuesField("double", 0);
document.add(doubleDVField);
Field floatField = new FloatField("float", 0f, Field.Store.NO);
Field floatField = new LegacyFloatField("float", 0f, Field.Store.NO);
document.add(floatField);
Field floatDVField = new NumericDocValuesField("float", 0);
document.add(floatDVField);
Field intField = new IntField("int", 0, Field.Store.NO);
Field intField = new LegacyIntField("int", 0, Field.Store.NO);
document.add(intField);
Field intDVField = new NumericDocValuesField("int", 0);
document.add(intDVField);
Field longField = new LongField("long", 0L, Field.Store.NO);
Field longField = new LegacyLongField("long", 0L, Field.Store.NO);
document.add(longField);
Field longDVField = new NumericDocValuesField("long", 0);
document.add(longDVField);

View File

@ -17,7 +17,7 @@ package org.apache.lucene.queryparser.flexible.standard.builders;
* limitations under the License.
*/
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
import org.apache.lucene.queryparser.flexible.core.nodes.QueryNode;
@ -26,12 +26,12 @@ import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
import org.apache.lucene.queryparser.flexible.standard.config.NumericConfig;
import org.apache.lucene.queryparser.flexible.standard.nodes.NumericQueryNode;
import org.apache.lucene.queryparser.flexible.standard.nodes.NumericRangeQueryNode;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
/**
* Builds {@link NumericRangeQuery}s out of {@link NumericRangeQueryNode}s.
* Builds {@link org.apache.lucene.search.LegacyNumericRangeQuery}s out of {@link NumericRangeQueryNode}s.
*
* @see NumericRangeQuery
* @see org.apache.lucene.search.LegacyNumericRangeQuery
* @see NumericRangeQueryNode
*/
public class NumericRangeQueryNodeBuilder implements StandardQueryBuilder {
@ -44,7 +44,7 @@ public class NumericRangeQueryNodeBuilder implements StandardQueryBuilder {
}
@Override
public NumericRangeQuery<? extends Number> build(QueryNode queryNode)
public LegacyNumericRangeQuery<? extends Number> build(QueryNode queryNode)
throws QueryNodeException {
NumericRangeQueryNode numericRangeNode = (NumericRangeQueryNode) queryNode;
@ -55,7 +55,7 @@ public class NumericRangeQueryNodeBuilder implements StandardQueryBuilder {
Number upperNumber = upperNumericNode.getValue();
NumericConfig numericConfig = numericRangeNode.getNumericConfig();
NumericType numberType = numericConfig.getType();
FieldType.LegacyNumericType numberType = numericConfig.getType();
String field = StringUtils.toString(numericRangeNode.getField());
boolean minInclusive = numericRangeNode.isLowerInclusive();
boolean maxInclusive = numericRangeNode.isUpperInclusive();
@ -64,21 +64,21 @@ public class NumericRangeQueryNodeBuilder implements StandardQueryBuilder {
switch (numberType) {
case LONG:
return NumericRangeQuery.newLongRange(field, precisionStep,
return LegacyNumericRangeQuery.newLongRange(field, precisionStep,
(Long) lowerNumber, (Long) upperNumber, minInclusive, maxInclusive);
case INT:
return NumericRangeQuery.newIntRange(field, precisionStep,
return LegacyNumericRangeQuery.newIntRange(field, precisionStep,
(Integer) lowerNumber, (Integer) upperNumber, minInclusive,
maxInclusive);
case FLOAT:
return NumericRangeQuery.newFloatRange(field, precisionStep,
return LegacyNumericRangeQuery.newFloatRange(field, precisionStep,
(Float) lowerNumber, (Float) upperNumber, minInclusive,
maxInclusive);
case DOUBLE:
return NumericRangeQuery.newDoubleRange(field, precisionStep,
return LegacyNumericRangeQuery.newDoubleRange(field, precisionStep,
(Double) lowerNumber, (Double) upperNumber, minInclusive,
maxInclusive);

View File

@ -20,14 +20,14 @@ package org.apache.lucene.queryparser.flexible.standard.config;
import java.text.NumberFormat;
import java.util.Objects;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
/**
* This class holds the configuration used to parse numeric queries and create
* {@link NumericRangeQuery}s.
* {@link org.apache.lucene.search.LegacyNumericRangeQuery}s.
*
* @see NumericRangeQuery
* @see org.apache.lucene.search.LegacyNumericRangeQuery
* @see NumberFormat
*/
public class NumericConfig {
@ -36,7 +36,7 @@ public class NumericConfig {
private NumberFormat format;
private NumericType type;
private FieldType.LegacyNumericType type;
/**
* Constructs a {@link NumericConfig} object.
@ -51,10 +51,10 @@ public class NumericConfig {
*
* @see NumericConfig#setPrecisionStep(int)
* @see NumericConfig#setNumberFormat(NumberFormat)
* @see #setType(org.apache.lucene.document.FieldType.NumericType)
* @see #setType(org.apache.lucene.document.FieldType.LegacyNumericType)
*/
public NumericConfig(int precisionStep, NumberFormat format,
NumericType type) {
LegacyNumericType type) {
setPrecisionStep(precisionStep);
setNumberFormat(format);
setType(type);
@ -66,7 +66,7 @@ public class NumericConfig {
*
* @return the precision used to index the numeric values
*
* @see NumericRangeQuery#getPrecisionStep()
* @see org.apache.lucene.search.LegacyNumericRangeQuery#getPrecisionStep()
*/
public int getPrecisionStep() {
return precisionStep;
@ -78,7 +78,7 @@ public class NumericConfig {
* @param precisionStep
* the precision used to index the numeric values
*
* @see NumericRangeQuery#getPrecisionStep()
* @see org.apache.lucene.search.LegacyNumericRangeQuery#getPrecisionStep()
*/
public void setPrecisionStep(int precisionStep) {
this.precisionStep = precisionStep;
@ -100,7 +100,7 @@ public class NumericConfig {
*
* @return the numeric type used to index the numeric values
*/
public NumericType getType() {
public LegacyNumericType getType() {
return type;
}
@ -109,7 +109,7 @@ public class NumericConfig {
*
* @param type the numeric type used to index the numeric values
*/
public void setType(NumericType type) {
public void setType(LegacyNumericType type) {
if (type == null) {
throw new IllegalArgumentException("type cannot be null!");

View File

@ -17,10 +17,10 @@ package org.apache.lucene.queryparser.flexible.standard.nodes;
* the License.
*/
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.core.messages.QueryParserMessages;
import org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode;
import org.apache.lucene.queryparser.flexible.messages.MessageImpl;
import org.apache.lucene.queryparser.flexible.standard.config.NumericConfig;
@ -53,16 +53,16 @@ public class NumericRangeQueryNode extends
setBounds(lower, upper, lowerInclusive, upperInclusive, numericConfig);
}
private static NumericType getNumericDataType(Number number) throws QueryNodeException {
private static LegacyNumericType getNumericDataType(Number number) throws QueryNodeException {
if (number instanceof Long) {
return NumericType.LONG;
return FieldType.LegacyNumericType.LONG;
} else if (number instanceof Integer) {
return NumericType.INT;
return FieldType.LegacyNumericType.INT;
} else if (number instanceof Double) {
return NumericType.DOUBLE;
return LegacyNumericType.DOUBLE;
} else if (number instanceof Float) {
return NumericType.FLOAT;
return FieldType.LegacyNumericType.FLOAT;
} else {
throw new QueryNodeException(
new MessageImpl(
@ -90,7 +90,7 @@ public class NumericRangeQueryNode extends
throw new IllegalArgumentException("numericConfig cannot be null!");
}
NumericType lowerNumberType, upperNumberType;
LegacyNumericType lowerNumberType, upperNumberType;
if (lower != null && lower.getValue() != null) {
lowerNumberType = getNumericDataType(lower.getValue());

View File

@ -67,7 +67,7 @@ public class CoreParser implements QueryBuilder {
queryFactory.addBuilder("TermsQuery", new TermsQueryBuilder(analyzer));
queryFactory.addBuilder("MatchAllDocsQuery", new MatchAllDocsQueryBuilder());
queryFactory.addBuilder("BooleanQuery", new BooleanQueryBuilder(queryFactory));
queryFactory.addBuilder("NumericRangeQuery", new NumericRangeQueryBuilder());
queryFactory.addBuilder("LegacyNumericRangeQuery", new LegacyNumericRangeQueryBuilder());
queryFactory.addBuilder("RangeQuery", new RangeQueryBuilder());
queryFactory.addBuilder("DisjunctionMaxQuery", new DisjunctionMaxQueryBuilder(queryFactory));
if (parser != null) {

View File

@ -17,19 +17,19 @@ package org.apache.lucene.queryparser.xml.builders;
* limitations under the License.
*/
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.LegacyNumericUtils;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.w3c.dom.Element;
/**
* Creates a {@link NumericRangeQuery}. The table below specifies the required
* Creates a {@link org.apache.lucene.search.LegacyNumericRangeQuery}. The table below specifies the required
* attributes and the defaults if optional attributes are omitted. For more
* detail on what each of the attributes actually do, consult the documentation
* for {@link NumericRangeQuery}:
* for {@link org.apache.lucene.search.LegacyNumericRangeQuery}:
* <table summary="supported attributes">
* <tr>
* <th>Attribute name</th>
@ -85,7 +85,7 @@ import org.w3c.dom.Element;
* supplied <tt>lowerTerm</tt> or <tt>upperTerm</tt> into the numeric type
* specified by <tt>type</tt>.
*/
public class NumericRangeQueryBuilder implements QueryBuilder {
public class LegacyNumericRangeQueryBuilder implements QueryBuilder {
@Override
public Query getQuery(Element e) throws ParserException {
@ -94,26 +94,26 @@ public class NumericRangeQueryBuilder implements QueryBuilder {
String upperTerm = DOMUtils.getAttributeOrFail(e, "upperTerm");
boolean lowerInclusive = DOMUtils.getAttribute(e, "includeLower", true);
boolean upperInclusive = DOMUtils.getAttribute(e, "includeUpper", true);
int precisionStep = DOMUtils.getAttribute(e, "precisionStep", NumericUtils.PRECISION_STEP_DEFAULT);
int precisionStep = DOMUtils.getAttribute(e, "precisionStep", LegacyNumericUtils.PRECISION_STEP_DEFAULT);
String type = DOMUtils.getAttribute(e, "type", "int");
try {
Query filter;
if (type.equalsIgnoreCase("int")) {
filter = NumericRangeQuery.newIntRange(field, precisionStep, Integer
.valueOf(lowerTerm), Integer.valueOf(upperTerm), lowerInclusive,
filter = LegacyNumericRangeQuery.newIntRange(field, precisionStep, Integer
.valueOf(lowerTerm), Integer.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else if (type.equalsIgnoreCase("long")) {
filter = NumericRangeQuery.newLongRange(field, precisionStep, Long
.valueOf(lowerTerm), Long.valueOf(upperTerm), lowerInclusive,
filter = LegacyNumericRangeQuery.newLongRange(field, precisionStep, Long
.valueOf(lowerTerm), Long.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else if (type.equalsIgnoreCase("double")) {
filter = NumericRangeQuery.newDoubleRange(field, precisionStep, Double
.valueOf(lowerTerm), Double.valueOf(upperTerm), lowerInclusive,
filter = LegacyNumericRangeQuery.newDoubleRange(field, precisionStep, Double
.valueOf(lowerTerm), Double.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else if (type.equalsIgnoreCase("float")) {
filter = NumericRangeQuery.newFloatRange(field, precisionStep, Float
.valueOf(lowerTerm), Float.valueOf(upperTerm), lowerInclusive,
filter = LegacyNumericRangeQuery.newFloatRange(field, precisionStep, Float
.valueOf(lowerTerm), Float.valueOf(upperTerm), lowerInclusive,
upperInclusive);
} else {
throw new ParserException("type attribute must be one of: [long, int, double, float]");

View File

@ -288,7 +288,7 @@ Passes content directly through to the standard LuceneQuery parser see "Lucene Q
@example
<em>Search for documents about people who are aged 20-25</em>
%
<NumericRangeQuery fieldName="age" lowerTerm="20" upperTerm="25" />
<LegacyNumericRangeQuery fieldName="age" lowerTerm="20" upperTerm="25" />
%
-->
<!ELEMENT NumericRangeQuery EMPTY>

View File

@ -33,13 +33,13 @@ import java.util.TimeZone;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleField;
import org.apache.lucene.document.LegacyDoubleField;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType.NumericType;
import org.apache.lucene.document.FieldType.LegacyNumericType;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.FloatField;
import org.apache.lucene.document.IntField;
import org.apache.lucene.document.LongField;
import org.apache.lucene.document.LegacyFloatField;
import org.apache.lucene.document.LegacyIntField;
import org.apache.lucene.document.LegacyLongField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
@ -179,10 +179,10 @@ public class TestNumericQueryParser extends LuceneTestCase {
while ((randomInt = normalizeNumber(Math.abs(random().nextInt())).intValue()) == 0)
;
randomNumberMap.put(NumericType.LONG.name(), randomLong);
randomNumberMap.put(NumericType.INT.name(), randomInt);
randomNumberMap.put(NumericType.FLOAT.name(), randomFloat);
randomNumberMap.put(NumericType.DOUBLE.name(), randomDouble);
randomNumberMap.put(LegacyNumericType.LONG.name(), randomLong);
randomNumberMap.put(FieldType.LegacyNumericType.INT.name(), randomInt);
randomNumberMap.put(LegacyNumericType.FLOAT.name(), randomFloat);
randomNumberMap.put(LegacyNumericType.DOUBLE.name(), randomDouble);
randomNumberMap.put(DATE_FIELD_NAME, randomDate);
RANDOM_NUMBER_MAP = Collections.unmodifiableMap(randomNumberMap);
@ -198,11 +198,11 @@ public class TestNumericQueryParser extends LuceneTestCase {
HashMap<String,Field> numericFieldMap = new HashMap<>();
qp.setNumericConfigMap(numericConfigMap);
for (NumericType type : NumericType.values()) {
for (LegacyNumericType type : LegacyNumericType.values()) {
numericConfigMap.put(type.name(), new NumericConfig(PRECISION_STEP,
NUMBER_FORMAT, type));
FieldType ft = new FieldType(IntField.TYPE_NOT_STORED);
FieldType ft = new FieldType(LegacyIntField.TYPE_NOT_STORED);
ft.setNumericType(type);
ft.setStored(true);
ft.setNumericPrecisionStep(PRECISION_STEP);
@ -211,16 +211,16 @@ public class TestNumericQueryParser extends LuceneTestCase {
switch(type) {
case INT:
field = new IntField(type.name(), 0, ft);
field = new LegacyIntField(type.name(), 0, ft);
break;
case FLOAT:
field = new FloatField(type.name(), 0.0f, ft);
field = new LegacyFloatField(type.name(), 0.0f, ft);
break;
case LONG:
field = new LongField(type.name(), 0l, ft);
field = new LegacyLongField(type.name(), 0l, ft);
break;
case DOUBLE:
field = new DoubleField(type.name(), 0.0, ft);
field = new LegacyDoubleField(type.name(), 0.0, ft);
break;
default:
fail();
@ -231,11 +231,11 @@ public class TestNumericQueryParser extends LuceneTestCase {
}
numericConfigMap.put(DATE_FIELD_NAME, new NumericConfig(PRECISION_STEP,
DATE_FORMAT, NumericType.LONG));
FieldType ft = new FieldType(LongField.TYPE_NOT_STORED);
DATE_FORMAT, LegacyNumericType.LONG));
FieldType ft = new FieldType(LegacyLongField.TYPE_NOT_STORED);
ft.setStored(true);
ft.setNumericPrecisionStep(PRECISION_STEP);
LongField dateField = new LongField(DATE_FIELD_NAME, 0l, ft);
LegacyLongField dateField = new LegacyLongField(DATE_FIELD_NAME, 0l, ft);
numericFieldMap.put(DATE_FIELD_NAME, dateField);
doc.add(dateField);
@ -265,17 +265,17 @@ public class TestNumericQueryParser extends LuceneTestCase {
case NEGATIVE:
Number number = RANDOM_NUMBER_MAP.get(fieldName);
if (NumericType.LONG.name().equals(fieldName)
if (LegacyNumericType.LONG.name().equals(fieldName)
|| DATE_FIELD_NAME.equals(fieldName)) {
number = -number.longValue();
} else if (NumericType.DOUBLE.name().equals(fieldName)) {
} else if (FieldType.LegacyNumericType.DOUBLE.name().equals(fieldName)) {
number = -number.doubleValue();
} else if (NumericType.FLOAT.name().equals(fieldName)) {
} else if (FieldType.LegacyNumericType.FLOAT.name().equals(fieldName)) {
number = -number.floatValue();
} else if (NumericType.INT.name().equals(fieldName)) {
} else if (LegacyNumericType.INT.name().equals(fieldName)) {
number = -number.intValue();
} else {
@ -295,21 +295,21 @@ public class TestNumericQueryParser extends LuceneTestCase {
private static void setFieldValues(NumberType numberType,
HashMap<String,Field> numericFieldMap) {
Number number = getNumberType(numberType, NumericType.DOUBLE
Number number = getNumberType(numberType, LegacyNumericType.DOUBLE
.name());
numericFieldMap.get(NumericType.DOUBLE.name()).setDoubleValue(
numericFieldMap.get(LegacyNumericType.DOUBLE.name()).setDoubleValue(
number.doubleValue());
number = getNumberType(numberType, NumericType.INT.name());
numericFieldMap.get(NumericType.INT.name()).setIntValue(
number = getNumberType(numberType, FieldType.LegacyNumericType.INT.name());
numericFieldMap.get(FieldType.LegacyNumericType.INT.name()).setIntValue(
number.intValue());
number = getNumberType(numberType, NumericType.LONG.name());
numericFieldMap.get(NumericType.LONG.name()).setLongValue(
number = getNumberType(numberType, LegacyNumericType.LONG.name());
numericFieldMap.get(FieldType.LegacyNumericType.LONG.name()).setLongValue(
number.longValue());
number = getNumberType(numberType, NumericType.FLOAT.name());
numericFieldMap.get(NumericType.FLOAT.name()).setFloatValue(
number = getNumberType(numberType, FieldType.LegacyNumericType.FLOAT.name());
numericFieldMap.get(FieldType.LegacyNumericType.FLOAT.name()).setFloatValue(
number.floatValue());
number = getNumberType(numberType, DATE_FIELD_NAME);
@ -411,7 +411,7 @@ public class TestNumericQueryParser extends LuceneTestCase {
String lowerInclusiveStr = (lowerInclusive ? "[" : "{");
String upperInclusiveStr = (upperInclusive ? "]" : "}");
for (NumericType type : NumericType.values()) {
for (LegacyNumericType type : LegacyNumericType.values()) {
String lowerStr = numberToString(getNumberType(lowerType, type.name()));
String upperStr = numberToString(getNumberType(upperType, type.name()));
@ -457,7 +457,7 @@ public class TestNumericQueryParser extends LuceneTestCase {
StringBuilder sb = new StringBuilder();
for (NumericType type : NumericType.values()) {
for (LegacyNumericType type : FieldType.LegacyNumericType.values()) {
String boundStr = numberToString(getNumberType(boundType, type.name()));
sb.append("+").append(type.name()).append(operator).append('"').append(boundStr).append('"').append(' ');
@ -476,7 +476,7 @@ public class TestNumericQueryParser extends LuceneTestCase {
throws QueryNodeException, IOException {
StringBuilder sb = new StringBuilder();
for (NumericType type : NumericType.values()) {
for (LegacyNumericType type : LegacyNumericType.values()) {
String numberStr = numberToString(getNumberType(numberType, type.name()));
sb.append('+').append(type.name()).append(":\"").append(numberStr)
.append("\" ");

View File

@ -26,6 +26,6 @@
<TermQuery>bank</TermQuery>
</Clause>
<Clause occurs="must">
<NumericRangeQuery fieldName="date2" lowerTerm="19870409" upperTerm="19870412"/>
<LegacyNumericRangeQuery fieldName="date2" lowerTerm="19870409" upperTerm="19870412"/>
</Clause>
</BooleanQuery>

Some files were not shown because too many files have changed in this diff Show More