mirror of https://github.com/apache/lucene.git
Merge branch 'master' of https://git-wip-us.apache.org/repos/asf/lucene-solr
commit bc815b5207
@@ -193,6 +193,8 @@ Optimizations
 * LUCENE-6940: MUST_NOT clauses execute faster, especially when they are sparse.
   (Adrien Grand)
 
+* LUCENE-6470: Improve efficiency of TermsQuery constructors. (Robert Muir)
+
 Bug Fixes
 
 * LUCENE-6976: BytesRefTermAttributeImpl.copyTo NPE'ed if BytesRef was null.
@@ -255,6 +257,9 @@ Other
   Locale#forLanguageTag() and Locale#toString() were placed on list
   of forbidden signatures. (Uwe Schindler, Robert Muir)
 
+* LUCENE-6988: You can now add IndexableFields directly to a MemoryIndex,
+  and create a MemoryIndex from a lucene Document. (Alan Woodward)
+
 ======================= Lucene 5.4.1 =======================
 
 Bug Fixes
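Not part of the commit, but for readers of the LUCENE-6988 entry above: a minimal sketch of how the new entry point might be used, assuming the usual org.apache.lucene.index.memory.MemoryIndex import path; the field name, text, and the choice of StandardAnalyzer are invented for illustration.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.search.TermQuery;

    public class FromDocumentSketch {
      public static void main(String[] args) {
        // hypothetical document; fromDocument analyzes every IndexableField it contains
        Document doc = new Document();
        doc.add(new TextField("body", "the quick brown fox", Field.Store.NO));

        MemoryIndex mi = MemoryIndex.fromDocument(doc, new StandardAnalyzer());
        // search returns a score greater than 0 when the query matches the single in-memory document
        System.out.println(mi.search(new TermQuery(new Term("body", "fox"))));
      }
    }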
@@ -66,22 +66,27 @@ public class PrefixCodedTerms implements Accountable {
 
     /** add a term */
     public void add(Term term) {
-      assert lastTerm.equals(new Term("")) || term.compareTo(lastTerm) > 0;
+      add(term.field(), term.bytes());
+    }
+
+    /** add a term */
+    public void add(String field, BytesRef bytes) {
+      assert lastTerm.equals(new Term("")) || new Term(field, bytes).compareTo(lastTerm) > 0;
 
       try {
-        int prefix = sharedPrefix(lastTerm.bytes, term.bytes);
-        int suffix = term.bytes.length - prefix;
-        if (term.field.equals(lastTerm.field)) {
+        int prefix = sharedPrefix(lastTerm.bytes, bytes);
+        int suffix = bytes.length - prefix;
+        if (field.equals(lastTerm.field)) {
           output.writeVInt(prefix << 1);
         } else {
           output.writeVInt(prefix << 1 | 1);
-          output.writeString(term.field);
+          output.writeString(field);
         }
         output.writeVInt(suffix);
-        output.writeBytes(term.bytes.bytes, term.bytes.offset + prefix, suffix);
-        lastTermBytes.copyBytes(term.bytes);
+        output.writeBytes(bytes.bytes, bytes.offset + prefix, suffix);
+        lastTermBytes.copyBytes(bytes);
         lastTerm.bytes = lastTermBytes.get();
-        lastTerm.field = term.field;
+        lastTerm.field = field;
         size += 1;
       } catch (IOException e) {
         throw new RuntimeException(e);
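Not part of the commit: a sketch of what the new add(String, BytesRef) overload above enables, namely feeding sorted field/bytes pairs to the builder without allocating a Term per entry. The import path and the visibility of Builder outside Lucene's internals are assumptions; field and term values are invented.

    import org.apache.lucene.index.PrefixCodedTerms;
    import org.apache.lucene.util.BytesRef;

    public class PrefixCodedTermsSketch {
      public static void main(String[] args) {
        PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
        // terms must arrive in sorted (field, bytes) order; the assert in add() checks this
        builder.add("contents", new BytesRef("apple"));
        builder.add("contents", new BytesRef("banana"));
        PrefixCodedTerms terms = builder.finish();
        // PrefixCodedTerms implements Accountable, so its memory footprint can be reported
        System.out.println(terms.ramBytesUsed() + " bytes used");
      }
    }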
@@ -33,46 +33,18 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute;
-import org.apache.lucene.index.BinaryDocValues;
-import org.apache.lucene.index.PointValues;
-import org.apache.lucene.index.DocValuesType;
-import org.apache.lucene.index.FieldInfo;
-import org.apache.lucene.index.FieldInfos;
-import org.apache.lucene.index.FieldInvertState;
-import org.apache.lucene.index.Fields;
-import org.apache.lucene.index.IndexOptions;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.index.OrdTermState;
-import org.apache.lucene.index.PostingsEnum;
-import org.apache.lucene.index.SortedDocValues;
-import org.apache.lucene.index.SortedNumericDocValues;
-import org.apache.lucene.index.SortedSetDocValues;
-import org.apache.lucene.index.StoredFieldVisitor;
-import org.apache.lucene.index.TermState;
-import org.apache.lucene.index.Terms;
-import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.index.*;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.SimpleCollector;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.RAMDirectory;
-import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.ByteBlockPool;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefArray;
-import org.apache.lucene.util.BytesRefBuilder;
+import org.apache.lucene.util.*;
 import org.apache.lucene.util.BytesRefHash.DirectBytesStartArray;
-import org.apache.lucene.util.BytesRefHash;
-import org.apache.lucene.util.Counter;
 import org.apache.lucene.util.IntBlockPool.SliceReader;
 import org.apache.lucene.util.IntBlockPool.SliceWriter;
-import org.apache.lucene.util.IntBlockPool;
-import org.apache.lucene.util.RamUsageEstimator;
-import org.apache.lucene.util.RecyclingByteBlockAllocator;
-import org.apache.lucene.util.RecyclingIntBlockAllocator;
 
 /**
  * High-performance single-document main memory Apache Lucene fulltext search index.
@@ -288,6 +260,46 @@ public class MemoryIndex {
     addField(fieldName, stream, 1.0f, analyzer.getPositionIncrementGap(fieldName), analyzer.getOffsetGap(fieldName));
   }
 
+  /**
+   * Builds a MemoryIndex from a lucene {@link Document} using an analyzer
+   *
+   * @param document the document to index
+   * @param analyzer the analyzer to use
+   * @return a MemoryIndex
+   */
+  public static MemoryIndex fromDocument(Document document, Analyzer analyzer) {
+    return fromDocument(document, analyzer, false, false, 0);
+  }
+
+  /**
+   * Builds a MemoryIndex from a lucene {@link Document} using an analyzer
+   * @param document the document to index
+   * @param analyzer the analyzer to use
+   * @param storeOffsets <code>true</code> if offsets should be stored
+   * @param storePayloads <code>true</code> if payloads should be stored
+   * @return a MemoryIndex
+   */
+  public static MemoryIndex fromDocument(Document document, Analyzer analyzer, boolean storeOffsets, boolean storePayloads) {
+    return fromDocument(document, analyzer, storeOffsets, storePayloads, 0);
+  }
+
+  /**
+   * Builds a MemoryIndex from a lucene {@link Document} using an analyzer
+   * @param document the document to index
+   * @param analyzer the analyzer to use
+   * @param storeOffsets <code>true</code> if offsets should be stored
+   * @param storePayloads <code>true</code> if payloads should be stored
+   * @param maxReusedBytes the number of bytes that should remain in the internal memory pools after {@link #reset()} is called
+   * @return a MemoryIndex
+   */
+  public static MemoryIndex fromDocument(Document document, Analyzer analyzer, boolean storeOffsets, boolean storePayloads, long maxReusedBytes) {
+    MemoryIndex mi = new MemoryIndex(storeOffsets, storePayloads, maxReusedBytes);
+    for (IndexableField field : document) {
+      mi.addField(field, analyzer);
+    }
+    return mi;
+  }
+
   /**
    * Convenience method; Creates and returns a token stream that generates a
    * token for each keyword in the given collection, "as is", without any
@@ -339,6 +351,39 @@ public class MemoryIndex {
   public void addField(String fieldName, TokenStream stream) {
     addField(fieldName, stream, 1.0f);
   }
 
+  /**
+   * Adds a lucene {@link IndexableField} to the MemoryIndex using the provided analyzer
+   * @param field the field to add
+   * @param analyzer the analyzer to use for term analysis
+   * @throws IllegalArgumentException if the field is a DocValues or Point field, as these
+   *                                  structures are not supported by MemoryIndex
+   */
+  public void addField(IndexableField field, Analyzer analyzer) {
+    addField(field, analyzer, 1.0f);
+  }
+
+  /**
+   * Adds a lucene {@link IndexableField} to the MemoryIndex using the provided analyzer
+   * @param field the field to add
+   * @param analyzer the analyzer to use for term analysis
+   * @param boost a field boost
+   * @throws IllegalArgumentException if the field is a DocValues or Point field, as these
+   *                                  structures are not supported by MemoryIndex
+   */
+  public void addField(IndexableField field, Analyzer analyzer, float boost) {
+    if (field.fieldType().docValuesType() != DocValuesType.NONE)
+      throw new IllegalArgumentException("MemoryIndex does not support DocValues fields");
+    if (field.fieldType().pointDimensionCount() != 0)
+      throw new IllegalArgumentException("MemoryIndex does not support Points");
+    if (analyzer == null) {
+      addField(field.name(), field.tokenStream(null, null), boost);
+    }
+    else {
+      addField(field.name(), field.tokenStream(analyzer, null), boost,
+          analyzer.getPositionIncrementGap(field.name()), analyzer.getOffsetGap(field.name()));
+    }
+  }
+
   /**
    * Iterates over the given token stream and adds the resulting terms to the index;
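Not part of the commit: a sketch of the new addField(IndexableField, Analyzer) overload above, which rejects DocValues and Point fields and otherwise analyzes the field's token stream into the index. Field name, text, and the analyzer choice are invented.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.index.memory.MemoryIndex;
    import org.apache.lucene.search.TermQuery;

    public class AddFieldSketch {
      public static void main(String[] args) {
        MemoryIndex mi = new MemoryIndex();
        // TextField implements IndexableField; a DocValues or Point field here would throw IllegalArgumentException
        mi.addField(new TextField("title", "memory index example", Field.Store.NO), new StandardAnalyzer());
        System.out.println(mi.search(new TermQuery(new Term("title", "example"))));
      }
    }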
@@ -22,12 +22,17 @@ import java.io.IOException;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.analysis.MockPayloadAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.document.TextField;
 import org.apache.lucene.index.FieldInvertState;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.index.TermsEnum;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.similarities.BM25Similarity;
 import org.apache.lucene.search.similarities.ClassicSimilarity;
@@ -156,5 +161,27 @@ public class TestMemoryIndex extends LuceneTestCase {
     TestUtil.checkReader(reader);
   }
 
+  @Test
+  public void testBuildFromDocument() {
+
+    Document doc = new Document();
+    doc.add(new TextField("field1", "some text", Field.Store.NO));
+    doc.add(new TextField("field1", "some more text", Field.Store.NO));
+    doc.add(new StringField("field2", "untokenized text", Field.Store.NO));
+
+    analyzer.setPositionIncrementGap(100);
+
+    MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer);
+
+    assertThat(mi.search(new TermQuery(new Term("field1", "text"))), not(0.0f));
+    assertThat(mi.search(new TermQuery(new Term("field2", "text"))), is(0.0f));
+    assertThat(mi.search(new TermQuery(new Term("field2", "untokenized text"))), not(0.0f));
+
+    assertThat(mi.search(new PhraseQuery("field1", "some", "more", "text")), not(0.0f));
+    assertThat(mi.search(new PhraseQuery("field1", "some", "text")), not(0.0f));
+    assertThat(mi.search(new PhraseQuery("field1", "text", "some")), is(0.0f));
+
+  }
+
 }
@@ -25,6 +25,7 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 import java.util.Set;
+import java.util.SortedSet;
 
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
@@ -55,6 +56,7 @@ import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.DocIdSetBuilder;
+import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.RamUsageEstimator;
 
 /**
@@ -86,22 +88,17 @@ public class TermsQuery extends Query implements Accountable {
   private final PrefixCodedTerms termData;
   private final int termDataHashCode; // cached hashcode of termData
 
-  private static Term[] toTermArray(String field, List<BytesRef> termBytes) {
-    Term[] array = new Term[termBytes.size()];
-    int i = 0;
-    for (BytesRef t : termBytes) {
-      array[i++] = new Term(field, t);
-    }
-    return array;
-  }
-
   /**
-   * Creates a new {@link TermsQuery} from the given list. The list
+   * Creates a new {@link TermsQuery} from the given collection. It
    * can contain duplicate terms and multiple fields.
    */
-  public TermsQuery(final List<Term> terms) {
+  public TermsQuery(Collection<Term> terms) {
     Term[] sortedTerms = terms.toArray(new Term[terms.size()]);
-    ArrayUtil.timSort(sortedTerms);
+    // already sorted if we are a SortedSet with natural order
+    boolean sorted = terms instanceof SortedSet && ((SortedSet<Term>)terms).comparator() == null;
+    if (!sorted) {
+      ArrayUtil.timSort(sortedTerms);
+    }
     PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
     Term previous = null;
     for (Term term : sortedTerms) {
@@ -113,21 +110,38 @@ public class TermsQuery extends Query implements Accountable {
     termData = builder.finish();
     termDataHashCode = termData.hashCode();
   }
 
   /**
-   * Creates a new {@link TermsQuery} from the given {@link BytesRef} list for
-   * a single field.
+   * Creates a new {@link TermsQuery} from the given collection for
+   * a single field. It can contain duplicate terms.
    */
-  public TermsQuery(final String field, final List<BytesRef> terms) {
-    this(toTermArray(field, terms));
+  public TermsQuery(String field, Collection<BytesRef> terms) {
+    BytesRef[] sortedTerms = terms.toArray(new BytesRef[terms.size()]);
+    // already sorted if we are a SortedSet with natural order
+    boolean sorted = terms instanceof SortedSet && ((SortedSet<BytesRef>)terms).comparator() == null;
+    if (!sorted) {
+      ArrayUtil.timSort(sortedTerms);
+    }
+    PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
+    BytesRefBuilder previous = null;
+    for (BytesRef term : sortedTerms) {
+      if (previous == null) {
+        previous = new BytesRefBuilder();
+      } else if (previous.get().equals(term)) {
+        continue; // deduplicate
+      }
+      builder.add(field, term);
+      previous.copyBytes(term);
+    }
+    termData = builder.finish();
+    termDataHashCode = termData.hashCode();
   }
 
   /**
    * Creates a new {@link TermsQuery} from the given {@link BytesRef} array for
    * a single field.
    */
-  public TermsQuery(final String field, final BytesRef...terms) {
-    // this ctor prevents unnecessary Term creations
+  public TermsQuery(String field, BytesRef...terms) {
     this(field, Arrays.asList(terms));
   }
 
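Not part of the commit: a sketch of the constructor change above. A SortedSet with natural ordering is detected as already sorted, so the timSort call is skipped, and duplicate terms are dropped while the PrefixCodedTerms are built. The queries-module import path and the field name are assumptions.

    import java.util.SortedSet;
    import java.util.TreeSet;

    import org.apache.lucene.queries.TermsQuery;
    import org.apache.lucene.util.BytesRef;

    public class TermsQuerySketch {
      public static void main(String[] args) {
        // natural BytesRef ordering, so comparator() == null and the constructor skips re-sorting
        SortedSet<BytesRef> terms = new TreeSet<>();
        terms.add(new BytesRef("apple"));
        terms.add(new BytesRef("banana"));
        TermsQuery query = new TermsQuery("contents", terms);
        System.out.println(query);
      }
    }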
@@ -265,7 +265,7 @@ public class CommonTermsQueryTest extends LuceneTestCase {
       assertEquals("0", r.document(search.scoreDocs[0].doc).get("id"));
       assertEquals("2", r.document(search.scoreDocs[1].doc).get("id"));
       assertEquals("3", r.document(search.scoreDocs[2].doc).get("id"));
-      assertTrue(search.scoreDocs[1].score > search.scoreDocs[2].score);
+      assertTrue(search.scoreDocs[1].score >= search.scoreDocs[2].score);
     }
 
     {
@@ -348,4 +348,39 @@ public class TestGeoPointQuery extends BaseGeoPointTestCase {
     assertEquals(180.0, GeoUtils.mortonUnhashLon(hash), 0);
     assertEquals(90.0, GeoUtils.mortonUnhashLat(hash), 0);
   }
+
+  public void testEncodeDecode() throws Exception {
+    int iters = atLeast(10000);
+    boolean small = random().nextBoolean();
+    for(int iter=0;iter<iters;iter++) {
+      double lat = randomLat(small);
+      double lon = randomLon(small);
+
+      long enc = GeoUtils.mortonHash(lon, lat);
+      double latEnc = GeoUtils.mortonUnhashLat(enc);
+      double lonEnc = GeoUtils.mortonUnhashLon(enc);
+
+      assertEquals("lat=" + lat + " latEnc=" + latEnc + " diff=" + (lat - latEnc), lat, latEnc, GeoUtils.TOLERANCE);
+      assertEquals("lon=" + lon + " lonEnc=" + lonEnc + " diff=" + (lon - lonEnc), lon, lonEnc, GeoUtils.TOLERANCE);
+    }
+  }
+
+  public void testScaleUnscaleIsStable() throws Exception {
+    int iters = atLeast(1000);
+    boolean small = random().nextBoolean();
+    for(int iter=0;iter<iters;iter++) {
+      double lat = randomLat(small);
+      double lon = randomLon(small);
+
+      long enc = GeoUtils.mortonHash(lon, lat);
+      double latEnc = GeoUtils.mortonUnhashLat(enc);
+      double lonEnc = GeoUtils.mortonUnhashLon(enc);
+
+      long enc2 = GeoUtils.mortonHash(lon, lat);
+      double latEnc2 = GeoUtils.mortonUnhashLat(enc2);
+      double lonEnc2 = GeoUtils.mortonUnhashLon(enc2);
+      assertEquals(latEnc, latEnc2, 0.0);
+      assertEquals(lonEnc, lonEnc2, 0.0);
+    }
+  }
 }
@@ -112,12 +112,29 @@ public class TestLatLonPointQueries extends BaseGeoPointTestCase {
     boolean small = random().nextBoolean();
     for(int iter=0;iter<iters;iter++) {
       double lat = randomLat(small);
-      double latQuantized = LatLonPoint.decodeLat(LatLonPoint.encodeLat(lat));
-      assertEquals(lat, latQuantized, LatLonPoint.TOLERANCE);
+      double latEnc = LatLonPoint.decodeLat(LatLonPoint.encodeLat(lat));
+      assertEquals("lat=" + lat + " latEnc=" + latEnc + " diff=" + (lat - latEnc), lat, latEnc, LatLonPoint.TOLERANCE);
 
       double lon = randomLon(small);
-      double lonQuantized = LatLonPoint.decodeLon(LatLonPoint.encodeLon(lon));
-      assertEquals(lon, lonQuantized, LatLonPoint.TOLERANCE);
+      double lonEnc = LatLonPoint.decodeLon(LatLonPoint.encodeLon(lon));
+      assertEquals("lon=" + lon + " lonEnc=" + lonEnc + " diff=" + (lon - lonEnc), lon, lonEnc, LatLonPoint.TOLERANCE);
+    }
+  }
+
+  public void testScaleUnscaleIsStable() throws Exception {
+    int iters = atLeast(1000);
+    boolean small = random().nextBoolean();
+    for(int iter=0;iter<iters;iter++) {
+      double lat = randomLat(small);
+      double lon = randomLon(small);
+
+      double latEnc = LatLonPoint.decodeLat(LatLonPoint.encodeLat(lat));
+      double lonEnc = LatLonPoint.decodeLon(LatLonPoint.encodeLon(lon));
+
+      double latEnc2 = LatLonPoint.decodeLat(LatLonPoint.encodeLat(latEnc));
+      double lonEnc2 = LatLonPoint.decodeLon(LatLonPoint.encodeLon(lonEnc));
+      assertEquals(latEnc, latEnc2, 0.0);
+      assertEquals(lonEnc, lonEnc2, 0.0);
     }
   }
 }
@@ -781,4 +781,3 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
     return dir;
   }
 }
-
@@ -166,7 +166,7 @@ public class SyncStrategy {
       return true;
     }
 
-    List<String> syncWith = new ArrayList<>();
+    List<String> syncWith = new ArrayList<>(nodes.size());
     for (ZkCoreNodeProps node : nodes) {
       syncWith.add(node.getCoreUrl());
     }
@@ -162,7 +162,7 @@ abstract class FacetParser<FacetRequestT extends FacetRequest> {
   }
 
   protected RuntimeException err(String msg) {
-    return new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg + " ,path="+getPathStr());
+    return new SolrException(SolrException.ErrorCode.BAD_REQUEST, msg + " , path="+getPathStr());
   }
 
   public abstract FacetRequest parse(Object o) throws SyntaxError;
@@ -192,7 +192,7 @@ abstract class FacetParser<FacetRequestT extends FacetRequest> {
       } else if (parsedValue instanceof AggValueSource) {
         facet.addStat(key, (AggValueSource)parsedValue);
       } else {
-        throw new RuntimeException("Huh? TODO: " + parsedValue);
+        throw err("Unknown facet type key=" + key + " class=" + (parsedValue == null ? "null" : parsedValue.getClass().getName()));
       }
     }
   } else {
@@ -248,7 +248,11 @@ abstract class FacetParser<FacetRequestT extends FacetRequest> {
       return parseRangeFacet(key, args);
     }
 
-    return parseStat(key, type, args);
+    AggValueSource stat = parseStat(key, type, args);
+    if (stat == null) {
+      throw err("Unknown facet or stat. key=" + key + " type=" + type + " args=" + args);
+    }
+    return stat;
   }
 
 
@@ -28,8 +28,8 @@ import java.util.Map;
 import java.util.Random;
 
 import com.tdunning.math.stats.AVLTreeDigest;
+import org.apache.solr.common.SolrException;
 import org.apache.solr.util.hll.HLL;
-import org.apache.lucene.queryparser.flexible.standard.processors.NumericQueryNodeProcessor;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.packed.GrowableWriter;
 import org.apache.lucene.util.packed.PackedInts;
@@ -1258,6 +1258,26 @@ public class TestJsonFacets extends SolrTestCaseHS {
   }
 
 
+  @Test
+  public void testErrors() throws Exception {
+    doTestErrors(Client.localClient());
+  }
+
+  public void doTestErrors(Client client) throws Exception {
+    ModifiableSolrParams p = params("rows", "0");
+    client.deleteByQuery("*:*", null);
+
+    try {
+      client.testJQ(params("ignore_exception", "true", "q", "*:*"
+          , "json.facet", "{f:{type:ignore_exception_aaa, field:bbbbbb}}"
+          )
+      );
+    } catch (SolrException e) {
+      assertTrue( e.getMessage().contains("ignore_exception_aaa") );
+    }
+
+  }
+
 
 
   public void XtestPercentiles() {