mirror of https://github.com/apache/lucene.git
Merge branch 'apache-https-master' into jira/solr-8593
commit aaee7513e6
@@ -15,7 +15,7 @@
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
under the License.
-->
<rdf:RDF xml:lang="en"
         xmlns="http://usefulinc.com/ns/doap#"

@@ -67,6 +67,11 @@
  </maintainer>

  <release>
    <Version>
      <name>lucene-6.4.0</name>
      <created>2017-01-23</created>
      <revision>6.4.0</revision>
    </Version>
    <Version>
      <name>lucene-6.3.0</name>
      <created>2016-11-08</created>
@@ -15,7 +15,7 @@
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
under the License.
-->
<rdf:RDF xml:lang="en"
         xmlns="http://usefulinc.com/ns/doap#"

@@ -65,8 +65,13 @@
      <foaf:mbox rdf:resource="mailto:dev@lucene.apache.org"/>
    </foaf:Person>
  </maintainer>


  <release>
    <Version>
      <name>solr-6.4.0</name>
      <created>2017-01-23</created>
      <revision>6.4.0</revision>
    </Version>
    <Version>
      <name>solr-6.3.0</name>
      <created>2016-11-08</created>
@@ -71,6 +71,12 @@ API Changes
* LUCENE-7637: TermInSetQuery requires that all terms come from the same field.
  (Adrien Grand)

* LUCENE-7644: FieldComparatorSource.newComparator() and
  SortField.getComparator() no longer throw IOException (Alan Woodward)

* LUCENE-7643: Replaced doc-values queries in lucene/sandbox with factory
  methods on the *DocValuesField classes. (Adrien Grand)

New Features

* LUCENE-7623: Add FunctionScoreQuery and FunctionMatchQuery (Alan Woodward,

@@ -93,12 +99,25 @@ Improvements
  should be run, eg. using points or doc values depending on costs of other
  parts of the query. (Adrien Grand)

* LUCENE-7643: IndexOrDocValuesQuery allows to execute range queries using
  either points or doc values depending on which one is more efficient.
  (Adrien Grand)

Optimizations

* LUCENE-7641: Optimized point range queries to compute documents that do not
  match the range on single-valued fields when more than half the documents in
  the index would match. (Adrien Grand)

Build

* LUCENE-7651: Fix Javadocs build for Java 8u121 by injecting "Google Code
  Prettify" without adding Javascript to Javadocs's -bottom parameter.
  Also update Prettify to latest version to fix Google Chrome issue.
  (Uwe Schindler)

* LUCENE-7653: Update randomizedtesting to version 2.5.0. (Dawid Weiss)

======================= Lucene 6.4.0 =======================

API Changes
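The LUCENE-7643 entries above describe two complementary pieces: the new *DocValuesField factory methods build queries that verify matches cheaply via doc values, and IndexOrDocValuesQuery picks between such a query and a point query at search time. A minimal sketch of how they combine, assuming a hypothetical field "price" that indexes the same values both as a LongPoint and as a SortedNumericDocValuesField:

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.SortedNumericDocValuesField;
    import org.apache.lucene.search.IndexOrDocValuesQuery;
    import org.apache.lucene.search.Query;

    public class PriceRangeExample {
      // Lets the engine use the point index when it needs a lead iterator,
      // and doc values when it only needs to verify candidate documents.
      static Query priceRange(long min, long max) {
        Query pointQuery = LongPoint.newRangeQuery("price", min, max);
        Query dvQuery = SortedNumericDocValuesField.newRangeQuery("price", min, max);
        return new IndexOrDocValuesQuery(pointQuery, dvQuery);
      }
    }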
@@ -41,31 +41,17 @@ public abstract class BaseCharFilter extends CharFilter {
  /** Retrieve the corrected offset. */
  @Override
  protected int correct(int currentOff) {
    if (offsets == null || currentOff < offsets[0]) {
    if (offsets == null) {
      return currentOff;
    }

    int hi = size - 1;
    if(currentOff >= offsets[hi])
      return currentOff + diffs[hi];

    int lo = 0;
    int mid = -1;

    while (hi >= lo) {
      mid = (lo + hi) >>> 1;
      if (currentOff < offsets[mid])
        hi = mid - 1;
      else if (currentOff > offsets[mid])
        lo = mid + 1;
      else
        return currentOff + diffs[mid];
    int index = Arrays.binarySearch(offsets, 0, size, currentOff);
    if (index < -1) {
      index = -2 - index;
    }

    if (currentOff < offsets[mid])
      return mid == 0 ? currentOff : currentOff + diffs[mid-1];
    else
      return currentOff + diffs[mid];
    final int diff = index < 0 ? 0 : diffs[index];
    return currentOff + diff;
  }

  protected int getLastCumulativeDiff() {
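The new implementation leans on the contract of Arrays.binarySearch, which returns the index of the key when found and -(insertionPoint) - 1 otherwise, so `index = -2 - index` recovers the index of the largest offset smaller than currentOff. A standalone illustration with made-up offsets (not part of the patch):

    import java.util.Arrays;

    public class BinarySearchDecodeDemo {
      public static void main(String[] args) {
        int[] offsets = {5, 10, 20};
        // Exact hit: the index of the key itself.
        System.out.println(Arrays.binarySearch(offsets, 0, 3, 10)); // 1
        // Miss between elements: -(insertionPoint) - 1.
        int index = Arrays.binarySearch(offsets, 0, 3, 12); // -3, insertion point is 2
        System.out.println(-2 - index); // 1, i.e. the last offset <= 12
        // A miss below the smallest element stays -1, which correct() maps to a diff of 0.
        System.out.println(Arrays.binarySearch(offsets, 0, 3, 3)); // -1
      }
    }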
@@ -80,6 +80,7 @@ import org.apache.lucene.analysis.miscellaneous.LimitTokenPositionFilter;
import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter.StemmerOverrideMap;
import org.apache.lucene.analysis.miscellaneous.StemmerOverrideFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
import org.apache.lucene.analysis.payloads.IdentityEncoder;

@@ -152,6 +153,8 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
      ValidatingTokenFilter.class,
      // TODO: needs to be a tokenizer, doesn't handle graph inputs properly (a shingle or similar following will then cause pain)
      WordDelimiterFilter.class,
      // Cannot correct offsets when a char filter had changed them:
      WordDelimiterGraphFilter.class,
      // clones of core's filters:
      org.apache.lucene.analysis.core.StopFilter.class,
      org.apache.lucene.analysis.core.LowerCaseFilter.class)) {
@@ -291,7 +291,9 @@ public class TestBackwardsCompatibility extends LuceneTestCase {
    "6.2.1-cfs",
    "6.2.1-nocfs",
    "6.3.0-cfs",
    "6.3.0-nocfs"
    "6.3.0-nocfs",
    "6.4.0-cfs",
    "6.4.0-nocfs"
  };

  final String[] unsupportedNames = {
Binary file not shown.
Binary file not shown.
@@ -976,6 +976,7 @@
          shuffleOnSlave="true"
          leaveTemporary="${junit4.leaveTemporary}"
          seed="${tests.seed}"
          onNonEmptyWorkDirectory="wipe"

          heartbeat="${tests.heartbeat}"
          uniqueSuiteNames="false"

@@ -1073,6 +1074,7 @@
        <propertyref prefix="tests.leaveTemporary" />
        <propertyref prefix="tests.leavetemporary" />
        <propertyref prefix="solr.test.leavetmpdir" />
        <propertyref prefix="solr.tests.preferPointFields"/>
      </syspropertyset>

      <!-- Pass randomized settings to the forked JVM. -->
@@ -2077,7 +2079,7 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
    </condition>
    <antcall target="download-java8-javadoc-packagelist"/>
    <delete file="@{destdir}/stylesheet.css" failonerror="false"/>
    <copy todir="@{destdir}" file="${prettify.dir}/prettify.js" overwrite="false" />
    <delete file="@{destdir}/script.js" failonerror="false"/>
    <record name="@{destdir}/log_javadoc.txt" action="start" append="no"/>
    <javadoc
        overview="@{overview}"

@@ -2106,20 +2108,6 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
      <link offline="true" href="${javadoc.link}" packagelistLoc="${javadoc.packagelist.dir}/java8"/>
      <bottom><![CDATA[
        <i>Copyright © ${year} Apache Software Foundation. All Rights Reserved.</i>
        <script src='{@docRoot}/prettify.js' type='text/javascript'></script>
        <script type='text/javascript'>
          (function(){
            var oldonload = window.onload;
            if (typeof oldonload != 'function') {
              window.onload = prettyPrint;
            } else {
              window.onload = function() {
                oldonload();
                prettyPrint();
              }
            }
          })();
        </script>
      ]]></bottom>

      <sources />

@@ -2130,10 +2118,14 @@ ${ant.project.name}.test.dependencies=${test.classpath.list}
    </javadoc>
    <record name="@{destdir}/log_javadoc.txt" action="stop"/>

    <!-- append prettify.css -->
    <concat destfile="@{destdir}/stylesheet.css" append="true">
    <!-- append prettify to scripts and css -->
    <concat destfile="@{destdir}/stylesheet.css" append="true" fixlastline="true" encoding="UTF-8">
      <filelist dir="${prettify.dir}" files="prettify.css"/>
    </concat>
    <concat destfile="@{destdir}/script.js" append="true" fixlastline="true" encoding="UTF-8">
      <filelist dir="${prettify.dir}" files="prettify.js inject-javadocs.js"/>
    </concat>
    <fixcrlf srcdir="@{destdir}" includes="stylesheet.css script.js" eol="lf" fixlast="true" encoding="UTF-8" />

    <delete>
      <fileset file="@{destdir}/log_javadoc.txt">
@@ -17,7 +17,15 @@
package org.apache.lucene.document;


import java.io.IOException;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;

/**
 * <p>

@@ -54,4 +62,44 @@ public class NumericDocValuesField extends Field {
    super(name, TYPE);
    fieldsData = Long.valueOf(value);
  }

  /**
   * Create a range query that matches all documents whose value is between
   * {@code lowerValue} and {@code upperValue} included.
   * <p>
   * You can have half-open ranges (which are in fact </≤ or >/≥ queries)
   * by setting {@code lowerValue = Long.MIN_VALUE} or {@code upperValue = Long.MAX_VALUE}.
   * <p>
   * Ranges are inclusive. For exclusive ranges, pass {@code Math.addExact(lowerValue, 1)}
   * or {@code Math.addExact(upperValue, -1)}.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link LongPoint#newRangeQuery}.
   */
  public static Query newRangeQuery(String field, long lowerValue, long upperValue) {
    return new SortedNumericDocValuesRangeQuery(field, lowerValue, upperValue) {
      @Override
      SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
        NumericDocValues values = reader.getNumericDocValues(field);
        if (values == null) {
          return null;
        }
        return DocValues.singleton(values);
      }
    };
  }

  /**
   * Create a query for matching an exact long value.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link LongPoint#newExactQuery}.
   */
  public static Query newExactQuery(String field, long value) {
    return newRangeQuery(field, value, value);
  }
}
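Per the javadoc above, both bounds are inclusive and Long.MIN_VALUE/Long.MAX_VALUE act as the half-open sentinels. A small sketch of deriving exclusive bounds, assuming a hypothetical "timestamp" field; note that Math.addExact throws on overflow, so the sentinels themselves should be left untouched:

    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.search.Query;

    public class ExclusiveRangeExample {
      // Matches timestamp > lower && timestamp < upper (both bounds exclusive).
      static Query strictlyBetween(long lower, long upper) {
        return NumericDocValuesField.newRangeQuery("timestamp",
            Math.addExact(lower, 1),   // > lower  becomes  >= lower + 1
            Math.addExact(upper, -1)); // < upper  becomes  <= upper - 1
      }
    }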
@@ -17,7 +17,14 @@
package org.apache.lucene.document;


import java.io.IOException;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

/**

@@ -59,4 +66,39 @@ public class SortedDocValuesField extends Field {
    super(name, TYPE);
    fieldsData = bytes;
  }

  /**
   * Create a range query that matches all documents whose value is between
   * {@code lowerValue} and {@code upperValue} included.
   * <p>
   * You can have half-open ranges by setting {@code lowerValue = null}
   * or {@code upperValue = null}.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link BinaryPoint#newRangeQuery}.
   */
  public static Query newRangeQuery(String field,
      BytesRef lowerValue, BytesRef upperValue,
      boolean lowerInclusive, boolean upperInclusive) {
    return new SortedSetDocValuesRangeQuery(field, lowerValue, upperValue, lowerInclusive, upperInclusive) {
      @Override
      SortedSetDocValues getValues(LeafReader reader, String field) throws IOException {
        return DocValues.singleton(DocValues.getSorted(reader, field));
      }
    };
  }

  /**
   * Create a query for matching an exact {@link BytesRef} value.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link BinaryPoint#newExactQuery}.
   */
  public static Query newExactQuery(String field, BytesRef value) {
    return newRangeQuery(field, value, value, true, true);
  }
}
@@ -17,7 +17,15 @@
package org.apache.lucene.document;


import java.io.IOException;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;

/**
 * <p>

@@ -63,4 +71,50 @@ public class SortedNumericDocValuesField extends Field {
    super(name, TYPE);
    fieldsData = Long.valueOf(value);
  }

  /**
   * Create a range query that matches all documents whose value is between
   * {@code lowerValue} and {@code upperValue} included.
   * <p>
   * You can have half-open ranges (which are in fact </≤ or >/≥ queries)
   * by setting {@code lowerValue = Long.MIN_VALUE} or {@code upperValue = Long.MAX_VALUE}.
   * <p>
   * Ranges are inclusive. For exclusive ranges, pass {@code Math.addExact(lowerValue, 1)}
   * or {@code Math.addExact(upperValue, -1)}.
   * <p>This query also works with fields that have indexed
   * {@link NumericDocValuesField}s.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link LongPoint#newRangeQuery}.
   */
  public static Query newRangeQuery(String field, long lowerValue, long upperValue) {
    return new SortedNumericDocValuesRangeQuery(field, lowerValue, upperValue) {
      @Override
      SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException {
        FieldInfo info = reader.getFieldInfos().fieldInfo(field);
        if (info == null) {
          // Queries have some optimizations when one sub scorer returns null rather
          // than a scorer that does not match any documents
          return null;
        }
        return DocValues.getSortedNumeric(reader, field);
      }
    };
  }

  /**
   * Create a query for matching an exact long value.
   * <p>This query also works with fields that have indexed
   * {@link NumericDocValuesField}s.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link LongPoint#newExactQuery}.
   */
  public static Query newExactQuery(String field, long value) {
    return newRangeQuery(field, value, value);
  }
}
@@ -0,0 +1,144 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.document;

import java.io.IOException;
import java.util.Objects;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.FieldValueQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

abstract class SortedNumericDocValuesRangeQuery extends Query {

  private final String field;
  private final long lowerValue;
  private final long upperValue;

  SortedNumericDocValuesRangeQuery(String field, long lowerValue, long upperValue) {
    this.field = Objects.requireNonNull(field);
    this.lowerValue = lowerValue;
    this.upperValue = upperValue;
  }

  @Override
  public boolean equals(Object obj) {
    if (sameClassAs(obj) == false) {
      return false;
    }
    SortedNumericDocValuesRangeQuery that = (SortedNumericDocValuesRangeQuery) obj;
    return Objects.equals(field, that.field)
        && lowerValue == that.lowerValue
        && upperValue == that.upperValue;
  }

  @Override
  public int hashCode() {
    int h = classHash();
    h = 31 * h + field.hashCode();
    h = 31 * h + Long.hashCode(lowerValue);
    h = 31 * h + Long.hashCode(upperValue);
    return h;
  }

  @Override
  public String toString(String field) {
    StringBuilder b = new StringBuilder();
    if (this.field.equals(field) == false) {
      b.append(this.field).append(":");
    }
    return b
        .append("[")
        .append(lowerValue)
        .append(" TO ")
        .append(upperValue)
        .append("]")
        .toString();
  }

  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    if (lowerValue == Long.MIN_VALUE && upperValue == Long.MAX_VALUE) {
      return new FieldValueQuery(field);
    }
    return super.rewrite(reader);
  }

  abstract SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException;

  @Override
  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    return new ConstantScoreWeight(this, boost) {
      @Override
      public Scorer scorer(LeafReaderContext context) throws IOException {
        SortedNumericDocValues values = getValues(context.reader(), field);
        if (values == null) {
          return null;
        }
        final NumericDocValues singleton = DocValues.unwrapSingleton(values);
        final TwoPhaseIterator iterator;
        if (singleton != null) {
          iterator = new TwoPhaseIterator(singleton) {
            @Override
            public boolean matches() throws IOException {
              final long value = singleton.longValue();
              return value >= lowerValue && value <= upperValue;
            }

            @Override
            public float matchCost() {
              return 2; // 2 comparisons
            }
          };
        } else {
          iterator = new TwoPhaseIterator(values) {
            @Override
            public boolean matches() throws IOException {
              for (int i = 0, count = values.docValueCount(); i < count; ++i) {
                final long value = values.nextValue();
                if (value < lowerValue) {
                  continue;
                }
                // Values are sorted, so the first value that is >= lowerValue is our best candidate
                return value <= upperValue;
              }
              return false; // all values were < lowerValue
            }

            @Override
            public float matchCost() {
              return 2; // 2 comparisons
            }
          };
        }
        return new ConstantScoreScorer(this, score(), iterator);
      }
    };
  }

}
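The scorer above is built around a TwoPhaseIterator: the doc-values iterator supplies a cheap approximation, and matches() confirms the range check per document. A sketch of how a caller typically consumes such an iterator, assuming `it` was obtained from a scorer built by this query:

    import java.io.IOException;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.TwoPhaseIterator;

    public class TwoPhaseConsumeExample {
      static void drain(TwoPhaseIterator it) throws IOException {
        DocIdSetIterator approximation = it.approximation();
        for (int doc = approximation.nextDoc();
             doc != DocIdSetIterator.NO_MORE_DOCS;
             doc = approximation.nextDoc()) {
          if (it.matches()) {
            // doc's values really fall inside [lowerValue, upperValue]
          }
        }
      }
    }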
@@ -17,7 +17,14 @@
package org.apache.lucene.document;


import java.io.IOException;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

/**

@@ -60,4 +67,40 @@ public class SortedSetDocValuesField extends Field {
    super(name, TYPE);
    fieldsData = bytes;
  }

  /**
   * Create a range query that matches all documents whose value is between
   * {@code lowerValue} and {@code upperValue}.
   * <p>This query also works with fields that have indexed
   * {@link SortedDocValuesField}s.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link BinaryPoint#newRangeQuery}.
   */
  public static Query newRangeQuery(String field,
      BytesRef lowerValue, BytesRef upperValue,
      boolean lowerInclusive, boolean upperInclusive) {
    return new SortedSetDocValuesRangeQuery(field, lowerValue, upperValue, lowerInclusive, upperInclusive) {
      @Override
      SortedSetDocValues getValues(LeafReader reader, String field) throws IOException {
        return DocValues.getSortedSet(reader, field);
      }
    };
  }

  /**
   * Create a query for matching an exact {@link BytesRef} value.
   * <p>This query also works with fields that have indexed
   * {@link SortedDocValuesField}s.
   * <p><b>NOTE</b>: Such queries cannot efficiently advance to the next match,
   * which makes them slow if they are not ANDed with a selective query. As a
   * consequence, they are best used wrapped in an {@link IndexOrDocValuesQuery},
   * alongside a range query that executes on points, such as
   * {@link BinaryPoint#newExactQuery}.
   */
  public static Query newExactQuery(String field, BytesRef value) {
    return newRangeQuery(field, value, value, true, true);
  }
}
@@ -0,0 +1,188 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.document;

import java.io.IOException;
import java.util.Objects;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.FieldValueQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;

abstract class SortedSetDocValuesRangeQuery extends Query {

  private final String field;
  private final BytesRef lowerValue;
  private final BytesRef upperValue;
  private final boolean lowerInclusive;
  private final boolean upperInclusive;

  SortedSetDocValuesRangeQuery(String field,
      BytesRef lowerValue, BytesRef upperValue,
      boolean lowerInclusive, boolean upperInclusive) {
    this.field = Objects.requireNonNull(field);
    this.lowerValue = lowerValue;
    this.upperValue = upperValue;
    this.lowerInclusive = lowerInclusive && lowerValue != null;
    this.upperInclusive = upperInclusive && upperValue != null;
  }

  @Override
  public boolean equals(Object obj) {
    if (sameClassAs(obj) == false) {
      return false;
    }
    SortedSetDocValuesRangeQuery that = (SortedSetDocValuesRangeQuery) obj;
    return Objects.equals(field, that.field)
        && Objects.equals(lowerValue, that.lowerValue)
        && Objects.equals(upperValue, that.upperValue)
        && lowerInclusive == that.lowerInclusive
        && upperInclusive == that.upperInclusive;
  }

  @Override
  public int hashCode() {
    int h = classHash();
    h = 31 * h + field.hashCode();
    h = 31 * h + Objects.hashCode(lowerValue);
    h = 31 * h + Objects.hashCode(upperValue);
    h = 31 * h + Boolean.hashCode(lowerInclusive);
    h = 31 * h + Boolean.hashCode(upperInclusive);
    return h;
  }

  @Override
  public String toString(String field) {
    StringBuilder b = new StringBuilder();
    if (this.field.equals(field) == false) {
      b.append(this.field).append(":");
    }
    return b
        .append(lowerInclusive ? "[" : "{")
        .append(lowerValue == null ? "*" : lowerValue)
        .append(" TO ")
        .append(upperValue == null ? "*" : upperValue)
        .append(upperInclusive ? "]" : "}")
        .toString();
  }

  @Override
  public Query rewrite(IndexReader reader) throws IOException {
    if (lowerValue == null && upperValue == null) {
      return new FieldValueQuery(field);
    }
    return super.rewrite(reader);
  }

  abstract SortedSetDocValues getValues(LeafReader reader, String field) throws IOException;

  @Override
  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    return new ConstantScoreWeight(this, boost) {
      @Override
      public Scorer scorer(LeafReaderContext context) throws IOException {
        SortedSetDocValues values = getValues(context.reader(), field);
        if (values == null) {
          return null;
        }

        final long minOrd;
        if (lowerValue == null) {
          minOrd = 0;
        } else {
          final long ord = values.lookupTerm(lowerValue);
          if (ord < 0) {
            minOrd = -1 - ord;
          } else if (lowerInclusive) {
            minOrd = ord;
          } else {
            minOrd = ord + 1;
          }
        }

        final long maxOrd;
        if (upperValue == null) {
          maxOrd = values.getValueCount() - 1;
        } else {
          final long ord = values.lookupTerm(upperValue);
          if (ord < 0) {
            maxOrd = -2 - ord;
          } else if (upperInclusive) {
            maxOrd = ord;
          } else {
            maxOrd = ord - 1;
          }
        }

        if (minOrd > maxOrd) {
          return null;
        }

        final SortedDocValues singleton = null; // TODO: LUCENE-7649, re-consider optimization that broke SOLR-10013
        // final SortedDocValues singleton = DocValues.unwrapSingleton(values);
        final TwoPhaseIterator iterator;
        if (singleton != null) {
          assert false : "impossible code -- or: someone re-enabled singleton optimization w/o reading the whole method";
          iterator = new TwoPhaseIterator(singleton) {
            @Override
            public boolean matches() throws IOException {
              final long ord = singleton.ordValue();
              return ord >= minOrd && ord <= maxOrd;
            }

            @Override
            public float matchCost() {
              return 2; // 2 comparisons
            }
          };
        } else {
          iterator = new TwoPhaseIterator(values) {
            @Override
            public boolean matches() throws IOException {
              for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
                if (ord < minOrd) {
                  continue;
                }
                // Values are sorted, so the first ord that is >= minOrd is our best candidate
                return ord <= maxOrd;
              }
              return false; // all ords were < minOrd
            }

            @Override
            public float matchCost() {
              return 2; // 2 comparisons
            }
          };
        }
        return new ConstantScoreScorer(this, score(), iterator);
      }
    };
  }

}
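The minOrd/maxOrd arithmetic above decodes SortedSetDocValues.lookupTerm, which mirrors binarySearch: a non-negative ord for an exact hit, otherwise -(insertionPoint) - 1. A pure-arithmetic illustration with a made-up miss (not part of the patch):

    public class LookupTermDecodeDemo {
      public static void main(String[] args) {
        long ord = -3; // a lookupTerm miss whose insertion point is 2
        long minOrd = -1 - ord; // 2: first ord whose term is >= the missing lower bound
        long maxOrd = -2 - ord; // 1: last ord whose term is < the missing upper bound
        System.out.println(minOrd + " " + maxOrd);
      }
    }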
@@ -343,7 +343,7 @@ public abstract class DoubleValuesSource {

  @Override
  public FieldComparator<Double> newComparator(String fieldname, int numHits,
                                               int sortPos, boolean reversed) throws IOException {
                                               int sortPos, boolean reversed) {
    return new FieldComparator.DoubleComparator(numHits, fieldname, 0.0){

      LeafReaderContext ctx;
@@ -17,8 +17,6 @@
package org.apache.lucene.search;


import java.io.IOException;

/**
 * Provides a {@link FieldComparator} for custom field sorting.
 *

@@ -33,9 +31,7 @@ public abstract class FieldComparatorSource {
   * @param fieldname
   *          Name of the field to create comparator for.
   * @return FieldComparator.
   * @throws IOException
   *           If an error occurs reading the index.
   */
  public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed)
      throws IOException;
  public abstract FieldComparator<?> newComparator(String fieldname, int numHits, int sortPos, boolean reversed);

}
@@ -58,8 +58,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
    private final int oneReverseMul;
    private final FieldComparator<?> oneComparator;

    public OneComparatorFieldValueHitQueue(SortField[] fields, int size)
        throws IOException {
    public OneComparatorFieldValueHitQueue(SortField[] fields, int size) {
      super(fields, size);

      assert fields.length == 1;

@@ -96,8 +95,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
   */
  private static final class MultiComparatorsFieldValueHitQueue<T extends FieldValueHitQueue.Entry> extends FieldValueHitQueue<T> {

    public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size)
        throws IOException {
    public MultiComparatorsFieldValueHitQueue(SortField[] fields, int size) {
      super(fields, size);
    }

@@ -123,7 +121,7 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
  }

  // prevent instantiation and extension.
  private FieldValueHitQueue(SortField[] fields, int size) throws IOException {
  private FieldValueHitQueue(SortField[] fields, int size) {
    super(size);
    // When we get here, fields.length is guaranteed to be > 0, therefore no
    // need to check it again.

@@ -154,9 +152,8 @@ public abstract class FieldValueHitQueue<T extends FieldValueHitQueue.Entry> ext
   *          priority first); cannot be <code>null</code> or empty
   * @param size
   *          The number of hits to retain. Must be greater than zero.
   * @throws IOException if there is a low-level IO error
   */
  public static <T extends FieldValueHitQueue.Entry> FieldValueHitQueue<T> create(SortField[] fields, int size) throws IOException {
  public static <T extends FieldValueHitQueue.Entry> FieldValueHitQueue<T> create(SortField[] fields, int size) {

    if (fields.length == 0) {
      throw new IllegalArgumentException("Sort must contain at least one field");
@@ -17,29 +17,66 @@
package org.apache.lucene.search;

import java.io.IOException;
import java.util.Set;

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;

/**
 * A query that uses either an index (points or terms) or doc values in order
 * to run a range query, depending which one is more efficient.
 * A query that uses either an index structure (points or terms) or doc values
 * in order to run a query, depending which one is more efficient. This is
 * typically useful for range queries, whose {@link Weight#scorer} is costly
 * to create since it usually needs to sort large lists of doc ids. For
 * instance, for a field that both indexed {@link LongPoint}s and
 * {@link SortedNumericDocValuesField}s with the same values, an efficient
 * range query could be created by doing:
 * <pre class="prettyprint">
 *   String field;
 *   long minValue, maxValue;
 *   Query pointQuery = LongPoint.newRangeQuery(field, minValue, maxValue);
 *   Query dvQuery = SortedNumericDocValuesField.newRangeQuery(field, minValue, maxValue);
 *   Query query = new IndexOrDocValuesQuery(pointQuery, dvQuery);
 * </pre>
 * The above query will be efficient as it will use points in the case that they
 * perform better, ie. when we need a good lead iterator that will be almost
 * entirely consumed; and doc values otherwise, ie. in the case that another
 * part of the query is already leading iteration but we still need the ability
 * to verify that some documents match.
 * <p><b>NOTE</b>: This query currently only works well with point range/exact
 * queries and their equivalent doc values queries.
 * @lucene.experimental
 */
public final class IndexOrDocValuesQuery extends Query {

  private final Query indexQuery, dvQuery;

  /**
   * Constructor that takes both a query that executes on an index structure
   * like the inverted index or the points tree, and another query that
   * executes on doc values. Both queries must match the same documents and
   * attribute constant scores.
   * Create an {@link IndexOrDocValuesQuery}. Both provided queries must match
   * the same documents and give the same scores.
   * @param indexQuery a query that has a good iterator but whose scorer may be costly to create
   * @param dvQuery a query whose scorer is cheap to create that can quickly check whether a given document matches
   */
  public IndexOrDocValuesQuery(Query indexQuery, Query dvQuery) {
    this.indexQuery = indexQuery;
    this.dvQuery = dvQuery;
  }

  /** Return the wrapped query that may be costly to initialize but has a good
   *  iterator. */
  public Query getIndexQuery() {
    return indexQuery;
  }

  /** Return the wrapped query that may be slow at identifying all matching
   *  documents, but which is cheap to initialize and can efficiently
   *  verify that some documents match. */
  public Query getRandomAccessQuery() {
    return dvQuery;
  }

  @Override
  public String toString(String field) {
    return indexQuery.toString(field);
@@ -76,16 +113,29 @@ public final class IndexOrDocValuesQuery extends Query {
  public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
    final Weight indexWeight = indexQuery.createWeight(searcher, needsScores, boost);
    final Weight dvWeight = dvQuery.createWeight(searcher, needsScores, boost);
    return new ConstantScoreWeight(this, boost) {
    return new Weight(this) {
      @Override
      public void extractTerms(Set<Term> terms) {
        indexWeight.extractTerms(terms);
      }

      @Override
      public Explanation explain(LeafReaderContext context, int doc) throws IOException {
        // We need to check a single doc, so the dv query should perform better
        return dvWeight.explain(context, doc);
      }

      @Override
      public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
        // Bulk scorers need to consume the entire set of docs, so using an
        // index structure should perform better
        return indexWeight.bulkScorer(context);
      }

      @Override
      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {
        final ScorerSupplier indexScorerSupplier = indexWeight.scorerSupplier(context);
        final ScorerSupplier dvScorerSupplier = dvWeight.scorerSupplier(context);
        if (indexScorerSupplier == null || dvScorerSupplier == null) {
          return null;
        }
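For callers, this weight delegation is invisible; running the combined query looks like any other search. A brief sketch, reusing the field/min/max names from the javadoc example above and assuming an existing IndexSearcher named searcher:

    // assuming String field, long minValue/maxValue, and IndexSearcher searcher are in scope
    Query pointQuery = LongPoint.newRangeQuery(field, minValue, maxValue);
    Query dvQuery = SortedNumericDocValuesField.newRangeQuery(field, minValue, maxValue);
    TopDocs hits = searcher.search(new IndexOrDocValuesQuery(pointQuery, dvQuery), 10);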
@@ -172,7 +172,7 @@ public abstract class LongValuesSource {

  @Override
  public FieldComparator<Long> newComparator(String fieldname, int numHits,
                                             int sortPos, boolean reversed) throws IOException {
                                             int sortPos, boolean reversed) {
    return new FieldComparator.LongComparator(numHits, fieldname, 0L){

      LeafReaderContext ctx;
@@ -335,7 +335,7 @@ public class SortField {
   * optimize themselves when they are the primary sort.
   * @return {@link FieldComparator} to use when sorting
   */
  public FieldComparator<?> getComparator(final int numHits, final int sortPos) throws IOException {
  public FieldComparator<?> getComparator(final int numHits, final int sortPos) {

    switch (type) {
    case SCORE:
@@ -136,7 +136,7 @@ public class SortedNumericSortField extends SortField {
  }

  @Override
  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
  public FieldComparator<?> getComparator(int numHits, int sortPos) {
    switch(type) {
    case INT:
      return new FieldComparator.IntComparator(numHits, getField(), (Integer) missingValue) {
@@ -118,7 +118,7 @@ public class SortedSetSortField extends SortField {
  }

  @Override
  public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
  public FieldComparator<?> getComparator(int numHits, int sortPos) {
    return new FieldComparator.TermOrdValComparator(numHits, getField(), missingValue == STRING_LAST) {
      @Override
      protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) throws IOException {
@@ -19,8 +19,6 @@ package org.apache.lucene.search;

import org.apache.lucene.util.PriorityQueue;

import java.io.IOException;

/** Represents hits returned by {@link
 * IndexSearcher#search(Query,int)}. */
public class TopDocs {

@@ -123,7 +121,7 @@ public class TopDocs {
    final FieldComparator<?>[] comparators;
    final int[] reverseMul;

    public MergeSortQueue(Sort sort, TopDocs[] shardHits) throws IOException {
    public MergeSortQueue(Sort sort, TopDocs[] shardHits) {
      super(shardHits.length);
      this.shardHits = new ScoreDoc[shardHits.length][];
      for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {

@@ -196,7 +194,7 @@ public class TopDocs {
   * the provided TopDocs, sorting by score. Each {@link TopDocs}
   * instance must be sorted.
   * @lucene.experimental */
  public static TopDocs merge(int topN, TopDocs[] shardHits) throws IOException {
  public static TopDocs merge(int topN, TopDocs[] shardHits) {
    return merge(0, topN, shardHits);
  }

@@ -205,7 +203,7 @@ public class TopDocs {
   * {@code start} top docs. This is typically useful for pagination.
   * @lucene.experimental
   */
  public static TopDocs merge(int start, int topN, TopDocs[] shardHits) throws IOException {
  public static TopDocs merge(int start, int topN, TopDocs[] shardHits) {
    return mergeAux(null, start, topN, shardHits);
  }

@@ -216,7 +214,7 @@ public class TopDocs {
   * filled (ie, <code>fillFields=true</code> must be
   * passed to {@link TopFieldCollector#create}).
   * @lucene.experimental */
  public static TopFieldDocs merge(Sort sort, int topN, TopFieldDocs[] shardHits) throws IOException {
  public static TopFieldDocs merge(Sort sort, int topN, TopFieldDocs[] shardHits) {
    return merge(sort, 0, topN, shardHits);
  }

@@ -225,7 +223,7 @@ public class TopDocs {
   * {@code start} top docs. This is typically useful for pagination.
   * @lucene.experimental
   */
  public static TopFieldDocs merge(Sort sort, int start, int topN, TopFieldDocs[] shardHits) throws IOException {
  public static TopFieldDocs merge(Sort sort, int start, int topN, TopFieldDocs[] shardHits) {
    if (sort == null) {
      throw new IllegalArgumentException("sort must be non-null when merging field-docs");
    }

@@ -234,7 +232,7 @@ public class TopDocs {

  /** Auxiliary method used by the {@link #merge} impls. A sort value of null
   * is used to indicate that docs should be sorted by score. */
  private static TopDocs mergeAux(Sort sort, int start, int size, TopDocs[] shardHits) throws IOException {
  private static TopDocs mergeAux(Sort sort, int start, int size, TopDocs[] shardHits) {
    final PriorityQueue<ShardRef> queue;
    if (sort == null) {
      queue = new ScoreMergeSortQueue(shardHits);
@@ -475,11 +475,9 @@ public abstract class TopFieldCollector extends TopDocsCollector<Entry> {
   * <code>trackDocScores</code> to true as well.
   * @return a {@link TopFieldCollector} instance which will sort the results by
   *         the sort criteria.
   * @throws IOException if there is a low-level I/O error
   */
  public static TopFieldCollector create(Sort sort, int numHits, FieldDoc after,
      boolean fillFields, boolean trackDocScores, boolean trackMaxScore)
      throws IOException {
      boolean fillFields, boolean trackDocScores, boolean trackMaxScore) {

    if (sort.fields.length == 0) {
      throw new IllegalArgumentException("Sort must contain at least one field");
@@ -31,7 +31,9 @@ import org.apache.lucene.index.BasePointsFormatTestCase;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MockRandomMergePolicy;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.index.SegmentReadState;
import org.apache.lucene.index.SegmentWriteState;

@@ -97,7 +99,13 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {

  public void testEstimatePointCount() throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
    IndexWriterConfig iwc = newIndexWriterConfig();
    // Avoid mockRandomMP since it may cause non-optimal merges that make the
    // number of points per leaf hard to predict
    while (iwc.getMergePolicy() instanceof MockRandomMergePolicy) {
      iwc.setMergePolicy(newMergePolicy());
    }
    IndexWriter w = new IndexWriter(dir, iwc);
    byte[] pointValue = new byte[3];
    byte[] uniquePointValue = new byte[3];
    random().nextBytes(uniquePointValue);

@@ -245,25 +253,28 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
    }));

    // If only one point matches, then the point count is (actualMaxPointsInLeafNode + 1) / 2
    assertEquals((actualMaxPointsInLeafNode + 1) / 2,
        points.estimatePointCount(new IntersectVisitor() {
          @Override
          public void visit(int docID, byte[] packedValue) throws IOException {}

          @Override
          public void visit(int docID) throws IOException {}

          @Override
          public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
            for (int dim = 0; dim < 2; ++dim) {
              if (StringHelper.compare(3, uniquePointValue[0], 0, maxPackedValue, dim * 3) > 0 ||
                  StringHelper.compare(3, uniquePointValue[0], 0, minPackedValue, dim * 3) < 0) {
                return Relation.CELL_OUTSIDE_QUERY;
              }
    // in general, or maybe 2x that if the point is a split value
    final long pointCount = points.estimatePointCount(new IntersectVisitor() {
        @Override
        public void visit(int docID, byte[] packedValue) throws IOException {}

        @Override
        public void visit(int docID) throws IOException {}

        @Override
        public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
          for (int dim = 0; dim < 2; ++dim) {
            if (StringHelper.compare(3, uniquePointValue[dim], 0, maxPackedValue, dim * 3) > 0 ||
                StringHelper.compare(3, uniquePointValue[dim], 0, minPackedValue, dim * 3) < 0) {
              return Relation.CELL_OUTSIDE_QUERY;
            }
          }
          return Relation.CELL_CROSSES_QUERY;
        }
      }));
          return Relation.CELL_CROSSES_QUERY;
        }
      });
    assertTrue(""+pointCount,
        pointCount == (actualMaxPointsInLeafNode + 1) / 2 || // common case
        pointCount == 2*((actualMaxPointsInLeafNode + 1) / 2)); // if the point is a split value

    r.close();
    dir.close();
@@ -0,0 +1,271 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.search;

import java.io.IOException;
import java.util.Arrays;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;

public class TestDocValuesQueries extends LuceneTestCase {

  public void testDuelPointRangeSortedNumericRangeQuery() throws IOException {
    doTestDuelPointRangeNumericRangeQuery(true, 1);
  }

  public void testDuelPointRangeMultivaluedSortedNumericRangeQuery() throws IOException {
    doTestDuelPointRangeNumericRangeQuery(true, 3);
  }

  public void testDuelPointRangeNumericRangeQuery() throws IOException {
    doTestDuelPointRangeNumericRangeQuery(false, 1);
  }

  private void doTestDuelPointRangeNumericRangeQuery(boolean sortedNumeric, int maxValuesPerDoc) throws IOException {
    final int iters = atLeast(10);
    for (int iter = 0; iter < iters; ++iter) {
      Directory dir = newDirectory();
      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
      final int numDocs = atLeast(100);
      for (int i = 0; i < numDocs; ++i) {
        Document doc = new Document();
        final int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc);
        for (int j = 0; j < numValues; ++j) {
          final long value = TestUtil.nextLong(random(), -100, 10000);
          if (sortedNumeric) {
            doc.add(new SortedNumericDocValuesField("dv", value));
          } else {
            doc.add(new NumericDocValuesField("dv", value));
          }
          doc.add(new LongPoint("idx", value));
        }
        iw.addDocument(doc);
      }
      if (random().nextBoolean()) {
        iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
      }
      final IndexReader reader = iw.getReader();
      final IndexSearcher searcher = newSearcher(reader, false);
      iw.close();

      for (int i = 0; i < 100; ++i) {
        final long min = random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000);
        final long max = random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000);
        final Query q1 = LongPoint.newRangeQuery("idx", min, max);
        final Query q2;
        if (sortedNumeric) {
          q2 = SortedNumericDocValuesField.newRangeQuery("dv", min, max);
        } else {
          q2 = NumericDocValuesField.newRangeQuery("dv", min, max);
        }
        assertSameMatches(searcher, q1, q2, false);
      }

      reader.close();
      dir.close();
    }
  }

  private void doTestDuelPointRangeSortedRangeQuery(boolean sortedSet, int maxValuesPerDoc) throws IOException {
    final int iters = atLeast(10);
    for (int iter = 0; iter < iters; ++iter) {
      Directory dir = newDirectory();
      RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
      final int numDocs = atLeast(100);
      for (int i = 0; i < numDocs; ++i) {
        Document doc = new Document();
        final int numValues = TestUtil.nextInt(random(), 0, maxValuesPerDoc);
        for (int j = 0; j < numValues; ++j) {
          final long value = TestUtil.nextLong(random(), -100, 10000);
          byte[] encoded = new byte[Long.BYTES];
          LongPoint.encodeDimension(value, encoded, 0);
          if (sortedSet) {
            doc.add(new SortedSetDocValuesField("dv", new BytesRef(encoded)));
          } else {
            doc.add(new SortedDocValuesField("dv", new BytesRef(encoded)));
          }
          doc.add(new LongPoint("idx", value));
        }
        iw.addDocument(doc);
      }
      if (random().nextBoolean()) {
        iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
      }
      final IndexReader reader = iw.getReader();
      final IndexSearcher searcher = newSearcher(reader, false);
      iw.close();

      for (int i = 0; i < 100; ++i) {
        long min = random().nextBoolean() ? Long.MIN_VALUE : TestUtil.nextLong(random(), -100, 10000);
        long max = random().nextBoolean() ? Long.MAX_VALUE : TestUtil.nextLong(random(), -100, 10000);
        byte[] encodedMin = new byte[Long.BYTES];
        byte[] encodedMax = new byte[Long.BYTES];
        LongPoint.encodeDimension(min, encodedMin, 0);
        LongPoint.encodeDimension(max, encodedMax, 0);
        boolean includeMin = true;
        boolean includeMax = true;
        if (random().nextBoolean()) {
          includeMin = false;
          min++;
        }
        if (random().nextBoolean()) {
          includeMax = false;
          max--;
        }
        final Query q1 = LongPoint.newRangeQuery("idx", min, max);
        final Query q2;
        if (sortedSet) {
          q2 = SortedSetDocValuesField.newRangeQuery("dv",
              min == Long.MIN_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMin),
              max == Long.MAX_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMax),
              includeMin, includeMax);
        } else {
          q2 = SortedDocValuesField.newRangeQuery("dv",
              min == Long.MIN_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMin),
              max == Long.MAX_VALUE && random().nextBoolean() ? null : new BytesRef(encodedMax),
              includeMin, includeMax);
        }
        assertSameMatches(searcher, q1, q2, false);
      }

      reader.close();
      dir.close();
    }
  }

  public void testDuelPointRangeSortedSetRangeQuery() throws IOException {
    doTestDuelPointRangeSortedRangeQuery(true, 1);
  }

  public void testDuelPointRangeMultivaluedSortedSetRangeQuery() throws IOException {
    doTestDuelPointRangeSortedRangeQuery(true, 3);
  }

  public void testDuelPointRangeSortedRangeQuery() throws IOException {
    doTestDuelPointRangeSortedRangeQuery(false, 1);
  }

  private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException {
    final int maxDoc = searcher.getIndexReader().maxDoc();
    final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
    final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
    assertEquals(td1.totalHits, td2.totalHits);
    for (int i = 0; i < td1.scoreDocs.length; ++i) {
      assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc);
      if (scores) {
        assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7);
      }
    }
  }

  public void testEquals() {
    Query q1 = SortedNumericDocValuesField.newRangeQuery("foo", 3, 5);
    QueryUtils.checkEqual(q1, SortedNumericDocValuesField.newRangeQuery("foo", 3, 5));
    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("foo", 3, 6));
    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("foo", 4, 5));
    QueryUtils.checkUnequal(q1, SortedNumericDocValuesField.newRangeQuery("bar", 3, 5));

    Query q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true);
    QueryUtils.checkEqual(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true));
    QueryUtils.checkUnequal(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("baz"), new BytesRef("baz"), true, true));
    QueryUtils.checkUnequal(q2, SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("bar"), true, true));
    QueryUtils.checkUnequal(q2, SortedSetDocValuesField.newRangeQuery("quux", new BytesRef("bar"), new BytesRef("baz"), true, true));
  }

  public void testToString() {
    Query q1 = SortedNumericDocValuesField.newRangeQuery("foo", 3, 5);
    assertEquals("foo:[3 TO 5]", q1.toString());
    assertEquals("[3 TO 5]", q1.toString("foo"));
    assertEquals("foo:[3 TO 5]", q1.toString("bar"));

    Query q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), true, true);
    assertEquals("foo:[[62 61 72] TO [62 61 7a]]", q2.toString());
    q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, true);
    assertEquals("foo:{[62 61 72] TO [62 61 7a]]", q2.toString());
    q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), new BytesRef("baz"), false, false);
    assertEquals("foo:{[62 61 72] TO [62 61 7a]}", q2.toString());
    q2 = SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("bar"), null, true, true);
    assertEquals("foo:[[62 61 72] TO *}", q2.toString());
    q2 = SortedSetDocValuesField.newRangeQuery("foo", null, new BytesRef("baz"), true, true);
    assertEquals("foo:{* TO [62 61 7a]]", q2.toString());
    assertEquals("{* TO [62 61 7a]]", q2.toString("foo"));
    assertEquals("foo:{* TO [62 61 7a]]", q2.toString("bar"));
  }

  public void testMissingField() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    iw.addDocument(new Document());
    IndexReader reader = iw.getReader();
    iw.close();
    IndexSearcher searcher = newSearcher(reader);
    for (Query query : Arrays.asList(
        NumericDocValuesField.newRangeQuery("foo", 2, 4),
        SortedNumericDocValuesField.newRangeQuery("foo", 2, 4),
        SortedDocValuesField.newRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()),
        SortedSetDocValuesField.newRangeQuery("foo", new BytesRef("abc"), new BytesRef("bcd"), random().nextBoolean(), random().nextBoolean()))) {
      Weight w = searcher.createNormalizedWeight(query, random().nextBoolean());
      assertNull(w.scorer(searcher.getIndexReader().leaves().get(0)));
    }
    reader.close();
    dir.close();
  }

  public void testSortedNumericNPE() throws IOException {
    Directory dir = newDirectory();
    RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
    double[] nums = {-1.7147449030215377E-208, -1.6887024655302576E-11, 1.534911516604164E113, 0.0,
        2.6947996404505155E-166, -2.649722021970773E306, 6.138239235731689E-198, 2.3967090122610808E111};
    for (int i = 0; i < nums.length; ++i) {
      Document doc = new Document();
      doc.add(new SortedNumericDocValuesField("dv", NumericUtils.doubleToSortableLong(nums[i])));
      iw.addDocument(doc);
    }
    iw.commit();
    final IndexReader reader = iw.getReader();
    final IndexSearcher searcher = newSearcher(reader);
|
||||
iw.close();
|
||||
|
||||
final long lo = NumericUtils.doubleToSortableLong(8.701032080293731E-226);
|
||||
final long hi = NumericUtils.doubleToSortableLong(2.0801416404385346E-41);
|
||||
|
||||
Query query = SortedNumericDocValuesField.newRangeQuery("dv", lo, hi);
|
||||
// TODO: assert expected matches
|
||||
searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
|
||||
|
||||
// swap order, should still work
|
||||
query = SortedNumericDocValuesField.newRangeQuery("dv", hi, lo);
|
||||
// TODO: assert expected matches
|
||||
searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
|
||||
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
|
||||
}
|
|
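For reference, a minimal sketch of the factory-method API these duel tests exercise (field names "idx" and "dv" mirror the test above; this is an illustration, not part of the commit): the same value is indexed both as a point and as doc values, and the two range queries are expected to match the same documents.

import org.apache.lucene.document.Document;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.search.Query;

class DocValuesRangeSketch {
  static Query[] example() {
    // Index the same value both ways, as the duel tests above do.
    Document doc = new Document();
    long value = 42L;
    doc.add(new LongPoint("idx", value));                  // BKD-indexed point
    doc.add(new SortedNumericDocValuesField("dv", value)); // doc values

    // Equivalent ranges over the two representations.
    Query byPoints = LongPoint.newRangeQuery("idx", 0L, 100L);
    Query byDocValues = SortedNumericDocValuesField.newRangeQuery("dv", 0L, 100L);
    return new Query[] {byPoints, byDocValues};
  }
}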
@@ -17,20 +17,26 @@
package org.apache.lucene.search;


import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.index.*;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.FieldValueHitQueue.Entry;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.lucene.util.LuceneTestCase;

public class TestElevationComparator extends LuceneTestCase {
@@ -144,7 +150,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
}

@Override
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) {
return new FieldComparator<Integer>() {

private final int[] values = new int[numHits];
@@ -64,21 +64,21 @@ public class TestIndexOrDocValuesQuery extends LuceneTestCase {
// The term query is more selective, so the IndexOrDocValuesQuery should use doc values
final Query q1 = new BooleanQuery.Builder()
.add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
.add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), new DocValuesNumbersQuery("f2", 2L)), Occur.MUST)
.add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 2), NumericDocValuesField.newRangeQuery("f2", 2L, 2L)), Occur.MUST)
.build();

final Weight w1 = searcher.createNormalizedWeight(q1, random().nextBoolean());
final Scorer s1 = w1.scorer(reader.leaves().get(0));
final Scorer s1 = w1.scorer(searcher.getIndexReader().leaves().get(0));
assertNotNull(s1.twoPhaseIterator()); // means we use doc values

// The term query is less selective, so the IndexOrDocValuesQuery should use points
final Query q2 = new BooleanQuery.Builder()
.add(new TermQuery(new Term("f1", "bar")), Occur.MUST)
.add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), new DocValuesNumbersQuery("f2", 42L)), Occur.MUST)
.add(new IndexOrDocValuesQuery(LongPoint.newExactQuery("f2", 42), NumericDocValuesField.newRangeQuery("f2", 42L, 42L)), Occur.MUST)
.build();

final Weight w2 = searcher.createNormalizedWeight(q2, random().nextBoolean());
final Scorer s2 = w2.scorer(reader.leaves().get(0));
final Scorer s2 = w2.scorer(searcher.getIndexReader().leaves().get(0));
assertNull(s2.twoPhaseIterator()); // means we use points

reader.close();
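A hedged sketch of the construct these assertions exercise (names mirror the hunk above; illustration only, not part of the commit): both legs of an IndexOrDocValuesQuery must match exactly the same documents, so the searcher is free to execute whichever leg is cheaper next to the surrounding clauses.

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class IndexOrDocValuesSketch {
  static Query example() {
    // Point-based and doc-values-based variants of the same predicate.
    Query range = new IndexOrDocValuesQuery(
        LongPoint.newExactQuery("f2", 2),
        NumericDocValuesField.newRangeQuery("f2", 2L, 2L));
    // Next to a selective term query the doc-values leg wins (verification
    // only); next to a non-selective one the points leg drives iteration.
    return new BooleanQuery.Builder()
        .add(new TermQuery(new Term("f1", "foo")), Occur.MUST)
        .add(range, Occur.MUST)
        .build();
  }
}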
@@ -1173,23 +1173,26 @@ public class TestBKD extends LuceneTestCase {
}));

// If only one point matches, then the point count is (actualMaxPointsInLeafNode + 1) / 2
assertEquals((actualMaxPointsInLeafNode + 1) / 2,
points.estimatePointCount(new IntersectVisitor() {
@Override
public void visit(int docID, byte[] packedValue) throws IOException {}

@Override
public void visit(int docID) throws IOException {}

@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
if (StringHelper.compare(3, uniquePointValue, 0, maxPackedValue, 0) > 0 ||
StringHelper.compare(3, uniquePointValue, 0, minPackedValue, 0) < 0) {
return Relation.CELL_OUTSIDE_QUERY;
}
return Relation.CELL_CROSSES_QUERY;
}
}));
// in general, or maybe 2x that if the point is a split value
final long pointCount = points.estimatePointCount(new IntersectVisitor() {
@Override
public void visit(int docID, byte[] packedValue) throws IOException {}

@Override
public void visit(int docID) throws IOException {}

@Override
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
if (StringHelper.compare(numBytesPerDim, uniquePointValue, 0, maxPackedValue, 0) > 0 ||
StringHelper.compare(numBytesPerDim, uniquePointValue, 0, minPackedValue, 0) < 0) {
return Relation.CELL_OUTSIDE_QUERY;
}
return Relation.CELL_CROSSES_QUERY;
}
});
assertTrue(""+pointCount,
pointCount == (actualMaxPointsInLeafNode + 1) / 2 || // common case
pointCount == 2*((actualMaxPointsInLeafNode + 1) / 2)); // if the point is a split value

pointsIn.close();
dir.close();
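The bound asserted above can be sanity-checked with a worked instance (the leaf size is hypothetical, chosen only to make the arithmetic concrete):

class EstimateSketch {
  static void check() {
    int actualMaxPointsInLeafNode = 9;                 // hypothetical leaf size
    long common = (actualMaxPointsInLeafNode + 1) / 2; // 5: a crossing leaf is estimated at half capacity
    long split = 2 * common;                           // 10: the matching point is a split value
    assert common == 5 && split == 10;
  }
}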
@@ -216,7 +216,7 @@ public class BlockGroupingCollector extends SimpleCollector {
* @param lastDocPerGroup a {@link Weight} that marks the
* last document in each group.
*/
public BlockGroupingCollector(Sort groupSort, int topNGroups, boolean needsScores, Weight lastDocPerGroup) throws IOException {
public BlockGroupingCollector(Sort groupSort, int topNGroups, boolean needsScores, Weight lastDocPerGroup) {

if (topNGroups < 1) {
throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");
@@ -67,10 +67,9 @@ abstract public class FirstPassGroupingCollector<T> extends SimpleCollector {
* ie, if you want to groupSort by relevance use
* Sort.RELEVANCE.
* @param topNGroups How many top groups to keep.
* @throws IOException If I/O related errors occur
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public FirstPassGroupingCollector(Sort groupSort, int topNGroups) throws IOException {
public FirstPassGroupingCollector(Sort groupSort, int topNGroups) {
if (topNGroups < 1) {
throw new IllegalArgumentException("topNGroups must be >= 1 (got " + topNGroups + ")");
}
@@ -16,7 +16,6 @@
*/
package org.apache.lucene.search.grouping;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -167,7 +166,7 @@ public class SearchGroup<T> {
public final int[] reversed;

@SuppressWarnings({"unchecked", "rawtypes"})
public GroupComparator(Sort groupSort) throws IOException {
public GroupComparator(Sort groupSort) {
final SortField[] sortFields = groupSort.getSort();
comparators = new FieldComparator[sortFields.length];
reversed = new int[sortFields.length];
@@ -208,7 +207,7 @@ public class SearchGroup<T> {
private final NavigableSet<MergedGroup<T>> queue;
private final Map<T,MergedGroup<T>> groupsSeen;

public GroupMerger(Sort groupSort) throws IOException {
public GroupMerger(Sort groupSort) {
groupComp = new GroupComparator<>(groupSort);
queue = new TreeSet<>(groupComp);
groupsSeen = new HashMap<>();
@@ -340,8 +339,7 @@ public class SearchGroup<T> {
*
* <p>NOTE: this returns null if the topGroups is empty.
*/
public static <T> Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> topGroups, int offset, int topN, Sort groupSort)
throws IOException {
public static <T> Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> topGroups, int offset, int topN, Sort groupSort) {
if (topGroups.isEmpty()) {
return null;
} else {
@@ -16,8 +16,6 @@
*/
package org.apache.lucene.search.grouping;

import java.io.IOException;

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
@@ -97,8 +95,7 @@ public class TopGroups<T> {
* <b>NOTE</b>: the topDocs in each GroupDocs is actually
* an instance of TopDocsAndShards
*/
public static <T> TopGroups<T> merge(TopGroups<T>[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN, ScoreMergeMode scoreMergeMode)
throws IOException {
public static <T> TopGroups<T> merge(TopGroups<T>[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN, ScoreMergeMode scoreMergeMode) {

//System.out.println("TopGroups.merge");

@@ -7,7 +7,7 @@
/cglib/cglib-nodep = 2.2
/com.adobe.xmp/xmpcore = 5.1.2

com.carrotsearch.randomizedtesting.version = 2.4.0
com.carrotsearch.randomizedtesting.version = 2.5.0
/com.carrotsearch.randomizedtesting/junit4-ant = ${com.carrotsearch.randomizedtesting.version}
/com.carrotsearch.randomizedtesting/randomizedtesting-runner = ${com.carrotsearch.randomizedtesting.version}

@@ -116,7 +116,7 @@ public class ToParentBlockJoinCollector implements Collector {
* not be null. If you pass true trackScores, all
* ToParentBlockQuery instances must not use
* ScoreMode.None. */
public ToParentBlockJoinCollector(Sort sort, int numParentHits, boolean trackScores, boolean trackMaxScore) throws IOException {
public ToParentBlockJoinCollector(Sort sort, int numParentHits, boolean trackScores, boolean trackMaxScore) {
// TODO: allow null sort to be specialized to relevance
// only collector
this.sort = sort;
@@ -87,7 +87,7 @@ public class ToParentBlockJoinSortField extends SortField {
}

@Override
public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
public FieldComparator<?> getComparator(int numHits, int sortPos) {
switch (getType()) {
case STRING:
return getStringComparator(numHits);
@@ -1 +0,0 @@
0222eb23dd6f45541acf6a5ac69cd9e9bdce25d2
@@ -0,0 +1 @@
2d00ff1042ae258f33830f26f9b30fc3a43d37e1
@@ -229,7 +229,7 @@ public abstract class ValueSource {

@Override
public FieldComparator<Double> newComparator(String fieldname, int numHits,
int sortPos, boolean reversed) throws IOException {
int sortPos, boolean reversed) {
return new ValueSourceComparator(context, numHits);
}
}
@@ -16,11 +16,9 @@
*/
package org.apache.lucene.document;

import java.io.IOException;

import org.apache.lucene.geo.GeoUtils;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;
import org.apache.lucene.geo.GeoUtils;

/**
* Sorts by distance from an origin location.
@@ -42,7 +40,7 @@ final class LatLonPointSortField extends SortField {
}

@Override
public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
public FieldComparator<?> getComparator(int numHits, int sortPos) {
return new LatLonPointDistanceComparator(getField(), latitude, longitude, numHits);
}

@@ -1,276 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;

import java.io.IOException;
import java.util.Objects;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.BytesRef;

/**
* A range query that works on top of the doc values APIs. Such queries are
* usually slow since they do not use an inverted index. However, in the
* dense case where most documents match this query, it <b>might</b> be as
* fast or faster than a regular {@link PointRangeQuery}.
*
* <b>NOTE:</b> This query is typically best used within a
* {@link IndexOrDocValuesQuery} alongside a query that uses an indexed
* structure such as {@link PointValues points} or {@link Terms terms},
* which allows to run the query on doc values when that would be more
* efficient, and using an index otherwise.
*
* @lucene.experimental
*/
public final class DocValuesRangeQuery extends Query {

/** Create a new numeric range query on a numeric doc-values field. The field
* must has been indexed with either {@link DocValuesType#NUMERIC} or
* {@link DocValuesType#SORTED_NUMERIC} doc values. */
public static Query newLongRange(String field, Long lowerVal, Long upperVal, boolean includeLower, boolean includeUpper) {
return new DocValuesRangeQuery(field, lowerVal, upperVal, includeLower, includeUpper);
}

/** Create a new numeric range query on a numeric doc-values field. The field
* must has been indexed with {@link DocValuesType#SORTED} or
* {@link DocValuesType#SORTED_SET} doc values. */
public static Query newBytesRefRange(String field, BytesRef lowerVal, BytesRef upperVal, boolean includeLower, boolean includeUpper) {
return new DocValuesRangeQuery(field, deepCopyOf(lowerVal), deepCopyOf(upperVal), includeLower, includeUpper);
}

private static BytesRef deepCopyOf(BytesRef b) {
if (b == null) {
return null;
} else {
return BytesRef.deepCopyOf(b);
}
}

private final String field;
private final Object lowerVal, upperVal;
private final boolean includeLower, includeUpper;

private DocValuesRangeQuery(String field, Object lowerVal, Object upperVal, boolean includeLower, boolean includeUpper) {
this.field = Objects.requireNonNull(field);
this.lowerVal = lowerVal;
this.upperVal = upperVal;
this.includeLower = includeLower;
this.includeUpper = includeUpper;
}

@Override
public boolean equals(Object other) {
return sameClassAs(other) &&
equalsTo(getClass().cast(other));
}

private boolean equalsTo(DocValuesRangeQuery other) {
return field.equals(other.field) &&
Objects.equals(lowerVal, other.lowerVal) &&
Objects.equals(upperVal, other.upperVal) &&
includeLower == other.includeLower &&
includeUpper == other.includeUpper;
}

@Override
public int hashCode() {
return 31 * classHash() + Objects.hash(field, lowerVal, upperVal, includeLower, includeUpper);
}

public String getField() {
return field;
}

public Object getLowerVal() {
return lowerVal;
}

public Object getUpperVal() {
return upperVal;
}

public boolean isIncludeLower() {
return includeLower;
}

public boolean isIncludeUpper() {
return includeUpper;
}

@Override
public String toString(String field) {
StringBuilder sb = new StringBuilder();
if (this.field.equals(field) == false) {
sb.append(this.field).append(':');
}
sb.append(includeLower ? '[' : '{');
sb.append(lowerVal == null ? "*" : lowerVal.toString());
sb.append(" TO ");
sb.append(upperVal == null ? "*" : upperVal.toString());
sb.append(includeUpper ? ']' : '}');
return sb.toString();
}

@Override
public Query rewrite(IndexReader reader) throws IOException {
if (lowerVal == null && upperVal == null) {
return new FieldValueQuery(field);
}
return super.rewrite(reader);
}

@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
if (lowerVal == null && upperVal == null) {
throw new IllegalStateException("Both min and max values must not be null, call rewrite first");
}

return new ConstantScoreWeight(DocValuesRangeQuery.this, boost) {

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final TwoPhaseIterator iterator = createTwoPhaseIterator(context);
if (iterator == null) {
return null;
}
return new ConstantScoreScorer(this, score(), iterator);
}

private TwoPhaseIterator createTwoPhaseIterator(LeafReaderContext context) throws IOException {
if (lowerVal instanceof Long || upperVal instanceof Long) {

final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field);

final long min;
if (lowerVal == null) {
min = Long.MIN_VALUE;
} else if (includeLower) {
min = (long) lowerVal;
} else {
if ((long) lowerVal == Long.MAX_VALUE) {
return null;
}
min = 1 + (long) lowerVal;
}

final long max;
if (upperVal == null) {
max = Long.MAX_VALUE;
} else if (includeUpper) {
max = (long) upperVal;
} else {
if ((long) upperVal == Long.MIN_VALUE) {
return null;
}
max = -1 + (long) upperVal;
}

if (min > max) {
return null;
}

return new TwoPhaseIterator(values) {

@Override
public boolean matches() throws IOException {
final int count = values.docValueCount();
assert count > 0;
for (int i = 0; i < count; ++i) {
final long value = values.nextValue();
if (value >= min && value <= max) {
return true;
}
}
return false;
}

@Override
public float matchCost() {
return 2; // 2 comparisons
}

};

} else if (lowerVal instanceof BytesRef || upperVal instanceof BytesRef) {

final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field);

final long minOrd;
if (lowerVal == null) {
minOrd = 0;
} else {
final long ord = values.lookupTerm((BytesRef) lowerVal);
if (ord < 0) {
minOrd = -1 - ord;
} else if (includeLower) {
minOrd = ord;
} else {
minOrd = ord + 1;
}
}

final long maxOrd;
if (upperVal == null) {
maxOrd = values.getValueCount() - 1;
} else {
final long ord = values.lookupTerm((BytesRef) upperVal);
if (ord < 0) {
maxOrd = -2 - ord;
} else if (includeUpper) {
maxOrd = ord;
} else {
maxOrd = ord - 1;
}
}

if (minOrd > maxOrd) {
return null;
}

return new TwoPhaseIterator(values) {

@Override
public boolean matches() throws IOException {
for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
if (ord >= minOrd && ord <= maxOrd) {
return true;
}
}
return false;
}

@Override
public float matchCost() {
return 2; // 2 comparisons
}
};

} else {
throw new AssertionError();
}
}
};
}

}
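The sandbox class removed above overlaps with the per-field factory methods used elsewhere in this commit. A hedged migration sketch for the inclusive case (field name illustrative; exclusive bounds would need the usual plus/minus-one adjustment the removed class performed internally):

import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.search.Query;

class MigrationSketch {
  // Before (class removed above):
  //   Query q = DocValuesRangeQuery.newLongRange("dv", 2L, 5L, true, true);
  // After, with the factory method on the field class:
  static Query after() {
    return SortedNumericDocValuesField.newRangeQuery("dv", 2L, 5L);
  }
}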
@@ -1,307 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;

import java.io.IOException;

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.TestUtil;

public class TestDocValuesRangeQuery extends LuceneTestCase {

public void testDuelNumericRangeQuery() throws IOException {
final int iters = atLeast(10);
for (int iter = 0; iter < iters; ++iter) {
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
final int numDocs = atLeast(100);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
final int numValues = random().nextInt(2);
for (int j = 0; j < numValues; ++j) {
final long value = TestUtil.nextLong(random(), -100, 10000);
doc.add(new SortedNumericDocValuesField("dv", value));
doc.add(new LongPoint("idx", value));
}
iw.addDocument(doc);
}
if (random().nextBoolean()) {
iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
}
iw.commit();
final IndexReader reader = iw.getReader();
final IndexSearcher searcher = newSearcher(reader, false);
iw.close();

for (int i = 0; i < 100; ++i) {
final Long min = TestUtil.nextLong(random(), -100, 1000);
final Long max = TestUtil.nextLong(random(), -100, 1000);
final Query q1 = LongPoint.newRangeQuery("idx", min, max);
final Query q2 = DocValuesRangeQuery.newLongRange("dv", min, max, true, true);
assertSameMatches(searcher, q1, q2, false);
}

reader.close();
dir.close();
}
}

private static BytesRef toSortableBytes(Long l) {
if (l == null) {
return null;
} else {
byte[] bytes = new byte[Long.BYTES];
NumericUtils.longToSortableBytes(l, bytes, 0);
return new BytesRef(bytes);
}
}

public void testDuelNumericSorted() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
final int numDocs = atLeast(100);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
final int numValues = random().nextInt(3);
for (int j = 0; j < numValues; ++j) {
final long value = TestUtil.nextLong(random(), -100, 10000);
doc.add(new SortedNumericDocValuesField("dv1", value));
doc.add(new SortedSetDocValuesField("dv2", toSortableBytes(value)));
}
iw.addDocument(doc);
}
if (random().nextBoolean()) {
iw.deleteDocuments(DocValuesRangeQuery.newLongRange("dv1", 0L, 10L, true, true));
}
iw.commit();
final IndexReader reader = iw.getReader();
final IndexSearcher searcher = newSearcher(reader);
iw.close();

for (int i = 0; i < 100; ++i) {
final Long min = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
final Long max = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
final boolean minInclusive = random().nextBoolean();
final boolean maxInclusive = random().nextBoolean();
final Query q1 = DocValuesRangeQuery.newLongRange("dv1", min, max, minInclusive, maxInclusive);
final Query q2 = DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), minInclusive, maxInclusive);
assertSameMatches(searcher, q1, q2, true);
}

reader.close();
dir.close();
}

public void testScore() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
final int numDocs = atLeast(100);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
final int numValues = random().nextInt(3);
for (int j = 0; j < numValues; ++j) {
final long value = TestUtil.nextLong(random(), -100, 10000);
doc.add(new SortedNumericDocValuesField("dv1", value));
doc.add(new SortedSetDocValuesField("dv2", toSortableBytes(value)));
}
iw.addDocument(doc);
}
if (random().nextBoolean()) {
iw.deleteDocuments(DocValuesRangeQuery.newLongRange("dv1", 0L, 10L, true, true));
}
iw.commit();
final IndexReader reader = iw.getReader();
final IndexSearcher searcher = newSearcher(reader);
iw.close();

for (int i = 0; i < 100; ++i) {
final Long min = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
final Long max = random().nextBoolean() ? null : TestUtil.nextLong(random(), -100, 1000);
final boolean minInclusive = random().nextBoolean();
final boolean maxInclusive = random().nextBoolean();

final float boost = random().nextFloat() * 10;

final Query q1 = new BoostQuery(DocValuesRangeQuery.newLongRange("dv1", min, max, minInclusive, maxInclusive), boost);
final Query csq1 = new BoostQuery(new ConstantScoreQuery(DocValuesRangeQuery.newLongRange("dv1", min, max, minInclusive, maxInclusive)), boost);
assertSameMatches(searcher, q1, csq1, true);

final Query q2 = new BoostQuery(DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), minInclusive, maxInclusive), boost);
final Query csq2 = new BoostQuery(new ConstantScoreQuery(DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), minInclusive, maxInclusive)), boost);
assertSameMatches(searcher, q2, csq2, true);
}

reader.close();
dir.close();
}

public void testApproximation() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
final int numDocs = atLeast(100);
for (int i = 0; i < numDocs; ++i) {
Document doc = new Document();
final int numValues = random().nextInt(3);
for (int j = 0; j < numValues; ++j) {
final long value = TestUtil.nextLong(random(), -100, 10000);
doc.add(new SortedNumericDocValuesField("dv1", value));
doc.add(new SortedSetDocValuesField("dv2", toSortableBytes(value)));
doc.add(new LongPoint("idx", value));
doc.add(new StringField("f", random().nextBoolean() ? "a" : "b", Store.NO));
}
iw.addDocument(doc);
}
if (random().nextBoolean()) {
iw.deleteDocuments(LongPoint.newRangeQuery("idx", 0L, 10L));
}
iw.commit();
final IndexReader reader = iw.getReader();
final IndexSearcher searcher = newSearcher(reader, false);
iw.close();

for (int i = 0; i < 100; ++i) {
final Long min = TestUtil.nextLong(random(), -100, 1000);
final Long max = TestUtil.nextLong(random(), -100, 1000);

BooleanQuery.Builder ref = new BooleanQuery.Builder();
ref.add(LongPoint.newRangeQuery("idx", min, max), Occur.FILTER);
ref.add(new TermQuery(new Term("f", "a")), Occur.MUST);

BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
bq1.add(DocValuesRangeQuery.newLongRange("dv1", min, max, true, true), Occur.FILTER);
bq1.add(new TermQuery(new Term("f", "a")), Occur.MUST);

assertSameMatches(searcher, ref.build(), bq1.build(), true);

BooleanQuery.Builder bq2 = new BooleanQuery.Builder();
bq2.add(DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(min), toSortableBytes(max), true, true), Occur.FILTER);
bq2.add(new TermQuery(new Term("f", "a")), Occur.MUST);

assertSameMatches(searcher, ref.build(), bq2.build(), true);
}

reader.close();
dir.close();
}

private void assertSameMatches(IndexSearcher searcher, Query q1, Query q2, boolean scores) throws IOException {
final int maxDoc = searcher.getIndexReader().maxDoc();
final TopDocs td1 = searcher.search(q1, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
final TopDocs td2 = searcher.search(q2, maxDoc, scores ? Sort.RELEVANCE : Sort.INDEXORDER);
assertEquals(td1.totalHits, td2.totalHits);
for (int i = 0; i < td1.scoreDocs.length; ++i) {
assertEquals(td1.scoreDocs[i].doc, td2.scoreDocs[i].doc);
if (scores) {
assertEquals(td1.scoreDocs[i].score, td2.scoreDocs[i].score, 10e-7);
}
}
}

public void testToString() {
assertEquals("f:[2 TO 5]", DocValuesRangeQuery.newLongRange("f", 2L, 5L, true, true).toString());
assertEquals("f:{2 TO 5]", DocValuesRangeQuery.newLongRange("f", 2L, 5L, false, true).toString());
assertEquals("f:{2 TO 5}", DocValuesRangeQuery.newLongRange("f", 2L, 5L, false, false).toString());
assertEquals("f:{* TO 5}", DocValuesRangeQuery.newLongRange("f", null, 5L, false, false).toString());
assertEquals("f:[2 TO *}", DocValuesRangeQuery.newLongRange("f", 2L, null, true, false).toString());

BytesRef min = new BytesRef("a");
BytesRef max = new BytesRef("b");
assertEquals("f:[[61] TO [62]]", DocValuesRangeQuery.newBytesRefRange("f", min, max, true, true).toString());
assertEquals("f:{[61] TO [62]]", DocValuesRangeQuery.newBytesRefRange("f", min, max, false, true).toString());
assertEquals("f:{[61] TO [62]}", DocValuesRangeQuery.newBytesRefRange("f", min, max, false, false).toString());
assertEquals("f:{* TO [62]}", DocValuesRangeQuery.newBytesRefRange("f", null, max, false, false).toString());
assertEquals("f:[[61] TO *}", DocValuesRangeQuery.newBytesRefRange("f", min, null, true, false).toString());
}

public void testDocValuesRangeSupportsApproximation() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
Document doc = new Document();
doc.add(new NumericDocValuesField("dv1", 5L));
doc.add(new SortedDocValuesField("dv2", toSortableBytes(42L)));
iw.addDocument(doc);
iw.commit();
final IndexReader reader = iw.getReader();
final LeafReaderContext ctx = reader.leaves().get(0);
final IndexSearcher searcher = newSearcher(reader);
iw.close();

Query q1 = DocValuesRangeQuery.newLongRange("dv1", 0L, 100L, random().nextBoolean(), random().nextBoolean());
Weight w = searcher.createNormalizedWeight(q1, true);
Scorer s = w.scorer(ctx);
assertNotNull(s.twoPhaseIterator());

Query q2 = DocValuesRangeQuery.newBytesRefRange("dv2", toSortableBytes(0L), toSortableBytes(100L), random().nextBoolean(), random().nextBoolean());
w = searcher.createNormalizedWeight(q2, true);
s = w.scorer(ctx);
assertNotNull(s.twoPhaseIterator());

reader.close();
dir.close();
}

public void testLongRangeBoundaryValues() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter iw = new RandomIndexWriter(random(), dir);

Document doc = new Document();
doc.add(new SortedNumericDocValuesField("dv", 100l));
iw.addDocument(doc);

doc = new Document();
doc.add(new SortedNumericDocValuesField("dv", 200l));
iw.addDocument(doc);

iw.commit();

final IndexReader reader = iw.getReader();
final IndexSearcher searcher = newSearcher(reader, false);
iw.close();

Long min = Long.MIN_VALUE;
Long max = Long.MIN_VALUE;
Query query = DocValuesRangeQuery.newLongRange("dv", min, max, true, false);
TopDocs td = searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
assertEquals(0, td.totalHits);

min = Long.MAX_VALUE;
max = Long.MAX_VALUE;
query = DocValuesRangeQuery.newLongRange("dv", min, max, false, true);
td = searcher.search(query, searcher.reader.maxDoc(), Sort.INDEXORDER);
assertEquals(0, td.totalHits);

reader.close();
dir.close();
}

}
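The deleted test's toSortableBytes helper depends on NumericUtils.longToSortableBytes producing encodings whose unsigned byte order matches the numeric order of the inputs, which is what let a numeric range be duelled against a BytesRef range. A minimal sketch of that property (values illustrative):

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;

class SortableBytesSketch {
  static BytesRef encode(long l) {
    byte[] bytes = new byte[Long.BYTES];
    NumericUtils.longToSortableBytes(l, bytes, 0); // order-preserving encoding
    return new BytesRef(bytes);
  }

  static void check() {
    // -7 < 3 numerically, so the encodings must compare the same way.
    assert encode(-7L).compareTo(encode(3L)) < 0;
  }
}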
@@ -26,7 +26,6 @@ use warnings;

my $jira_url_prefix = 'http://issues.apache.org/jira/browse/';
my $github_pull_request_prefix = 'https://github.com/apache/lucene-solr/pull/';
my $bugzilla_url_prefix = 'http://issues.apache.org/bugzilla/show_bug.cgi?id=';
my $month_regex = &setup_month_regex;
my %month_nums = &setup_month_nums;
my %lucene_bugzilla_jira_map = &setup_lucene_bugzilla_jira_map;
@@ -643,6 +642,7 @@ sub markup_trailing_attribution {
(?!inverse\ )
(?![Tt]he\ )
(?!use\ the\ bug\ number)
(?!e\.?g\.?\b)
[^()"]+?\))\s*$}
{\n${extra_newline}<span class="attrib">$1</span>}x) {
# If attribution is not found, then look for attribution with a
@@ -668,6 +668,7 @@ sub markup_trailing_attribution {
(?!inverse\ )
(?![Tt]he\ )
(?!use\ the\ bug\ number)
(?!e\.?g\.?\b)
[^()"]+?\)))
((?:\.|(?i:\.?\s*Issue\s+\d{3,}|LUCENE-\d+)\.?)\s*)$}
{
@@ -16,11 +16,8 @@
*/
package org.apache.lucene.spatial3d;

import java.io.IOException;

import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;

import org.apache.lucene.spatial3d.geom.GeoOutsideDistance;

/**
@@ -42,7 +39,7 @@ final class Geo3DPointOutsideSortField extends SortField {
}

@Override
public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
public FieldComparator<?> getComparator(int numHits, int sortPos) {
return new Geo3DPointOutsideDistanceComparator(getField(), distanceShape, numHits);
}

@@ -16,11 +16,8 @@
*/
package org.apache.lucene.spatial3d;

import java.io.IOException;

import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;

import org.apache.lucene.spatial3d.geom.GeoDistanceShape;

/**
@@ -42,7 +39,7 @@ final class Geo3DPointSortField extends SortField {
}

@Override
public FieldComparator<?> getComparator(int numHits, int sortPos) throws IOException {
public FieldComparator<?> getComparator(int numHits, int sortPos) {
return new Geo3DPointDistanceComparator(getField(), distanceShape, numHits);
}

@@ -14,4 +14,14 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.sourceDecorator({keywords:"bytes,default,double,enum,extend,extensions,false,group,import,max,message,option,optional,package,repeated,required,returns,rpc,service,syntax,to,true",types:/^(bool|(double|s?fixed|[su]?int)(32|64)|float|string)\b/,cStyleComments:!0}),["proto"]);
!function(){
var oldonload = window.onload;
if (typeof oldonload != 'function') {
window.onload = prettyPrint;
} else {
window.onload = function() {
oldonload();
prettyPrint();
}
}
}();
@@ -1,18 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.createSimpleLexer([["com",/^#[^\n\r]*/,null,"#"],["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,null,'"']],[["kwd",/^(?:ADS|AD|AUG|BZF|BZMF|CAE|CAF|CA|CCS|COM|CS|DAS|DCA|DCOM|DCS|DDOUBL|DIM|DOUBLE|DTCB|DTCF|DV|DXCH|EDRUPT|EXTEND|INCR|INDEX|NDX|INHINT|LXCH|MASK|MSK|MP|MSU|NOOP|OVSK|QXCH|RAND|READ|RELINT|RESUME|RETURN|ROR|RXOR|SQUARE|SU|TCR|TCAA|OVSK|TCF|TC|TS|WAND|WOR|WRITE|XCH|XLQ|XXALQ|ZL|ZQ|ADD|ADZ|SUB|SUZ|MPY|MPR|MPZ|DVP|COM|ABS|CLA|CLZ|LDQ|STO|STQ|ALS|LLS|LRS|TRA|TSQ|TMI|TOV|AXT|TIX|DLY|INP|OUT)\s/,
null],["typ",/^(?:-?GENADR|=MINUS|2BCADR|VN|BOF|MM|-?2CADR|-?[1-6]DNADR|ADRES|BBCON|[ES]?BANK=?|BLOCK|BNKSUM|E?CADR|COUNT\*?|2?DEC\*?|-?DNCHAN|-?DNPTR|EQUALS|ERASE|MEMORY|2?OCT|REMADR|SETLOC|SUBRO|ORG|BSS|BES|SYN|EQU|DEFINE|END)\s/,null],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[!-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["apollo","agc","aea"]);
@@ -1,18 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\f\r ]+/,null," \t\r\n"]],[["str",/^"(?:[^\n\f\r"\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*"/,null],["str",/^'(?:[^\n\f\r'\\]|\\(?:\r\n?|\n|\f)|\\[\S\s])*'/,null],["lang-css-str",/^url\(([^"')]*)\)/i],["kwd",/^(?:url|rgb|!important|@import|@page|@media|@charset|inherit)(?=[^\w-]|$)/i,null],["lang-css-kw",/^(-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*)\s*:/i],["com",/^\/\*[^*]*\*+(?:[^*/][^*]*\*+)*\//],["com",
/^(?:<\!--|--\>)/],["lit",/^(?:\d+|\d*\.\d+)(?:%|[a-z]+)?/i],["lit",/^#[\da-f]{3,6}/i],["pln",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i],["pun",/^[^\s\w"']+/]]),["css"]);PR.registerLangHandler(PR.createSimpleLexer([],[["kwd",/^-?(?:[_a-z]|\\[\da-f]+ ?)(?:[\w-]|\\\\[\da-f]+ ?)*/i]]),["css-kw"]);PR.registerLangHandler(PR.createSimpleLexer([],[["str",/^[^"')]+/]]),["css-str"]);
@@ -1,18 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t-\r ]+/,null,"\t\n\r "],["str",/^"(?:[^\n\f\r"\\]|\\[\S\s])*(?:"|$)/,null,'"'],["str",/^'(?:[^\n\f\r'\\]|\\[^&])'?/,null,"'"],["lit",/^(?:0o[0-7]+|0x[\da-f]+|\d+(?:\.\d+)?(?:e[+-]?\d+)?)/i,null,"0123456789"]],[["com",/^(?:--+[^\n\f\r]*|{-(?:[^-]|-+[^}-])*-})/],["kwd",/^(?:case|class|data|default|deriving|do|else|if|import|in|infix|infixl|infixr|instance|let|module|newtype|of|then|type|where|_)(?=[^\d'A-Za-z]|$)/,
null],["pln",/^(?:[A-Z][\w']*\.)*[A-Za-z][\w']*/],["pun",/^[^\d\t-\r "'A-Za-z]+/]]),["hs"]);
@@ -1,19 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
var a=null;
PR.registerLangHandler(PR.createSimpleLexer([["opn",/^\(+/,a,"("],["clo",/^\)+/,a,")"],["com",/^;[^\n\r]*/,a,";"],["pln",/^[\t\n\r \xa0]+/,a,"\t\n\r \xa0"],["str",/^"(?:[^"\\]|\\[\S\s])*(?:"|$)/,a,'"']],[["kwd",/^(?:block|c[ad]+r|catch|con[ds]|def(?:ine|un)|do|eq|eql|equal|equalp|eval-when|flet|format|go|if|labels|lambda|let|load-time-value|locally|macrolet|multiple-value-call|nil|progn|progv|quote|require|return-from|setq|symbol-macrolet|t|tagbody|the|throw|unwind)\b/,a],
["lit",/^[+-]?(?:[#0]x[\da-f]+|\d+\/\d+|(?:\.\d+|\d+(?:\.\d*)?)(?:[de][+-]?\d+)?)/i],["lit",/^'(?:-*(?:\w|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?)?/],["pln",/^-*(?:[_a-z]|\\[!-~])(?:[\w-]*|\\[!-~])[!=?]?/i],["pun",/^[^\w\t\n\r "'-);\\\xa0]+/]]),["cl","el","lisp","scm"]);
@@ -1,18 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \xa0"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$))/,null,"\"'"]],[["com",/^--(?:\[(=*)\[[\S\s]*?(?:]\1]|$)|[^\n\r]*)/],["str",/^\[(=*)\[[\S\s]*?(?:]\1]|$)/],["kwd",/^(?:and|break|do|else|elseif|end|false|for|function|if|in|local|nil|not|or|repeat|return|then|true|until|while)\b/,null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],
["pln",/^[_a-z]\w*/i],["pun",/^[^\w\t\n\r \xa0][^\w\t\n\r "'+=\xa0-]*/]]),["lua"]);
@@ -1,18 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \xa0"],["com",/^#(?:if[\t\n\r \xa0]+(?:[$_a-z][\w']*|``[^\t\n\r`]*(?:``|$))|else|endif|light)/i,null,"#"],["str",/^(?:"(?:[^"\\]|\\[\S\s])*(?:"|$)|'(?:[^'\\]|\\[\S\s])(?:'|$))/,null,"\"'"]],[["com",/^(?:\/\/[^\n\r]*|\(\*[\S\s]*?\*\))/],["kwd",/^(?:abstract|and|as|assert|begin|class|default|delegate|do|done|downcast|downto|elif|else|end|exception|extern|false|finally|for|fun|function|if|in|inherit|inline|interface|internal|lazy|let|match|member|module|mutable|namespace|new|null|of|open|or|override|private|public|rec|return|static|struct|then|to|true|try|type|upcast|use|val|void|when|while|with|yield|asr|land|lor|lsl|lsr|lxor|mod|sig|atomic|break|checked|component|const|constraint|constructor|continue|eager|event|external|fixed|functor|global|include|method|mixin|object|parallel|process|protected|pure|sealed|trait|virtual|volatile)\b/],
["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],["pln",/^(?:[_a-z][\w']*[!#?]?|``[^\t\n\r`]*(?:``|$))/i],["pun",/^[^\w\t\n\r "'\xa0]+/]]),["fs","ml"]);
@@ -1,18 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0]+/,null,"\t\n\r \xa0"],["str",/^(?:"(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*')/,null,"\"'"]],[["com",/^(?:--[^\n\r]*|\/\*[\S\s]*?(?:\*\/|$))/],["kwd",/^(?:add|all|alter|and|any|as|asc|authorization|backup|begin|between|break|browse|bulk|by|cascade|case|check|checkpoint|close|clustered|coalesce|collate|column|commit|compute|constraint|contains|containstable|continue|convert|create|cross|current|current_date|current_time|current_timestamp|current_user|cursor|database|dbcc|deallocate|declare|default|delete|deny|desc|disk|distinct|distributed|double|drop|dummy|dump|else|end|errlvl|escape|except|exec|execute|exists|exit|fetch|file|fillfactor|for|foreign|freetext|freetexttable|from|full|function|goto|grant|group|having|holdlock|identity|identitycol|identity_insert|if|in|index|inner|insert|intersect|into|is|join|key|kill|left|like|lineno|load|match|merge|national|nocheck|nonclustered|not|null|nullif|of|off|offsets|on|open|opendatasource|openquery|openrowset|openxml|option|or|order|outer|over|percent|plan|precision|primary|print|proc|procedure|public|raiserror|read|readtext|reconfigure|references|replication|restore|restrict|return|revoke|right|rollback|rowcount|rowguidcol|rule|save|schema|select|session_user|set|setuser|shutdown|some|statistics|system_user|table|textsize|then|to|top|tran|transaction|trigger|truncate|tsequal|union|unique|update|updatetext|use|user|using|values|varying|view|waitfor|when|where|while|with|writetext)(?=[^\w-]|$)/i,
null],["lit",/^[+-]?(?:0x[\da-f]+|(?:\.\d+|\d+(?:\.\d*)?)(?:e[+-]?\d+)?)/i],["pln",/^[_a-z][\w-]*/i],["pun",/^[^\w\t\n\r "'\xa0][^\w\t\n\r "'+\xa0-]*/]]),["sql"]);
@ -1,18 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\t\n\r \xa0\u2028\u2029]+/,null,"\t\n\r Â\xa0

"],["str",/^(?:["\u201c\u201d](?:[^"\u201c\u201d]|["\u201c\u201d]{2})(?:["\u201c\u201d]c|$)|["\u201c\u201d](?:[^"\u201c\u201d]|["\u201c\u201d]{2})*(?:["\u201c\u201d]|$))/i,null,'"“â€<C3A2>'],["com",/^['\u2018\u2019].*/,null,"'‘’"]],[["kwd",/^(?:addhandler|addressof|alias|and|andalso|ansi|as|assembly|auto|boolean|byref|byte|byval|call|case|catch|cbool|cbyte|cchar|cdate|cdbl|cdec|char|cint|class|clng|cobj|const|cshort|csng|cstr|ctype|date|decimal|declare|default|delegate|dim|directcast|do|double|each|else|elseif|end|endif|enum|erase|error|event|exit|finally|for|friend|function|get|gettype|gosub|goto|handles|if|implements|imports|in|inherits|integer|interface|is|let|lib|like|long|loop|me|mod|module|mustinherit|mustoverride|mybase|myclass|namespace|new|next|not|notinheritable|notoverridable|object|on|option|optional|or|orelse|overloads|overridable|overrides|paramarray|preserve|private|property|protected|public|raiseevent|readonly|redim|removehandler|resume|return|select|set|shadows|shared|short|single|static|step|stop|string|structure|sub|synclock|then|throw|to|try|typeof|unicode|until|variant|wend|when|while|with|withevents|writeonly|xor|endif|gosub|let|variant|wend)\b/i,
null],["com",/^rem.*/i],["lit",/^(?:true\b|false\b|nothing\b|\d+(?:e[+-]?\d+[dfr]?|[dfilrs])?|(?:&h[\da-f]+|&o[0-7]+)[ils]?|\d*\.\d+(?:e[+-]?\d+)?[dfr]?|#\s+(?:\d+[/-]\d+[/-]\d+(?:\s+\d+:\d+(?::\d+)?(\s*(?:am|pm))?)?|\d+:\d+(?::\d+)?(\s*(?:am|pm))?)\s+#)/i],["pln",/^(?:(?:[a-z]|_\w)\w*|\[(?:[a-z]|_\w)\w*])/i],["pun",/^[^\w\t\n\r "'[\]\xa0\u2018\u2019\u201c\u201d\u2028\u2029]+/],["pun",/^(?:\[|])/]]),["vb","vbs"]);
@@ -1,18 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
PR.registerLangHandler(PR.createSimpleLexer([["pln",/^[\d\t a-gi-z\xa0]+/,null,"\t Â\xa0abcdefgijklmnopqrstuvwxyz0123456789"],["pun",/^[*=[\]^~]+/,null,"=*~^[]"]],[["lang-wiki.meta",/(?:^^|\r\n?|\n)(#[a-z]+)\b/],["lit",/^[A-Z][a-z][\da-z]+[A-Z][a-z][^\W_]+\b/],["lang-",/^{{{([\S\s]+?)}}}/],["lang-",/^`([^\n\r`]+)`/],["str",/^https?:\/\/[^\s#/?]*(?:\/[^\s#?]*)?(?:\?[^\s#]*)?(?:#\S*)?/i],["pln",/^(?:\r\n|[\S\s])[^\n\r#*=A-[^`h{~]*/]]),["wiki"]);
PR.registerLangHandler(PR.createSimpleLexer([["kwd",/^#[a-z]+/i,null,"#"]],[]),["wiki.meta"]);
@@ -1,17 +1,17 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

Copyright (C) 2006 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
.pln{color:#000}@media screen{.str{color:#080}.kwd{color:#008}.com{color:#800}.typ{color:#606}.lit{color:#066}.pun,.opn,.clo{color:#660}.tag{color:#008}.atn{color:#606}.atv{color:#080}.dec,.var{color:#606}.fun{color:red}}@media print,projection{.str{color:#060}.kwd{color:#006;font-weight:bold}.com{color:#600;font-style:italic}.typ{color:#404;font-weight:bold}.lit{color:#044}.pun,.opn,.clo{color:#440}.tag{color:#006;font-weight:bold}.atn{color:#404}.atv{color:#060}}pre.prettyprint{padding:2px;border:1px solid #888}ol.linenums{margin-top:0;margin-bottom:0}li.L0,li.L1,li.L2,li.L3,li.L5,li.L6,li.L7,li.L8{list-style-type:none}li.L1,li.L3,li.L5,li.L7,li.L9{background:#eee}
@@ -1,44 +1,46 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
var q=null;window.PR_SHOULD_USE_CONTINUATION=!0;
(function(){function L(a){function m(a){var f=a.charCodeAt(0);if(f!==92)return f;var b=a.charAt(1);return(f=r[b])?f:"0"<=b&&b<="7"?parseInt(a.substring(1),8):b==="u"||b==="x"?parseInt(a.substring(2),16):a.charCodeAt(1)}function e(a){if(a<32)return(a<16?"\\x0":"\\x")+a.toString(16);a=String.fromCharCode(a);if(a==="\\"||a==="-"||a==="["||a==="]")a="\\"+a;return a}function h(a){for(var f=a.substring(1,a.length-1).match(/\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\S\s]|[^\\]/g),a=
[],b=[],o=f[0]==="^",c=o?1:0,i=f.length;c<i;++c){var j=f[c];if(/\\[bdsw]/i.test(j))a.push(j);else{var j=m(j),d;c+2<i&&"-"===f[c+1]?(d=m(f[c+2]),c+=2):d=j;b.push([j,d]);d<65||j>122||(d<65||j>90||b.push([Math.max(65,j)|32,Math.min(d,90)|32]),d<97||j>122||b.push([Math.max(97,j)&-33,Math.min(d,122)&-33]))}}b.sort(function(a,f){return a[0]-f[0]||f[1]-a[1]});f=[];j=[NaN,NaN];for(c=0;c<b.length;++c)i=b[c],i[0]<=j[1]+1?j[1]=Math.max(j[1],i[1]):f.push(j=i);b=["["];o&&b.push("^");b.push.apply(b,a);for(c=0;c<
f.length;++c)i=f[c],b.push(e(i[0])),i[1]>i[0]&&(i[1]+1>i[0]&&b.push("-"),b.push(e(i[1])));b.push("]");return b.join("")}function y(a){for(var f=a.source.match(/\[(?:[^\\\]]|\\[\S\s])*]|\\u[\dA-Fa-f]{4}|\\x[\dA-Fa-f]{2}|\\\d+|\\[^\dux]|\(\?[!:=]|[()^]|[^()[\\^]+/g),b=f.length,d=[],c=0,i=0;c<b;++c){var j=f[c];j==="("?++i:"\\"===j.charAt(0)&&(j=+j.substring(1))&&j<=i&&(d[j]=-1)}for(c=1;c<d.length;++c)-1===d[c]&&(d[c]=++t);for(i=c=0;c<b;++c)j=f[c],j==="("?(++i,d[i]===void 0&&(f[c]="(?:")):"\\"===j.charAt(0)&&
(j=+j.substring(1))&&j<=i&&(f[c]="\\"+d[i]);for(i=c=0;c<b;++c)"^"===f[c]&&"^"!==f[c+1]&&(f[c]="");if(a.ignoreCase&&s)for(c=0;c<b;++c)j=f[c],a=j.charAt(0),j.length>=2&&a==="["?f[c]=h(j):a!=="\\"&&(f[c]=j.replace(/[A-Za-z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return f.join("")}for(var t=0,s=!1,l=!1,p=0,d=a.length;p<d;++p){var g=a[p];if(g.ignoreCase)l=!0;else if(/[a-z]/i.test(g.source.replace(/\\u[\da-f]{4}|\\x[\da-f]{2}|\\[^UXux]/gi,""))){s=!0;l=!1;break}}for(var r=
{b:8,t:9,n:10,v:11,f:12,r:13},n=[],p=0,d=a.length;p<d;++p){g=a[p];if(g.global||g.multiline)throw Error(""+g);n.push("(?:"+y(g)+")")}return RegExp(n.join("|"),l?"gi":"g")}function M(a){function m(a){switch(a.nodeType){case 1:if(e.test(a.className))break;for(var g=a.firstChild;g;g=g.nextSibling)m(g);g=a.nodeName;if("BR"===g||"LI"===g)h[s]="\n",t[s<<1]=y++,t[s++<<1|1]=a;break;case 3:case 4:g=a.nodeValue,g.length&&(g=p?g.replace(/\r\n?/g,"\n"):g.replace(/[\t\n\r ]+/g," "),h[s]=g,t[s<<1]=y,y+=g.length,
t[s++<<1|1]=a)}}var e=/(?:^|\s)nocode(?:\s|$)/,h=[],y=0,t=[],s=0,l;a.currentStyle?l=a.currentStyle.whiteSpace:window.getComputedStyle&&(l=document.defaultView.getComputedStyle(a,q).getPropertyValue("white-space"));var p=l&&"pre"===l.substring(0,3);m(a);return{a:h.join("").replace(/\n$/,""),c:t}}function B(a,m,e,h){m&&(a={a:m,d:a},e(a),h.push.apply(h,a.e))}function x(a,m){function e(a){for(var l=a.d,p=[l,"pln"],d=0,g=a.a.match(y)||[],r={},n=0,z=g.length;n<z;++n){var f=g[n],b=r[f],o=void 0,c;if(typeof b===
"string")c=!1;else{var i=h[f.charAt(0)];if(i)o=f.match(i[1]),b=i[0];else{for(c=0;c<t;++c)if(i=m[c],o=f.match(i[1])){b=i[0];break}o||(b="pln")}if((c=b.length>=5&&"lang-"===b.substring(0,5))&&!(o&&typeof o[1]==="string"))c=!1,b="src";c||(r[f]=b)}i=d;d+=f.length;if(c){c=o[1];var j=f.indexOf(c),k=j+c.length;o[2]&&(k=f.length-o[2].length,j=k-c.length);b=b.substring(5);B(l+i,f.substring(0,j),e,p);B(l+i+j,c,C(b,c),p);B(l+i+k,f.substring(k),e,p)}else p.push(l+i,b)}a.e=p}var h={},y;(function(){for(var e=a.concat(m),
l=[],p={},d=0,g=e.length;d<g;++d){var r=e[d],n=r[3];if(n)for(var k=n.length;--k>=0;)h[n.charAt(k)]=r;r=r[1];n=""+r;p.hasOwnProperty(n)||(l.push(r),p[n]=q)}l.push(/[\S\s]/);y=L(l)})();var t=m.length;return e}function u(a){var m=[],e=[];a.tripleQuotedStrings?m.push(["str",/^(?:'''(?:[^'\\]|\\[\S\s]|''?(?=[^']))*(?:'''|$)|"""(?:[^"\\]|\\[\S\s]|""?(?=[^"]))*(?:"""|$)|'(?:[^'\\]|\\[\S\s])*(?:'|$)|"(?:[^"\\]|\\[\S\s])*(?:"|$))/,q,"'\""]):a.multiLineStrings?m.push(["str",/^(?:'(?:[^'\\]|\\[\S\s])*(?:'|$)|"(?:[^"\\]|\\[\S\s])*(?:"|$)|`(?:[^\\`]|\\[\S\s])*(?:`|$))/,
q,"'\"`"]):m.push(["str",/^(?:'(?:[^\n\r'\\]|\\.)*(?:'|$)|"(?:[^\n\r"\\]|\\.)*(?:"|$))/,q,"\"'"]);a.verbatimStrings&&e.push(["str",/^@"(?:[^"]|"")*(?:"|$)/,q]);var h=a.hashComments;h&&(a.cStyleComments?(h>1?m.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,q,"#"]):m.push(["com",/^#(?:(?:define|elif|else|endif|error|ifdef|include|ifndef|line|pragma|undef|warning)\b|[^\n\r]*)/,q,"#"]),e.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h|[a-z]\w*)>/,q])):m.push(["com",/^#[^\n\r]*/,
q,"#"]));a.cStyleComments&&(e.push(["com",/^\/\/[^\n\r]*/,q]),e.push(["com",/^\/\*[\S\s]*?(?:\*\/|$)/,q]));a.regexLiterals&&e.push(["lang-regex",/^(?:^^\.?|[!+-]|!=|!==|#|%|%=|&|&&|&&=|&=|\(|\*|\*=|\+=|,|-=|->|\/|\/=|:|::|;|<|<<|<<=|<=|=|==|===|>|>=|>>|>>=|>>>|>>>=|[?@[^]|\^=|\^\^|\^\^=|{|\||\|=|\|\||\|\|=|~|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\s*(\/(?=[^*/])(?:[^/[\\]|\\[\S\s]|\[(?:[^\\\]]|\\[\S\s])*(?:]|$))+\/)/]);(h=a.types)&&e.push(["typ",h]);a=(""+a.keywords).replace(/^ | $/g,
"");a.length&&e.push(["kwd",RegExp("^(?:"+a.replace(/[\s,]+/g,"|")+")\\b"),q]);m.push(["pln",/^\s+/,q," \r\n\t\xa0"]);e.push(["lit",/^@[$_a-z][\w$@]*/i,q],["typ",/^(?:[@_]?[A-Z]+[a-z][\w$@]*|\w+_t\b)/,q],["pln",/^[$_a-z][\w$@]*/i,q],["lit",/^(?:0x[\da-f]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+-]?\d+)?)[a-z]*/i,q,"0123456789"],["pln",/^\\[\S\s]?/,q],["pun",/^.[^\s\w"-$'./@\\`]*/,q]);return x(m,e)}function D(a,m){function e(a){switch(a.nodeType){case 1:if(k.test(a.className))break;if("BR"===a.nodeName)h(a),
a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)e(a);break;case 3:case 4:if(p){var b=a.nodeValue,d=b.match(t);if(d){var c=b.substring(0,d.index);a.nodeValue=c;(b=b.substring(d.index+d[0].length))&&a.parentNode.insertBefore(s.createTextNode(b),a.nextSibling);h(a);c||a.parentNode.removeChild(a)}}}}function h(a){function b(a,d){var e=d?a.cloneNode(!1):a,f=a.parentNode;if(f){var f=b(f,1),g=a.nextSibling;f.appendChild(e);for(var h=g;h;h=g)g=h.nextSibling,f.appendChild(h)}return e}
for(;!a.nextSibling;)if(a=a.parentNode,!a)return;for(var a=b(a.nextSibling,0),e;(e=a.parentNode)&&e.nodeType===1;)a=e;d.push(a)}var k=/(?:^|\s)nocode(?:\s|$)/,t=/\r\n?|\n/,s=a.ownerDocument,l;a.currentStyle?l=a.currentStyle.whiteSpace:window.getComputedStyle&&(l=s.defaultView.getComputedStyle(a,q).getPropertyValue("white-space"));var p=l&&"pre"===l.substring(0,3);for(l=s.createElement("LI");a.firstChild;)l.appendChild(a.firstChild);for(var d=[l],g=0;g<d.length;++g)e(d[g]);m===(m|0)&&d[0].setAttribute("value",
m);var r=s.createElement("OL");r.className="linenums";for(var n=Math.max(0,m-1|0)||0,g=0,z=d.length;g<z;++g)l=d[g],l.className="L"+(g+n)%10,l.firstChild||l.appendChild(s.createTextNode("\xa0")),r.appendChild(l);a.appendChild(r)}function k(a,m){for(var e=m.length;--e>=0;){var h=m[e];A.hasOwnProperty(h)?window.console&&console.warn("cannot override language handler %s",h):A[h]=a}}function C(a,m){if(!a||!A.hasOwnProperty(a))a=/^\s*</.test(m)?"default-markup":"default-code";return A[a]}function E(a){var m=
a.g;try{var e=M(a.h),h=e.a;a.a=h;a.c=e.c;a.d=0;C(m,h)(a);var k=/\bMSIE\b/.test(navigator.userAgent),m=/\n/g,t=a.a,s=t.length,e=0,l=a.c,p=l.length,h=0,d=a.e,g=d.length,a=0;d[g]=s;var r,n;for(n=r=0;n<g;)d[n]!==d[n+2]?(d[r++]=d[n++],d[r++]=d[n++]):n+=2;g=r;for(n=r=0;n<g;){for(var z=d[n],f=d[n+1],b=n+2;b+2<=g&&d[b+1]===f;)b+=2;d[r++]=z;d[r++]=f;n=b}for(d.length=r;h<p;){var o=l[h+2]||s,c=d[a+2]||s,b=Math.min(o,c),i=l[h+1],j;if(i.nodeType!==1&&(j=t.substring(e,b))){k&&(j=j.replace(m,"\r"));i.nodeValue=
j;var u=i.ownerDocument,v=u.createElement("SPAN");v.className=d[a+1];var x=i.parentNode;x.replaceChild(v,i);v.appendChild(i);e<o&&(l[h+1]=i=u.createTextNode(t.substring(b,o)),x.insertBefore(i,v.nextSibling))}e=b;e>=o&&(h+=2);e>=c&&(a+=2)}}catch(w){"console"in window&&console.log(w&&w.stack?w.stack:w)}}var v=["break,continue,do,else,for,if,return,while"],w=[[v,"auto,case,char,const,default,double,enum,extern,float,goto,int,long,register,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],
"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],F=[w,"alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,dynamic_cast,explicit,export,friend,inline,late_check,mutable,namespace,nullptr,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],G=[w,"abstract,boolean,byte,extends,final,finally,implements,import,instanceof,null,native,package,strictfp,super,synchronized,throws,transient"],
H=[G,"as,base,by,checked,decimal,delegate,descending,dynamic,event,fixed,foreach,from,group,implicit,in,interface,internal,into,is,lock,object,out,override,orderby,params,partial,readonly,ref,sbyte,sealed,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,var"],w=[w,"debugger,eval,export,function,get,null,set,undefined,var,with,Infinity,NaN"],I=[v,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],
J=[v,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],v=[v,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],K=/^(DIR|FILE|vector|(de|priority_)?queue|list|stack|(const_)?iterator|(multi)?(set|map)|bitset|u?(int|float)\d*)/,N=/\S/,O=u({keywords:[F,H,w,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END"+
I,J,v],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),A={};k(O,["default-code"]);k(x([],[["pln",/^[^<?]+/],["dec",/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\S\s]*?(?:--\>|$)/],["lang-",/^<\?([\S\s]+?)(?:\?>|$)/],["lang-",/^<%([\S\s]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\S\s]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\S\s]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\S\s]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),
["default-markup","htm","html","mxml","xhtml","xml","xsl"]);k(x([["pln",/^\s+/,q," \t\r\n"],["atv",/^(?:"[^"]*"?|'[^']*'?)/,q,"\"'"]],[["tag",/^^<\/?[a-z](?:[\w-.:]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^\s"'>]*(?:[^\s"'/>]|\/(?=\s)))/],["pun",/^[/<->]+/],["lang-js",/^on\w+\s*=\s*"([^"]+)"/i],["lang-js",/^on\w+\s*=\s*'([^']+)'/i],["lang-js",/^on\w+\s*=\s*([^\s"'>]+)/i],["lang-css",/^style\s*=\s*"([^"]+)"/i],["lang-css",/^style\s*=\s*'([^']+)'/i],["lang-css",
/^style\s*=\s*([^\s"'>]+)/i]]),["in.tag"]);k(x([],[["atv",/^[\S\s]+/]]),["uq.val"]);k(u({keywords:F,hashComments:!0,cStyleComments:!0,types:K}),["c","cc","cpp","cxx","cyc","m"]);k(u({keywords:"null,true,false"}),["json"]);k(u({keywords:H,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:K}),["cs"]);k(u({keywords:G,cStyleComments:!0}),["java"]);k(u({keywords:v,hashComments:!0,multiLineStrings:!0}),["bsh","csh","sh"]);k(u({keywords:I,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),
["cv","py"]);k(u({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["perl","pl","pm"]);k(u({keywords:J,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb"]);k(u({keywords:w,cStyleComments:!0,regexLiterals:!0}),["js"]);k(u({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,true,try,unless,until,when,while,yes",
hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,regexLiterals:!0}),["coffee"]);k(x([],[["str",/^[\S\s]+/]]),["regex"]);window.prettyPrintOne=function(a,m,e){var h=document.createElement("PRE");h.innerHTML=a;e&&D(h,e);E({g:m,i:e,h:h});return h.innerHTML};window.prettyPrint=function(a){function m(){for(var e=window.PR_SHOULD_USE_CONTINUATION?l.now()+250:Infinity;p<h.length&&l.now()<e;p++){var n=h[p],k=n.className;if(k.indexOf("prettyprint")>=0){var k=k.match(g),f,b;if(b=
!k){b=n;for(var o=void 0,c=b.firstChild;c;c=c.nextSibling)var i=c.nodeType,o=i===1?o?b:c:i===3?N.test(c.nodeValue)?b:o:o;b=(f=o===b?void 0:o)&&"CODE"===f.tagName}b&&(k=f.className.match(g));k&&(k=k[1]);b=!1;for(o=n.parentNode;o;o=o.parentNode)if((o.tagName==="pre"||o.tagName==="code"||o.tagName==="xmp")&&o.className&&o.className.indexOf("prettyprint")>=0){b=!0;break}b||((b=(b=n.className.match(/\blinenums\b(?::(\d+))?/))?b[1]&&b[1].length?+b[1]:!0:!1)&&D(n,b),d={g:k,h:n,i:b},E(d))}}p<h.length?setTimeout(m,
250):a&&a()}for(var e=[document.getElementsByTagName("pre"),document.getElementsByTagName("code"),document.getElementsByTagName("xmp")],h=[],k=0;k<e.length;++k)for(var t=0,s=e[k].length;t<s;++t)h.push(e[k][t]);var e=q,l=Date;l.now||(l={now:function(){return+new Date}});var p=0,d,g=/\blang(?:uage)?-([\w.]+)(?!\S)/;m()};window.PR={createSimpleLexer:x,registerLangHandler:k,sourceDecorator:u,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",
PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ"}})();
!function(){/*

Copyright (C) 2006 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
window.PR_SHOULD_USE_CONTINUATION=!0;
(function(){function T(a){function d(e){var b=e.charCodeAt(0);if(92!==b)return b;var a=e.charAt(1);return(b=w[a])?b:"0"<=a&&"7">=a?parseInt(e.substring(1),8):"u"===a||"x"===a?parseInt(e.substring(2),16):e.charCodeAt(1)}function f(e){if(32>e)return(16>e?"\\x0":"\\x")+e.toString(16);e=String.fromCharCode(e);return"\\"===e||"-"===e||"]"===e||"^"===e?"\\"+e:e}function b(e){var b=e.substring(1,e.length-1).match(/\\u[0-9A-Fa-f]{4}|\\x[0-9A-Fa-f]{2}|\\[0-3][0-7]{0,2}|\\[0-7]{1,2}|\\[\s\S]|-|[^-\\]/g);e=
[];var a="^"===b[0],c=["["];a&&c.push("^");for(var a=a?1:0,g=b.length;a<g;++a){var h=b[a];if(/\\[bdsw]/i.test(h))c.push(h);else{var h=d(h),k;a+2<g&&"-"===b[a+1]?(k=d(b[a+2]),a+=2):k=h;e.push([h,k]);65>k||122<h||(65>k||90<h||e.push([Math.max(65,h)|32,Math.min(k,90)|32]),97>k||122<h||e.push([Math.max(97,h)&-33,Math.min(k,122)&-33]))}}e.sort(function(e,a){return e[0]-a[0]||a[1]-e[1]});b=[];g=[];for(a=0;a<e.length;++a)h=e[a],h[0]<=g[1]+1?g[1]=Math.max(g[1],h[1]):b.push(g=h);for(a=0;a<b.length;++a)h=b[a],
c.push(f(h[0])),h[1]>h[0]&&(h[1]+1>h[0]&&c.push("-"),c.push(f(h[1])));c.push("]");return c.join("")}function v(e){for(var a=e.source.match(/(?:\[(?:[^\x5C\x5D]|\\[\s\S])*\]|\\u[A-Fa-f0-9]{4}|\\x[A-Fa-f0-9]{2}|\\[0-9]+|\\[^ux0-9]|\(\?[:!=]|[\(\)\^]|[^\x5B\x5C\(\)\^]+)/g),c=a.length,d=[],g=0,h=0;g<c;++g){var k=a[g];"("===k?++h:"\\"===k.charAt(0)&&(k=+k.substring(1))&&(k<=h?d[k]=-1:a[g]=f(k))}for(g=1;g<d.length;++g)-1===d[g]&&(d[g]=++A);for(h=g=0;g<c;++g)k=a[g],"("===k?(++h,d[h]||(a[g]="(?:")):"\\"===
k.charAt(0)&&(k=+k.substring(1))&&k<=h&&(a[g]="\\"+d[k]);for(g=0;g<c;++g)"^"===a[g]&&"^"!==a[g+1]&&(a[g]="");if(e.ignoreCase&&n)for(g=0;g<c;++g)k=a[g],e=k.charAt(0),2<=k.length&&"["===e?a[g]=b(k):"\\"!==e&&(a[g]=k.replace(/[a-zA-Z]/g,function(a){a=a.charCodeAt(0);return"["+String.fromCharCode(a&-33,a|32)+"]"}));return a.join("")}for(var A=0,n=!1,l=!1,m=0,c=a.length;m<c;++m){var p=a[m];if(p.ignoreCase)l=!0;else if(/[a-z]/i.test(p.source.replace(/\\u[0-9a-f]{4}|\\x[0-9a-f]{2}|\\[^ux]/gi,""))){n=!0;
l=!1;break}}for(var w={b:8,t:9,n:10,v:11,f:12,r:13},r=[],m=0,c=a.length;m<c;++m){p=a[m];if(p.global||p.multiline)throw Error(""+p);r.push("(?:"+v(p)+")")}return new RegExp(r.join("|"),l?"gi":"g")}function U(a,d){function f(a){var c=a.nodeType;if(1==c){if(!b.test(a.className)){for(c=a.firstChild;c;c=c.nextSibling)f(c);c=a.nodeName.toLowerCase();if("br"===c||"li"===c)v[l]="\n",n[l<<1]=A++,n[l++<<1|1]=a}}else if(3==c||4==c)c=a.nodeValue,c.length&&(c=d?c.replace(/\r\n?/g,"\n"):c.replace(/[ \t\r\n]+/g,
" "),v[l]=c,n[l<<1]=A,A+=c.length,n[l++<<1|1]=a)}var b=/(?:^|\s)nocode(?:\s|$)/,v=[],A=0,n=[],l=0;f(a);return{a:v.join("").replace(/\n$/,""),c:n}}function J(a,d,f,b,v){f&&(a={h:a,l:1,j:null,m:null,a:f,c:null,i:d,g:null},b(a),v.push.apply(v,a.g))}function V(a){for(var d=void 0,f=a.firstChild;f;f=f.nextSibling)var b=f.nodeType,d=1===b?d?a:f:3===b?W.test(f.nodeValue)?a:d:d;return d===a?void 0:d}function G(a,d){function f(a){for(var l=a.i,m=a.h,c=[l,"pln"],p=0,w=a.a.match(v)||[],r={},e=0,t=w.length;e<
t;++e){var z=w[e],q=r[z],g=void 0,h;if("string"===typeof q)h=!1;else{var k=b[z.charAt(0)];if(k)g=z.match(k[1]),q=k[0];else{for(h=0;h<A;++h)if(k=d[h],g=z.match(k[1])){q=k[0];break}g||(q="pln")}!(h=5<=q.length&&"lang-"===q.substring(0,5))||g&&"string"===typeof g[1]||(h=!1,q="src");h||(r[z]=q)}k=p;p+=z.length;if(h){h=g[1];var B=z.indexOf(h),D=B+h.length;g[2]&&(D=z.length-g[2].length,B=D-h.length);q=q.substring(5);J(m,l+k,z.substring(0,B),f,c);J(m,l+k+B,h,K(q,h),c);J(m,l+k+D,z.substring(D),f,c)}else c.push(l+
k,q)}a.g=c}var b={},v;(function(){for(var f=a.concat(d),l=[],m={},c=0,p=f.length;c<p;++c){var w=f[c],r=w[3];if(r)for(var e=r.length;0<=--e;)b[r.charAt(e)]=w;w=w[1];r=""+w;m.hasOwnProperty(r)||(l.push(w),m[r]=null)}l.push(/[\0-\uffff]/);v=T(l)})();var A=d.length;return f}function y(a){var d=[],f=[];a.tripleQuotedStrings?d.push(["str",/^(?:\'\'\'(?:[^\'\\]|\\[\s\S]|\'{1,2}(?=[^\']))*(?:\'\'\'|$)|\"\"\"(?:[^\"\\]|\\[\s\S]|\"{1,2}(?=[^\"]))*(?:\"\"\"|$)|\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$))/,
null,"'\""]):a.multiLineStrings?d.push(["str",/^(?:\'(?:[^\\\']|\\[\s\S])*(?:\'|$)|\"(?:[^\\\"]|\\[\s\S])*(?:\"|$)|\`(?:[^\\\`]|\\[\s\S])*(?:\`|$))/,null,"'\"`"]):d.push(["str",/^(?:\'(?:[^\\\'\r\n]|\\.)*(?:\'|$)|\"(?:[^\\\"\r\n]|\\.)*(?:\"|$))/,null,"\"'"]);a.verbatimStrings&&f.push(["str",/^@\"(?:[^\"]|\"\")*(?:\"|$)/,null]);var b=a.hashComments;b&&(a.cStyleComments?(1<b?d.push(["com",/^#(?:##(?:[^#]|#(?!##))*(?:###|$)|.*)/,null,"#"]):d.push(["com",/^#(?:(?:define|e(?:l|nd)if|else|error|ifn?def|include|line|pragma|undef|warning)\b|[^\r\n]*)/,
null,"#"]),f.push(["str",/^<(?:(?:(?:\.\.\/)*|\/?)(?:[\w-]+(?:\/[\w-]+)+)?[\w-]+\.h(?:h|pp|\+\+)?|[a-z]\w*)>/,null])):d.push(["com",/^#[^\r\n]*/,null,"#"]));a.cStyleComments&&(f.push(["com",/^\/\/[^\r\n]*/,null]),f.push(["com",/^\/\*[\s\S]*?(?:\*\/|$)/,null]));if(b=a.regexLiterals){var v=(b=1<b?"":"\n\r")?".":"[\\S\\s]";f.push(["lang-regex",RegExp("^(?:^^\\.?|[+-]|[!=]=?=?|\\#|%=?|&&?=?|\\(|\\*=?|[+\\-]=|->|\\/=?|::?|<<?=?|>>?>?=?|,|;|\\?|@|\\[|~|{|\\^\\^?=?|\\|\\|?=?|break|case|continue|delete|do|else|finally|instanceof|return|throw|try|typeof)\\s*("+
("/(?=[^/*"+b+"])(?:[^/\\x5B\\x5C"+b+"]|\\x5C"+v+"|\\x5B(?:[^\\x5C\\x5D"+b+"]|\\x5C"+v+")*(?:\\x5D|$))+/")+")")])}(b=a.types)&&f.push(["typ",b]);b=(""+a.keywords).replace(/^ | $/g,"");b.length&&f.push(["kwd",new RegExp("^(?:"+b.replace(/[\s,]+/g,"|")+")\\b"),null]);d.push(["pln",/^\s+/,null," \r\n\t\u00a0"]);b="^.[^\\s\\w.$@'\"`/\\\\]*";a.regexLiterals&&(b+="(?!s*/)");f.push(["lit",/^@[a-z_$][a-z_$@0-9]*/i,null],["typ",/^(?:[@_]?[A-Z]+[a-z][A-Za-z_$@0-9]*|\w+_t\b)/,null],["pln",/^[a-z_$][a-z_$@0-9]*/i,
null],["lit",/^(?:0x[a-f0-9]+|(?:\d(?:_\d+)*\d*(?:\.\d*)?|\.\d\+)(?:e[+\-]?\d+)?)[a-z]*/i,null,"0123456789"],["pln",/^\\[\s\S]?/,null],["pun",new RegExp(b),null]);return G(d,f)}function L(a,d,f){function b(a){var c=a.nodeType;if(1==c&&!A.test(a.className))if("br"===a.nodeName)v(a),a.parentNode&&a.parentNode.removeChild(a);else for(a=a.firstChild;a;a=a.nextSibling)b(a);else if((3==c||4==c)&&f){var d=a.nodeValue,q=d.match(n);q&&(c=d.substring(0,q.index),a.nodeValue=c,(d=d.substring(q.index+q[0].length))&&
a.parentNode.insertBefore(l.createTextNode(d),a.nextSibling),v(a),c||a.parentNode.removeChild(a))}}function v(a){function b(a,c){var d=c?a.cloneNode(!1):a,k=a.parentNode;if(k){var k=b(k,1),e=a.nextSibling;k.appendChild(d);for(var f=e;f;f=e)e=f.nextSibling,k.appendChild(f)}return d}for(;!a.nextSibling;)if(a=a.parentNode,!a)return;a=b(a.nextSibling,0);for(var d;(d=a.parentNode)&&1===d.nodeType;)a=d;c.push(a)}for(var A=/(?:^|\s)nocode(?:\s|$)/,n=/\r\n?|\n/,l=a.ownerDocument,m=l.createElement("li");a.firstChild;)m.appendChild(a.firstChild);
for(var c=[m],p=0;p<c.length;++p)b(c[p]);d===(d|0)&&c[0].setAttribute("value",d);var w=l.createElement("ol");w.className="linenums";d=Math.max(0,d-1|0)||0;for(var p=0,r=c.length;p<r;++p)m=c[p],m.className="L"+(p+d)%10,m.firstChild||m.appendChild(l.createTextNode("\u00a0")),w.appendChild(m);a.appendChild(w)}function t(a,d){for(var f=d.length;0<=--f;){var b=d[f];I.hasOwnProperty(b)?E.console&&console.warn("cannot override language handler %s",b):I[b]=a}}function K(a,d){a&&I.hasOwnProperty(a)||(a=/^\s*</.test(d)?
"default-markup":"default-code");return I[a]}function M(a){var d=a.j;try{var f=U(a.h,a.l),b=f.a;a.a=b;a.c=f.c;a.i=0;K(d,b)(a);var v=/\bMSIE\s(\d+)/.exec(navigator.userAgent),v=v&&8>=+v[1],d=/\n/g,A=a.a,n=A.length,f=0,l=a.c,m=l.length,b=0,c=a.g,p=c.length,w=0;c[p]=n;var r,e;for(e=r=0;e<p;)c[e]!==c[e+2]?(c[r++]=c[e++],c[r++]=c[e++]):e+=2;p=r;for(e=r=0;e<p;){for(var t=c[e],z=c[e+1],q=e+2;q+2<=p&&c[q+1]===z;)q+=2;c[r++]=t;c[r++]=z;e=q}c.length=r;var g=a.h;a="";g&&(a=g.style.display,g.style.display="none");
try{for(;b<m;){var h=l[b+2]||n,k=c[w+2]||n,q=Math.min(h,k),B=l[b+1],D;if(1!==B.nodeType&&(D=A.substring(f,q))){v&&(D=D.replace(d,"\r"));B.nodeValue=D;var N=B.ownerDocument,u=N.createElement("span");u.className=c[w+1];var y=B.parentNode;y.replaceChild(u,B);u.appendChild(B);f<h&&(l[b+1]=B=N.createTextNode(A.substring(q,h)),y.insertBefore(B,u.nextSibling))}f=q;f>=h&&(b+=2);f>=k&&(w+=2)}}finally{g&&(g.style.display=a)}}catch(x){E.console&&console.log(x&&x.stack||x)}}var E=window,C=["break,continue,do,else,for,if,return,while"],
F=[[C,"auto,case,char,const,default,double,enum,extern,float,goto,inline,int,long,register,restrict,short,signed,sizeof,static,struct,switch,typedef,union,unsigned,void,volatile"],"catch,class,delete,false,import,new,operator,private,protected,public,this,throw,true,try,typeof"],H=[F,"alignas,alignof,align_union,asm,axiom,bool,concept,concept_map,const_cast,constexpr,decltype,delegate,dynamic_cast,explicit,export,friend,generic,late_check,mutable,namespace,noexcept,noreturn,nullptr,property,reinterpret_cast,static_assert,static_cast,template,typeid,typename,using,virtual,where"],
O=[F,"abstract,assert,boolean,byte,extends,finally,final,implements,import,instanceof,interface,null,native,package,strictfp,super,synchronized,throws,transient"],P=[F,"abstract,add,alias,as,ascending,async,await,base,bool,by,byte,checked,decimal,delegate,descending,dynamic,event,finally,fixed,foreach,from,get,global,group,implicit,in,interface,internal,into,is,join,let,lock,null,object,out,override,orderby,params,partial,readonly,ref,remove,sbyte,sealed,select,set,stackalloc,string,select,uint,ulong,unchecked,unsafe,ushort,value,var,virtual,where,yield"],
F=[F,"abstract,async,await,constructor,debugger,enum,eval,export,function,get,implements,instanceof,interface,let,null,set,undefined,var,with,yield,Infinity,NaN"],Q=[C,"and,as,assert,class,def,del,elif,except,exec,finally,from,global,import,in,is,lambda,nonlocal,not,or,pass,print,raise,try,with,yield,False,True,None"],R=[C,"alias,and,begin,case,class,def,defined,elsif,end,ensure,false,in,module,next,nil,not,or,redo,rescue,retry,self,super,then,true,undef,unless,until,when,yield,BEGIN,END"],C=[C,"case,done,elif,esac,eval,fi,function,in,local,set,then,until"],
S=/^(DIR|FILE|array|vector|(de|priority_)?queue|(forward_)?list|stack|(const_)?(reverse_)?iterator|(unordered_)?(multi)?(set|map)|bitset|u?(int|float)\d*)\b/,W=/\S/,X=y({keywords:[H,P,O,F,"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",Q,R,C],hashComments:!0,cStyleComments:!0,multiLineStrings:!0,regexLiterals:!0}),I={};t(X,["default-code"]);t(G([],[["pln",/^[^<?]+/],["dec",
/^<!\w[^>]*(?:>|$)/],["com",/^<\!--[\s\S]*?(?:-\->|$)/],["lang-",/^<\?([\s\S]+?)(?:\?>|$)/],["lang-",/^<%([\s\S]+?)(?:%>|$)/],["pun",/^(?:<[%?]|[%?]>)/],["lang-",/^<xmp\b[^>]*>([\s\S]+?)<\/xmp\b[^>]*>/i],["lang-js",/^<script\b[^>]*>([\s\S]*?)(<\/script\b[^>]*>)/i],["lang-css",/^<style\b[^>]*>([\s\S]*?)(<\/style\b[^>]*>)/i],["lang-in.tag",/^(<\/?[a-z][^<>]*>)/i]]),"default-markup htm html mxml xhtml xml xsl".split(" "));t(G([["pln",/^[\s]+/,null," \t\r\n"],["atv",/^(?:\"[^\"]*\"?|\'[^\']*\'?)/,null,
"\"'"]],[["tag",/^^<\/?[a-z](?:[\w.:-]*\w)?|\/?>$/i],["atn",/^(?!style[\s=]|on)[a-z](?:[\w:-]*\w)?/i],["lang-uq.val",/^=\s*([^>\'\"\s]*(?:[^>\'\"\s\/]|\/(?=\s)))/],["pun",/^[=<>\/]+/],["lang-js",/^on\w+\s*=\s*\"([^\"]+)\"/i],["lang-js",/^on\w+\s*=\s*\'([^\']+)\'/i],["lang-js",/^on\w+\s*=\s*([^\"\'>\s]+)/i],["lang-css",/^style\s*=\s*\"([^\"]+)\"/i],["lang-css",/^style\s*=\s*\'([^\']+)\'/i],["lang-css",/^style\s*=\s*([^\"\'>\s]+)/i]]),["in.tag"]);t(G([],[["atv",/^[\s\S]+/]]),["uq.val"]);t(y({keywords:H,
hashComments:!0,cStyleComments:!0,types:S}),"c cc cpp cxx cyc m".split(" "));t(y({keywords:"null,true,false"}),["json"]);t(y({keywords:P,hashComments:!0,cStyleComments:!0,verbatimStrings:!0,types:S}),["cs"]);t(y({keywords:O,cStyleComments:!0}),["java"]);t(y({keywords:C,hashComments:!0,multiLineStrings:!0}),["bash","bsh","csh","sh"]);t(y({keywords:Q,hashComments:!0,multiLineStrings:!0,tripleQuotedStrings:!0}),["cv","py","python"]);t(y({keywords:"caller,delete,die,do,dump,elsif,eval,exit,foreach,for,goto,if,import,last,local,my,next,no,our,print,package,redo,require,sub,undef,unless,until,use,wantarray,while,BEGIN,END",
hashComments:!0,multiLineStrings:!0,regexLiterals:2}),["perl","pl","pm"]);t(y({keywords:R,hashComments:!0,multiLineStrings:!0,regexLiterals:!0}),["rb","ruby"]);t(y({keywords:F,cStyleComments:!0,regexLiterals:!0}),["javascript","js","ts","typescript"]);t(y({keywords:"all,and,by,catch,class,else,extends,false,finally,for,if,in,is,isnt,loop,new,no,not,null,of,off,on,or,return,super,then,throw,true,try,unless,until,when,while,yes",hashComments:3,cStyleComments:!0,multilineStrings:!0,tripleQuotedStrings:!0,
regexLiterals:!0}),["coffee"]);t(G([],[["str",/^[\s\S]+/]]),["regex"]);var Y=E.PR={createSimpleLexer:G,registerLangHandler:t,sourceDecorator:y,PR_ATTRIB_NAME:"atn",PR_ATTRIB_VALUE:"atv",PR_COMMENT:"com",PR_DECLARATION:"dec",PR_KEYWORD:"kwd",PR_LITERAL:"lit",PR_NOCODE:"nocode",PR_PLAIN:"pln",PR_PUNCTUATION:"pun",PR_SOURCE:"src",PR_STRING:"str",PR_TAG:"tag",PR_TYPE:"typ",prettyPrintOne:E.prettyPrintOne=function(a,d,f){f=f||!1;d=d||null;var b=document.createElement("div");b.innerHTML="<pre>"+a+"</pre>";
b=b.firstChild;f&&L(b,f,!0);M({j:d,m:f,h:b,l:1,a:null,i:null,c:null,g:null});return b.innerHTML},prettyPrint:E.prettyPrint=function(a,d){function f(){for(var b=E.PR_SHOULD_USE_CONTINUATION?c.now()+250:Infinity;p<t.length&&c.now()<b;p++){for(var d=t[p],l=g,m=d;m=m.previousSibling;){var n=m.nodeType,u=(7===n||8===n)&&m.nodeValue;if(u?!/^\??prettify\b/.test(u):3!==n||/\S/.test(m.nodeValue))break;if(u){l={};u.replace(/\b(\w+)=([\w:.%+-]+)/g,function(a,b,c){l[b]=c});break}}m=d.className;if((l!==g||r.test(m))&&
!e.test(m)){n=!1;for(u=d.parentNode;u;u=u.parentNode)if(q.test(u.tagName)&&u.className&&r.test(u.className)){n=!0;break}if(!n){d.className+=" prettyprinted";n=l.lang;if(!n){var n=m.match(w),C;!n&&(C=V(d))&&z.test(C.tagName)&&(n=C.className.match(w));n&&(n=n[1])}if(y.test(d.tagName))u=1;else var u=d.currentStyle,x=v.defaultView,u=(u=u?u.whiteSpace:x&&x.getComputedStyle?x.getComputedStyle(d,null).getPropertyValue("white-space"):0)&&"pre"===u.substring(0,3);x=l.linenums;(x="true"===x||+x)||(x=(x=m.match(/\blinenums\b(?::(\d+))?/))?
x[1]&&x[1].length?+x[1]:!0:!1);x&&L(d,x,u);M({j:n,h:d,m:x,l:u,a:null,i:null,c:null,g:null})}}}p<t.length?E.setTimeout(f,250):"function"===typeof a&&a()}for(var b=d||document.body,v=b.ownerDocument||document,b=[b.getElementsByTagName("pre"),b.getElementsByTagName("code"),b.getElementsByTagName("xmp")],t=[],n=0;n<b.length;++n)for(var l=0,m=b[n].length;l<m;++l)t.push(b[n][l]);var b=null,c=Date;c.now||(c={now:function(){return+new Date}});var p=0,w=/\blang(?:uage)?-([\w.]+)(?!\S)/,r=/\bprettyprint\b/,
e=/\bprettyprinted\b/,y=/pre|xmp/i,z=/^code$/i,q=/^(?:pre|code|xmp)$/i,g={};f()}},H=E.define;"function"===typeof H&&H.amd&&H("google-code-prettify",[],function(){return Y})})();}()
@@ -74,6 +74,17 @@ Optimizations
* SOLR-9584: Support Solr being proxied with another endpoint than default /solr, by using relative links
  in AdminUI javascripts (Yun Jie Zhou via janhoy)

* SOLR-9996: Unstored IntPointField returns Long type (Ishan Chattopadhyaya)

Other Changes
----------------------
* SOLR-8396: Add support for PointFields in Solr (Ishan Chattopadhyaya, Tomás Fernández Löbbe)

* SOLR-10011: Refactor PointField & TrieField to now have a common base class, NumericFieldType. The
  TrieField.TrieTypes and PointField.PointTypes are now consolidated to NumericFieldType.NumberType. This
  refactoring also fixes a bug whereby PointFields were not using DocValues for range queries for
  indexed=false, docValues=true fields.

================== 6.5.0 ==================

Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

@@ -90,6 +101,16 @@ Jetty 9.3.14.v20161028
Detailed Change List
----------------------

New Features
----------------------

* SOLR-9836: Add ability to recover from leader when index corruption is detected on SolrCore creation.
  (Mike Drob via Mark Miller)

* SOLR-9926: Allow passing arbitrary java system properties to zkcli. (Hrishikesh Gadre via Mark Miller)

* SOLR-9885: Allow pre-startup Solr log management in Solr bin scripts to be disabled. (Mano Kovacs via Mark Miller)

Bug Fixes
----------------------

@@ -98,12 +119,20 @@ Bug Fixes
* SOLR-9977: Fix config bug in DistribDocExpirationUpdateProcessorTest that allowed false assumptions
  about when index version changes (hossman)

* SOLR-9979: Macro expansion should not be done in shard requests (Tomás Fernández Löbbe)

* SOLR-9114: NPE using TermVectorComponent, MoreLikeThisComponent in combination with ExactStatsCache (Cao Manh Dat, Varun Thacker)

Optimizations
----------------------

* SOLR-9941: Clear the deletes lists at UpdateLog before replaying from log. This prevents redundantly pre-applying
  DBQs, during the log replay, to every update in the log as if the DBQs were out of order. (hossman, Ishan Chattopadhyaya)

Other Changes
----------------------
* SOLR-9980: Expose configVersion in core admin status (Jessica Cheng Mallet via Tomás Fernández Löbbe)

================== 6.4.0 ==================

Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

@@ -223,15 +252,15 @@ New Features
|
* SOLR-9805: Use metrics-jvm library to instrument jvm internals such as GC, memory usage and others. (shalin)

* SOLR-9812: SOLR-9911, SOLR-9960: Added a new /admin/metrics API to return all metrics collected by Solr via API.
* SOLR-9812, SOLR-9911, SOLR-9960: Added a new /admin/metrics API to return all metrics collected by Solr via API.
  API supports four optional multi-valued parameters:
  * 'group' (all,jvm,jetty,node,core),
  * 'type' (all,counter,timer,gauge,histogram),
  * 'prefix' that filters the returned metrics,
  * 'registry' that selects one or more registries by prefix (eg. solr.jvm,solr.core.collection1)
  Example: http://localhost:8983/solr/admin/metrics?group=jvm,jetty&type=counter
  Example: http://localhost:8983/solr/admin/metrics?group=jvm&prefix=buffers,os
  Example: http://localhost:8983/solr/admin/metrics?registry=solr.node,solr.core&prefix=ADMIN
  - 'group' (all,jvm,jetty,node,core),
  - 'type' (all,counter,timer,gauge,histogram),
  - 'prefix' that filters the returned metrics,
  - 'registry' that selects one or more registries by prefix (eg. solr.jvm,solr.core.collection1)
  - Example: http://localhost:8983/solr/admin/metrics?group=jvm,jetty&type=counter
  - Example: http://localhost:8983/solr/admin/metrics?group=jvm&prefix=buffers,os
  - Example: http://localhost:8983/solr/admin/metrics?registry=solr.node,solr.core&prefix=ADMIN
  (shalin, ab)

* SOLR-9884: Add version to segments handler output (Steven Bower via Erick Erickson)
@@ -1480,11 +1480,12 @@ if [ ! -e "$SOLR_HOME" ]; then
  echo -e "\nSolr home directory $SOLR_HOME not found!\n"
  exit 1
fi

run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_gc_logs || echo "Failed archiving old GC logs"
run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_console_logs || echo "Failed archiving old console logs"
run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -rotate_solr_logs 9 || echo "Failed rotating old solr logs"
if [ "${SOLR_LOG_PRESTART_ROTATION:=true}" == "true" ]; then
  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -remove_old_solr_logs 7 || echo "Failed removing old solr logs"
  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_gc_logs || echo "Failed archiving old GC logs"
  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -archive_console_logs || echo "Failed archiving old console logs"
  run_tool utils -s "$DEFAULT_SERVER_DIR" -l "$SOLR_LOGS_DIR" -rotate_solr_logs 9 || echo "Failed rotating old solr logs"
fi

java_ver_out=`echo "$("$JAVA" -version 2>&1)"`
JAVA_VERSION=`echo $java_ver_out | grep "java version" | awk '{ print substr($3, 2, length($3)-2); }'`
@@ -939,10 +939,15 @@ IF ERRORLEVEL 1 (
)

REM Clean up and rotate logs
call :run_utils "-remove_old_solr_logs 7" || echo "Failed removing old solr logs"
call :run_utils "-archive_gc_logs" || echo "Failed archiving old GC logs"
call :run_utils "-archive_console_logs" || echo "Failed archiving old console logs"
call :run_utils "-rotate_solr_logs 9" || echo "Failed rotating old solr logs"
IF [%SOLR_LOG_PRESTART_ROTATION%] == [] (
  set SOLR_LOG_PRESTART_ROTATION=true
)
IF [%SOLR_LOG_PRESTART_ROTATION%] == [true] (
  call :run_utils "-remove_old_solr_logs 7" || echo "Failed removing old solr logs"
  call :run_utils "-archive_gc_logs" || echo "Failed archiving old GC logs"
  call :run_utils "-archive_console_logs" || echo "Failed archiving old console logs"
  call :run_utils "-rotate_solr_logs 9" || echo "Failed rotating old solr logs"
)

IF NOT "%ZK_HOST%"=="" set SOLR_MODE=solrcloud

@@ -75,6 +75,11 @@ REM set SOLR_LOG_LEVEL=INFO
REM Location where Solr should write logs to. Absolute or relative to solr start dir
REM set SOLR_LOGS_DIR=logs

REM Enables log rotation, cleanup, and archiving before starting Solr. Setting SOLR_LOG_PRESTART_ROTATION=false will skip start
REM time rotation of logs, and the archiving of the last GC and console log files. It does not affect Log4j configuration. This
REM pre-startup rotation may need to be disabled depending on how much you customize the default logging setup.
REM set SOLR_LOG_PRESTART_ROTATION=true

REM Set the host interface to listen on. Jetty will listen on all interfaces (0.0.0.0) by default.
REM This must be an IPv4 ("a.b.c.d") or bracketed IPv6 ("[x::y]") address, not a hostname!
REM set SOLR_JETTY_HOST=0.0.0.0
@@ -91,6 +91,11 @@
# Location where Solr should write logs to. Absolute or relative to solr start dir
#SOLR_LOGS_DIR=logs

# Enables log rotation, cleanup, and archiving during start. Setting SOLR_LOG_PRESTART_ROTATION=false will skip start
# time rotation of logs, and the archiving of the last GC and console log files. It does not affect Log4j configuration.
# This pre-startup rotation may need to be disabled depending on how much you customize the default logging setup.
#SOLR_LOG_PRESTART_ROTATION=true
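# A hedged example (deployment detail assumed, not part of this patch): if an
# external tool such as logrotate already owns these logs, opt out of the
# pre-start rotation entirely by uncommenting:
#SOLR_LOG_PRESTART_ROTATION=false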

# Sets the port Solr binds to, default is 8983
#SOLR_PORT=8983

@@ -32,7 +32,6 @@ import org.apache.lucene.collation.ICUCollationKeyAnalyzer;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermRangeQuery;
@@ -272,13 +271,8 @@ public class ICUCollationField extends FieldType {
    BytesRef low = part1 == null ? null : getCollationKey(f, part1);
    BytesRef high = part2 == null ? null : getCollationKey(f, part2);
    if (!field.indexed() && field.hasDocValues()) {
      if (field.multiValued()) {
        return DocValuesRangeQuery.newBytesRefRange(
            field.getName(), low, high, minInclusive, maxInclusive);
      } else {
        return DocValuesRangeQuery.newBytesRefRange(
            field.getName(), low, high, minInclusive, maxInclusive);
      }
      return SortedSetDocValuesField.newRangeQuery(
          field.getName(), low, high, minInclusive, maxInclusive);
    } else {
      return new TermRangeQuery(field.getName(), low, high, minInclusive, maxInclusive);
    }
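
    // A hedged sketch of the replacement API from LUCENE-7643 (variable names
    // assumed): doc-values range queries now come from a factory method on the
    // field class instead of the removed sandbox DocValuesRangeQuery, e.g.
    //
    //   Query q = SortedSetDocValuesField.newRangeQuery(field.getName(), low, high, minInclusive, maxInclusive);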
@@ -20,9 +20,12 @@ import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@@ -39,22 +42,29 @@ import com.google.common.collect.Maps;
import org.apache.http.auth.AuthSchemeProvider;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.config.Lookup;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.Directory;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.AuthSchemeRegistryProvider;
import org.apache.solr.client.solrj.impl.SolrHttpClientContextBuilder.CredentialsProviderProvider;
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
import org.apache.solr.cloud.CloudDescriptor;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Replica.State;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.IOUtils;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.backup.repository.BackupRepository;
import org.apache.solr.core.backup.repository.BackupRepositoryFactory;
import org.apache.solr.handler.RequestHandlerBase;
import org.apache.solr.handler.SnapShooter;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.admin.CoreAdminHandler;
@@ -166,6 +176,8 @@ public class CoreContainer {
|
  protected MetricsHandler metricsHandler;

  private enum CoreInitFailedAction { fromleader, none }

  /**
   * This method instantiates a new instance of {@linkplain BackupRepository}.
   *
@@ -911,7 +923,11 @@ public class CoreContainer {
|
      ConfigSet coreConfig = coreConfigService.getConfig(dcore);
      log.info("Creating SolrCore '{}' using configuration from {}", dcore.getName(), coreConfig.getName());
      core = new SolrCore(dcore, coreConfig);
      try {
        core = new SolrCore(dcore, coreConfig);
      } catch (SolrException e) {
        core = processCoreCreateException(e, dcore, coreConfig);
      }

      // always kick off recovery if we are in non-Cloud mode
      if (!isZooKeeperAware() && core.getUpdateHandler().getUpdateLog() != null) {
@@ -923,14 +939,12 @@ public class CoreContainer {
      return core;
    } catch (Exception e) {
      coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
      log.error("Error creating core [{}]: {}", dcore.getName(), e.getMessage(), e);
      final SolrException solrException = new SolrException(ErrorCode.SERVER_ERROR, "Unable to create core [" + dcore.getName() + "]", e);
      if(core != null && !core.isClosed())
        IOUtils.closeQuietly(core);
      throw solrException;
    } catch (Throwable t) {
      SolrException e = new SolrException(ErrorCode.SERVER_ERROR, "JVM Error creating core [" + dcore.getName() + "]: " + t.getMessage(), t);
      log.error("Error creating core [{}]: {}", dcore.getName(), t.getMessage(), t);
      coreInitFailures.put(dcore.getName(), new CoreLoadFailure(dcore, e));
      if(core != null && !core.isClosed())
        IOUtils.closeQuietly(core);
@@ -938,7 +952,96 @@ public class CoreContainer {
    } finally {
      MDCLoggingContext.clear();
    }
  }

  /**
   * Take action when we failed to create a SolrCore. If error is due to corrupt index, try to recover. Various recovery
   * strategies can be specified via the system property "-DCoreInitFailedAction={fromleader, none}"
   *
   * @see CoreInitFailedAction
   *
   * @param original
   *          the problem seen when loading the core the first time.
   * @param dcore
   *          core descriptor for the core to create
   * @param coreConfig
   *          core config for the core to create
   * @return a newly created SolrCore, if recovery was possible
   * @throws SolrException
   *           rethrows the original exception if we will not attempt to recover, throws a new SolrException with the
   *           original exception as a suppressed exception if there is a second problem creating the solr core.
   */
  private SolrCore processCoreCreateException(SolrException original, CoreDescriptor dcore, ConfigSet coreConfig) {
    // Traverse full chain since CIE may not be root exception
    Throwable cause = original;
    while ((cause = cause.getCause()) != null) {
      if (cause instanceof CorruptIndexException) {
        break;
      }
    }

    // If no CorruptIndexException, nothing we can try here
    if (cause == null) throw original;

    CoreInitFailedAction action = CoreInitFailedAction.valueOf(System.getProperty(CoreInitFailedAction.class.getSimpleName(), "none"));
    log.debug("CorruptIndexException while creating core, will attempt to repair via {}", action);

    switch (action) {
      case fromleader: // Recovery from leader on a CorruptedIndexException
        if (isZooKeeperAware()) {
          CloudDescriptor desc = dcore.getCloudDescriptor();
          try {
            Replica leader = getZkController().getClusterState()
                .getCollection(desc.getCollectionName())
                .getSlice(desc.getShardId())
                .getLeader();
            if (leader != null && leader.getState() == State.ACTIVE) {
              log.info("Found active leader, will attempt to create fresh core and recover.");
              resetIndexDirectory(dcore, coreConfig);
              return new SolrCore(dcore, coreConfig);
            }
          } catch (SolrException se) {
            se.addSuppressed(original);
            throw se;
          }
        }
        throw original;
      case none:
        throw original;
      default:
        log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
            action, Arrays.asList(CoreInitFailedAction.values()));
        throw original;
    }
  }
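
  // Usage sketch (an assumption, not part of this patch): opt in to the
  // leader-based recovery above by setting the system property before the
  // CoreContainer is created, mirroring -DCoreInitFailedAction=fromleader on
  // the JVM command line:
  //
  //   System.setProperty("CoreInitFailedAction", "fromleader"); // or "none"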

  /**
   * Write a new index directory for a SolrCore, but do so without loading it.
   */
  private void resetIndexDirectory(CoreDescriptor dcore, ConfigSet coreConfig) {
    SolrConfig config = coreConfig.getSolrConfig();

    String registryName = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, dcore.getName());
    DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(config, this, registryName);
    String dataDir = SolrCore.findDataDir(df, null, config, dcore);

    String tmpIdxDirName = "index." + new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(new Date());
    SolrCore.modifyIndexProps(df, dataDir, config, tmpIdxDirName);

    // Free the directory object that we had to create for this
    Directory dir = null;
    try {
      dir = df.get(dataDir, DirContext.META_DATA, config.indexConfig.lockType);
    } catch (IOException e) {
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
    } finally {
      try {
        df.release(dir);
        df.doneWithDirectory(dir);
      } catch (IOException e) {
        SolrException.log(log, e);
      }
    }
  }

  /**
@@ -383,4 +383,31 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
|
    return baseDir;
  }

  /**
   * Create a new DirectoryFactory instance from the given SolrConfig and tied to the specified core container.
   */
  static DirectoryFactory loadDirectoryFactory(SolrConfig config, CoreContainer cc, String registryName) {
    final PluginInfo info = config.getPluginInfo(DirectoryFactory.class.getName());
    final DirectoryFactory dirFactory;
    if (info != null) {
      log.debug(info.className);
      dirFactory = config.getResourceLoader().newInstance(info.className, DirectoryFactory.class);
      // allow DirectoryFactory instances to access the CoreContainer
      dirFactory.initCoreContainer(cc);
      dirFactory.init(info.initArgs);
    } else {
      log.debug("solr.NRTCachingDirectoryFactory");
      dirFactory = new NRTCachingDirectoryFactory();
      dirFactory.initCoreContainer(cc);
    }
    if (config.indexConfig.metricsInfo != null && config.indexConfig.metricsInfo.isEnabled()) {
      final DirectoryFactory factory = new MetricsDirectoryFactory(cc.getMetricManager(),
          registryName, dirFactory);
      factory.init(config.indexConfig.metricsInfo.initArgs);
      return factory;
    } else {
      return dirFactory;
    }
  }
}
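
// Caller sketch (assumed, mirroring resetIndexDirectory in CoreContainer):
// build a DirectoryFactory for a core's metric registry without loading the core.
//
//   String registryName = SolrMetricManager.getRegistryName(SolrInfoMBean.Group.core, coreName);
//   DirectoryFactory df = DirectoryFactory.loadDirectoryFactory(solrConfig, coreContainer, registryName);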
@@ -23,6 +23,8 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.invoke.MethodHandles;
import java.lang.reflect.Constructor;
import java.net.URL;
@@ -67,6 +69,7 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.solr.client.solrj.impl.BinaryResponseParser;
import org.apache.solr.cloud.CloudDescriptor;
@@ -148,6 +151,7 @@ import org.apache.solr.update.processor.UpdateRequestProcessorFactory;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.NumberUtils;
import org.apache.solr.util.PropertiesInputStream;
import org.apache.solr.util.PropertiesOutputStream;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.plugin.NamedListInitializedPlugin;
import org.apache.solr.util.plugin.PluginInfoInitialized;
@@ -646,27 +650,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
  }

  private DirectoryFactory initDirectoryFactory() {
    final PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
    final DirectoryFactory dirFactory;
    if (info != null) {
      log.debug(info.className);
      dirFactory = getResourceLoader().newInstance(info.className, DirectoryFactory.class);
      // allow DirectoryFactory instances to access the CoreContainer
      dirFactory.initCoreContainer(getCoreDescriptor().getCoreContainer());
      dirFactory.init(info.initArgs);
    } else {
      log.debug("solr.NRTCachingDirectoryFactory");
      dirFactory = new NRTCachingDirectoryFactory();
      dirFactory.initCoreContainer(getCoreDescriptor().getCoreContainer());
    }
    if (solrConfig.indexConfig.metricsInfo != null && solrConfig.indexConfig.metricsInfo.isEnabled()) {
      final DirectoryFactory factory = new MetricsDirectoryFactory(coreDescriptor.getCoreContainer().getMetricManager(),
          coreMetricManager.getRegistryName(), dirFactory);
      factory.init(solrConfig.indexConfig.metricsInfo.initArgs);
      return factory;
    } else {
      return dirFactory;
    }
    return DirectoryFactory.loadDirectoryFactory(solrConfig, getCoreDescriptor().getCoreContainer(), coreMetricManager.getRegistryName());
  }

  private void initIndexReaderFactory() {
@@ -1145,6 +1129,26 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
  }

  private String initDataDir(String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
    return findDataDir(getDirectoryFactory(), dataDir, config, coreDescriptor);
  }

  /**
   * Locate the data directory for a given config and core descriptor.
   *
   * @param directoryFactory
   *          The directory factory to use if necessary to calculate an absolute path. Should be the same as what will
   *          be used to open the data directory later.
   * @param dataDir
   *          An optional hint to the data directory location. Will be normalized and used if not null.
   * @param config
   *          A solr config to retrieve the default data directory location, if used.
   * @param coreDescriptor
   *          descriptor to load the actual data dir from, if not using the default.
   * @return a normalized data directory name
   * @throws SolrException
   *           if the data directory cannot be loaded from the core descriptor
   */
  static String findDataDir(DirectoryFactory directoryFactory, String dataDir, SolrConfig config, CoreDescriptor coreDescriptor) {
    if (dataDir == null) {
      if (coreDescriptor.usingDefaultDataDir()) {
        dataDir = config.getDataDir();
@ -1163,6 +1167,80 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
|
|||
return SolrResourceLoader.normalizeDir(dataDir);
|
||||
}
|
||||
|
||||
|
||||
public boolean modifyIndexProps(String tmpIdxDirName) {
|
||||
return SolrCore.modifyIndexProps(getDirectoryFactory(), getDataDir(), getSolrConfig(), tmpIdxDirName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the index.properties file with the new index sub directory name
|
||||
*/
|
||||
// package private
|
||||
static boolean modifyIndexProps(DirectoryFactory directoryFactory, String dataDir, SolrConfig solrConfig, String tmpIdxDirName) {
|
||||
log.info("Updating index properties... index="+tmpIdxDirName);
|
||||
Directory dir = null;
|
||||
try {
|
||||
dir = directoryFactory.get(dataDir, DirContext.META_DATA, solrConfig.indexConfig.lockType);
|
||||
String tmpIdxPropName = IndexFetcher.INDEX_PROPERTIES + "." + System.nanoTime();
|
||||
writeNewIndexProps(dir, tmpIdxPropName, tmpIdxDirName);
|
||||
directoryFactory.renameWithOverwrite(dir, tmpIdxPropName, IndexFetcher.INDEX_PROPERTIES);
|
||||
return true;
|
||||
} catch (IOException e1) {
|
||||
throw new RuntimeException(e1);
|
||||
} finally {
|
||||
if (dir != null) {
|
||||
try {
|
||||
directoryFactory.release(dir);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(log, "", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Write the index.properties file with the new index sub directory name
|
||||
* @param dir a data directory (containing an index.properties file)
|
||||
* @param tmpFileName the file name to write the new index.properties to
|
||||
* @param tmpIdxDirName new index directory name
|
||||
*/
|
||||
private static void writeNewIndexProps(Directory dir, String tmpFileName, String tmpIdxDirName) {
|
||||
if (tmpFileName == null) {
|
||||
tmpFileName = IndexFetcher.INDEX_PROPERTIES;
|
||||
}
|
||||
final Properties p = new Properties();
|
||||
|
||||
// Read existing properties
|
||||
try {
|
||||
final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);
|
||||
final InputStream is = new PropertiesInputStream(input);
|
||||
try {
|
||||
p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
|
||||
} catch (Exception e) {
|
||||
log.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(is);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
// ignore; file does not exist
|
||||
}
|
||||
|
||||
p.put("index", tmpIdxDirName);
|
||||
|
||||
// Write new properties
|
||||
Writer os = null;
|
||||
try {
|
||||
IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
|
||||
os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
|
||||
p.store(os, IndexFetcher.INDEX_PROPERTIES);
|
||||
dir.sync(Collections.singleton(tmpFileName));
|
||||
} catch (Exception e) {
|
||||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(os);
|
||||
}
|
||||
}
|
||||
|
||||
private String initUpdateLogDir(CoreDescriptor coreDescriptor) {
|
||||
String updateLogDir = coreDescriptor.getUlogDir();
|
||||
if (updateLogDir == null) {
|
||||
|
|
|
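
The modifyIndexProps/writeNewIndexProps pair above follows the write-to-a-temporary-name, sync, then rename-with-overwrite sequence, so a crash can never leave a truncated index.properties behind. The same pattern in isolation with plain java.nio.file (paths and contents are illustrative, not Solr's actual I/O layer):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

static void rewriteIndexProps(Path dataDir, String newIndexDirName) throws IOException {
  // Write the full new contents under a unique temporary name first...
  Path tmp = dataDir.resolve("index.properties." + System.nanoTime());
  Files.write(tmp, ("index=" + newIndexDirName + "\n").getBytes(StandardCharsets.UTF_8));
  // ...then move it over the live file; on POSIX filesystems this rename is
  // atomic, so readers observe either the old or the new file, never a partial one.
  Files.move(tmp, dataDir.resolve("index.properties"), StandardCopyOption.REPLACE_EXISTING);
}
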
@@ -21,7 +21,6 @@ import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.invoke.MethodHandles;

@@ -92,7 +91,6 @@ import org.apache.solr.update.UpdateLog;
import org.apache.solr.update.VersionInfo;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.FileUtils;
import org.apache.solr.util.PropertiesInputStream;
import org.apache.solr.util.PropertiesOutputStream;
import org.apache.solr.util.RTimer;
import org.apache.solr.util.RefCounted;

@@ -460,7 +458,7 @@ public class IndexFetcher {
        reloadCore = true;
        downloadConfFiles(confFilesToDownload, latestGeneration);
        if (isFullCopyNeeded) {
          successfulInstall = IndexFetcher.modifyIndexProps(solrCore, tmpIdxDirName);
          successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
          deleteTmpIdxDir = false;
        } else {
          successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);

@@ -488,7 +486,7 @@ public class IndexFetcher {
      } else {
        terminateAndWaitFsyncService();
        if (isFullCopyNeeded) {
          successfulInstall = IndexFetcher.modifyIndexProps(solrCore, tmpIdxDirName);
          successfulInstall = solrCore.modifyIndexProps(tmpIdxDirName);
          deleteTmpIdxDir = false;
        } else {
          successfulInstall = moveIndexFiles(tmpIndexDir, indexDir);

@@ -1189,60 +1187,6 @@ public class IndexFetcher {
    return new SimpleDateFormat(SnapShooter.DATE_FMT, Locale.ROOT).format(d);
  }

  /**
   * If the index is stale by any chance, load index from a different dir in the data dir.
   */
  protected static boolean modifyIndexProps(SolrCore solrCore, String tmpIdxDirName) {
    LOG.info("New index installed. Updating index properties... index=" + tmpIdxDirName);
    Properties p = new Properties();
    Directory dir = null;
    try {
      dir = solrCore.getDirectoryFactory().get(solrCore.getDataDir(), DirContext.META_DATA, solrCore.getSolrConfig().indexConfig.lockType);
      if (slowFileExists(dir, IndexFetcher.INDEX_PROPERTIES)) {
        final IndexInput input = dir.openInput(IndexFetcher.INDEX_PROPERTIES, DirectoryFactory.IOCONTEXT_NO_CACHE);

        final InputStream is = new PropertiesInputStream(input);
        try {
          p.load(new InputStreamReader(is, StandardCharsets.UTF_8));
        } catch (Exception e) {
          LOG.error("Unable to load " + IndexFetcher.INDEX_PROPERTIES, e);
        } finally {
          IOUtils.closeQuietly(is);
        }
      }

      String tmpFileName = IndexFetcher.INDEX_PROPERTIES + "." + System.nanoTime();
      final IndexOutput out = dir.createOutput(tmpFileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
      p.put("index", tmpIdxDirName);
      Writer os = null;
      try {
        os = new OutputStreamWriter(new PropertiesOutputStream(out), StandardCharsets.UTF_8);
        p.store(os, tmpFileName);
        dir.sync(Collections.singleton(tmpFileName));
      } catch (Exception e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
            "Unable to write " + IndexFetcher.INDEX_PROPERTIES, e);
      } finally {
        IOUtils.closeQuietly(os);
      }

      solrCore.getDirectoryFactory().renameWithOverwrite(dir, tmpFileName, IndexFetcher.INDEX_PROPERTIES);
      return true;

    } catch (IOException e1) {
      throw new RuntimeException(e1);
    } finally {
      if (dir != null) {
        try {
          solrCore.getDirectoryFactory().release(dir);
        } catch (IOException e) {
          SolrException.log(LOG, "", e);
        }
      }
    }

  }

  private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();

  /**
@@ -101,7 +101,7 @@ public class RestoreCore implements Callable<Boolean> {
        }
      }
      log.debug("Switching directories");
      IndexFetcher.modifyIndexProps(core, restoreIndexName);
      core.modifyIndexProps(restoreIndexName);

      boolean success;
      try {
@@ -335,6 +335,7 @@ enum CoreAdminOperation implements CoreAdminOp {
    info.add("uptime", core.getUptimeMs());
    if (cores.isZooKeeperAware()) {
      info.add("lastPublished", core.getCoreDescriptor().getCloudDescriptor().getLastPublished().toString().toLowerCase(Locale.ROOT));
      info.add("configVersion", core.getSolrConfig().getZnodeVersion());
    }
    if (isIndexInfoNeeded) {
      RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
@@ -289,8 +289,6 @@ public class LukeRequestHandler extends RequestHandlerBase
        f.add( "schema", getFieldFlags( sfield ) );
        f.add( "flags", getFieldFlags( field ) );

        Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());

        f.add( "value", (ftype==null)?null:ftype.toExternal( field ) );

        // TODO: this really should be "stored"

@@ -301,7 +299,10 @@ public class LukeRequestHandler extends RequestHandlerBase
          f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length));
        }
        f.add( "boost", field.boost() );
        f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
        if (!ftype.isPointField()) {
          Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());
          f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
        } // TODO: Calculate docFreq for point fields

        // If we have a term vector, return that
        if( field.fieldType().storeTermVectors() ) {
@@ -33,6 +33,7 @@ import java.util.Map.Entry;

import org.apache.commons.lang.ArrayUtils;
import org.apache.commons.lang.StringUtils;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.FixedBitSet;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

@@ -47,6 +48,7 @@ import org.apache.solr.common.util.StrUtils;
import org.apache.solr.request.SimpleFacets;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.PointField;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.SyntaxError;
import org.apache.solr.search.facet.FacetDebugInfo;

@@ -1477,7 +1479,13 @@ public class FacetComponent extends SearchComponent {
        if (sfc == null) {
          sfc = new ShardFacetCount();
          sfc.name = name;
          sfc.indexed = ftype == null ? sfc.name : ftype.toInternal(sfc.name);
          if (ftype == null) {
            sfc.indexed = null;
          } else if (ftype.isPointField()) {
            sfc.indexed = ((PointField)ftype).toInternalByteRef(sfc.name);
          } else {
            sfc.indexed = new BytesRef(ftype.toInternal(sfc.name));
          }
          sfc.termNum = termNum++;
          counts.put(name, sfc);
        }

@@ -1553,7 +1561,7 @@ public class FacetComponent extends SearchComponent {
  public static class ShardFacetCount {
    public String name;
    // the indexed form of the name... used for comparisons
    public String indexed;
    public BytesRef indexed;
    public long count;
    public int termNum; // term number starting at 0 (used in bit arrays)
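
ShardFacetCount.indexed changes from String to BytesRef above because point fields index a binary, sortable encoding with no readable String form. A small sketch of producing and comparing that encoded form with Lucene's IntPoint (the values are illustrative):

import org.apache.lucene.document.IntPoint;
import org.apache.lucene.util.BytesRef;

// Encode two int values the way an int point field indexes them; the
// encoding is order-preserving, so BytesRef comparison matches numeric order.
static int compareEncoded(int x, int y) {
  byte[] a = new byte[Integer.BYTES];
  byte[] b = new byte[Integer.BYTES];
  IntPoint.encodeDimension(x, a, 0);
  IntPoint.encodeDimension(y, b, 0);
  return new BytesRef(a).compareTo(new BytesRef(b));
}
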
@@ -221,7 +221,17 @@ public class MoreLikeThisComponent extends SearchComponent {
    }
    super.finishStage(rb);
  }

  @Override
  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false)) return;
    if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) == 0
        && (sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) == 0) {
      sreq.params.set(COMPONENT_NAME, "false");
    }
  }

  /**
   * Returns NamedList based on the order of
   * resultIds.shardDoc.positionInResponse
@@ -185,6 +185,11 @@ public class QueryComponent extends SearchComponent
      }

      rb.setSortSpec( parser.getSortSpec(true) );
      for (SchemaField sf:rb.getSortSpec().getSchemaFields()) {
        if (sf != null && sf.getType().isPointField() && !sf.hasDocValues()) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"Can't sort on a point field without docValues");
        }
      }
      rb.setQparser(parser);

      final String cursorStr = rb.req.getParams().get(CursorMarkParams.CURSOR_MARK_PARAM);

@@ -335,11 +340,21 @@ public class QueryComponent extends SearchComponent
      List<String> idArr = StrUtils.splitSmart(ids, ",", true);
      int[] luceneIds = new int[idArr.size()];
      int docs = 0;
      for (int i=0; i<idArr.size(); i++) {
        int id = searcher.getFirstMatch(
            new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
        if (id >= 0)
          luceneIds[docs++] = id;
      if (idField.getType().isPointField()) {
        for (int i=0; i<idArr.size(); i++) {
          int id = searcher.search(
              idField.getType().getFieldQuery(null, idField, idArr.get(i)), 1).scoreDocs[0].doc;
          if (id >= 0) {
            luceneIds[docs++] = id;
          }
        }
      } else {
        for (int i=0; i<idArr.size(); i++) {
          int id = searcher.getFirstMatch(
              new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
          if (id >= 0)
            luceneIds[docs++] = id;
        }
      }

      DocListAndSet res = new DocListAndSet();
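
The QueryComponent hunk above forks the uniqueKey lookup: point fields have no indexed term to hand to getFirstMatch, so each id is resolved by running the field type's exact-value query. A standalone sketch of such a lookup (field name and searcher are assumptions, and the empty result is guarded explicitly):

import java.io.IOException;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;

// Resolve the internal Lucene docid for an exact point value, or -1 if absent.
static int lookupDocId(IndexSearcher searcher, String field, int idValue) throws IOException {
  ScoreDoc[] hits = searcher.search(IntPoint.newExactQuery(field, idValue), 1).scoreDocs;
  return hits.length == 0 ? -1 : hits[0].doc;
}
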
@@ -16,14 +16,35 @@
 */
package org.apache.solr.handler.component;

import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.lang.invoke.MethodHandles;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.WeakHashMap;

import com.carrotsearch.hppc.IntIntHashMap;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;

@@ -46,22 +67,22 @@ import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.QueryElevationParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.grouping.GroupingSpecification;
import org.apache.solr.util.DOMUtil;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.core.Config;
import org.apache.solr.core.SolrCore;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.response.transform.ElevatedMarkerFactory;
import org.apache.solr.response.transform.ExcludedMarkerFactory;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.search.SortSpec;
import org.apache.solr.search.grouping.GroupingSpecification;
import org.apache.solr.util.DOMUtil;
import org.apache.solr.util.RefCounted;
import org.apache.solr.util.VersionedFile;
import org.apache.solr.util.plugin.SolrCoreAware;

@@ -71,29 +92,6 @@ import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;

import com.carrotsearch.hppc.IntIntHashMap;

import javax.xml.xpath.XPath;
import javax.xml.xpath.XPathConstants;
import javax.xml.xpath.XPathExpressionException;
import javax.xml.xpath.XPathFactory;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.lang.invoke.MethodHandles;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.WeakHashMap;

/**
 * A component to elevate some documents to the top of the result set.
 *

@@ -628,7 +626,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
    }

    @Override
    public FieldComparator<Integer> newComparator(String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
    public FieldComparator<Integer> newComparator(String fieldname, final int numHits, int sortPos, boolean reversed) {
      return new SimpleFieldComparator<Integer>() {
        private final int[] values = new int[numHits];
        private int bottomVal;
@@ -55,7 +55,6 @@ public class RangeFacetProcessor extends SimpleFacets {
   *
   * @see org.apache.solr.common.params.FacetParams#FACET_RANGE
   */
  @SuppressWarnings("unchecked")
  public NamedList<Object> getFacetRangeCounts() throws IOException, SyntaxError {
    final NamedList<Object> resOuter = new SimpleOrderedMap<>();

@@ -92,7 +91,7 @@ public class RangeFacetProcessor extends SimpleFacets {
      final FieldType ft = sf.getType();

      if (method.equals(FacetRangeMethod.DV)) {
        assert ft instanceof TrieField;
        assert ft instanceof TrieField || ft.isPointField();
        resOuter.add(key, getFacetRangeCountsDocValues(rangeFacetRequest));
      } else {
        resOuter.add(key, getFacetRangeCounts(rangeFacetRequest));
@@ -34,6 +34,7 @@ import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.schema.DateRangeField;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.PointField;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieDateField;
import org.apache.solr.schema.TrieField;

@@ -91,6 +92,11 @@ public class RangeFacetRequest extends FacetComponent.FacetBase {
          DateRangeField.class + "'. Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
      method = FacetParams.FacetRangeMethod.FILTER;
    }
    if (method.equals(FacetParams.FacetRangeMethod.DV) && !schemaField.hasDocValues() && (schemaField.getType().isPointField())) {
      log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported on PointFields without docValues." +
          "Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
      method = FacetParams.FacetRangeMethod.FILTER;
    }

    this.start = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_START);
    this.end = required.getFieldParam(facetOn, FacetParams.FACET_RANGE_END);

@@ -159,10 +165,33 @@ public class RangeFacetRequest extends FacetComponent.FacetBase {
        default:
          throw new SolrException
              (SolrException.ErrorCode.BAD_REQUEST,
                  "Unable to range facet on tried field of unexpected type:" + this.facetOn);
                  "Unable to range facet on Trie field of unexpected type:" + this.facetOn);
      }
    } else if (ft instanceof DateRangeField) {
      calc = new DateRangeEndpointCalculator(this, null);
    } else if (ft.isPointField()) {
      final PointField pointField = (PointField) ft;
      switch (pointField.getType()) {
        case FLOAT:
          calc = new FloatRangeEndpointCalculator(this);
          break;
        case DOUBLE:
          calc = new DoubleRangeEndpointCalculator(this);
          break;
        case INTEGER:
          calc = new IntegerRangeEndpointCalculator(this);
          break;
        case LONG:
          calc = new LongRangeEndpointCalculator(this);
          break;
        case DATE:
          calc = new DateRangeEndpointCalculator(this, null);
          break;
        default:
          throw new SolrException
              (SolrException.ErrorCode.BAD_REQUEST,
                  "Unable to range facet on Point field of unexpected type:" + this.facetOn);
      }
    } else {
      throw new SolrException
          (SolrException.ErrorCode.BAD_REQUEST,
@@ -149,13 +149,7 @@ public class ShardFieldSortedHitQueue extends PriorityQueue<ShardDoc> {
  }

  Comparator<ShardDoc> comparatorFieldComparator(SortField sortField) {
    final FieldComparator fieldComparator;
    try {
      fieldComparator = sortField.getComparator(0, 0);
    } catch (IOException e) {
      throw new RuntimeException("Unable to get FieldComparator for sortField " + sortField);
    }

    final FieldComparator fieldComparator = sortField.getComparator(0, 0);
    return new ShardComparator(sortField) {
      // Since the PriorityQueue keeps the biggest elements by default,
      // we need to reverse the field compare ordering so that the
@@ -45,6 +45,12 @@ public class StatsComponent extends SearchComponent {
      rb.setNeedDocSet( true );
      rb.doStats = true;
      rb._statsInfo = new StatsInfo(rb);
      for (StatsField statsField : rb._statsInfo.getStatsFields()) {
        if (statsField.getSchemaField() != null && statsField.getSchemaField().getType().isPointField() && !statsField.getSchemaField().hasDocValues()) {
          throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
              "Can't calculate stats on a PointField without docValues");
        }
      }
    }
  }

@@ -65,7 +65,7 @@ public class StatsValuesFactory {

    if (TrieDateField.class.isInstance(fieldType)) {
      return new DateStatsValues(statsField);
    } else if (TrieField.class.isInstance(fieldType)) {
    } else if (TrieField.class.isInstance(fieldType) || PointField.class.isInstance(fieldType)) {
      return new NumericStatsValues(statsField);
    } else if (StrField.class.isInstance(fieldType)) {
      return new StringStatsValues(statsField);
@@ -464,6 +464,15 @@ public class TermVectorComponent extends SearchComponent implements SolrCoreAwar
    }
  }

  @Override
  public void modifyRequest(ResponseBuilder rb, SearchComponent who, ShardRequest sreq) {
    SolrParams params = rb.req.getParams();
    if (!params.getBool(COMPONENT_NAME, false)) return;
    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) == 0) {
      sreq.params.set(COMPONENT_NAME, "false");
    }
  }

  //////////////////////// NamedListInitializedPlugin methods //////////////////////

  @Override
@@ -65,9 +65,6 @@ public final class SlowCompositeReaderWrapper extends LeafReader {
  SlowCompositeReaderWrapper(CompositeReader reader, boolean merging) throws IOException {
    super();
    in = reader;
    if (getFieldInfos().hasPointValues()) {
      throw new IllegalArgumentException("cannot wrap points");
    }
    fields = MultiFields.getFields(in);
    in.registerParentReader(this);
    this.merging = merging;
@@ -42,6 +42,7 @@ import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.IntervalFacets.FacetInterval;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.PointField;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.TrieDateField;
import org.apache.solr.search.DocIterator;

@@ -625,6 +626,9 @@ public class IntervalFacets implements Iterable<FacetInterval> {
    if ("*".equals(value)) {
      return null;
    }
    if (schemaField.getType().isPointField()) {
      return ((PointField)schemaField.getType()).toInternalByteRef(value);
    }
    return new BytesRef(schemaField.getType().toInternal(value));
  }

@@ -411,6 +411,10 @@ public class SimpleFacets {

    NamedList<Integer> counts;
    SchemaField sf = searcher.getSchema().getField(field);
    if (sf.getType().isPointField() && !sf.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Can't facet on a PointField without docValues");
    }
    FieldType ft = sf.getType();

    // determine what type of faceting method to use

@@ -579,6 +583,10 @@ public class SimpleFacets {
  static FacetMethod selectFacetMethod(SchemaField field, FacetMethod method, Integer mincount) {

    FieldType type = field.getType();
    if (type.isPointField()) {
      // Only FCS is supported for PointFields for now
      return FacetMethod.FCS;
    }

    /*The user did not specify any preference*/
    if (method == null) {

@@ -810,12 +818,20 @@ public class SimpleFacets {
   * @param terms a list of term values (in the specified field) to compute the counts for
   */
  protected NamedList<Integer> getListedTermCounts(String field, final ParsedParams parsed, List<String> terms) throws IOException {
    FieldType ft = searcher.getSchema().getFieldType(field);
    SchemaField sf = searcher.getSchema().getField(field);
    FieldType ft = sf.getType();
    NamedList<Integer> res = new NamedList<>();
    for (String term : terms) {
      String internal = ft.toInternal(term);
      int count = searcher.numDocs(new TermQuery(new Term(field, internal)), parsed.docs);
      res.add(term, count);
    if (ft.isPointField()) {
      for (String term : terms) {
        int count = searcher.numDocs(ft.getFieldQuery(null, sf, term), parsed.docs);
        res.add(term, count);
      }
    } else {
      for (String term : terms) {
        String internal = ft.toInternal(term);
        int count = searcher.numDocs(new TermQuery(new Term(field, internal)), parsed.docs);
        res.add(term, count);
      }
    }
    return res;
  }

@@ -848,7 +864,7 @@ public class SimpleFacets {
  public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing,
      String sort, String prefix, String contains, boolean ignoreCase, boolean intersectsCheck)
      throws IOException {

    /* :TODO: potential optimization...
     * cache the Terms with the highest docFreq and try them first
     * don't enum if we get our max from them

@@ -864,10 +880,12 @@ public class SimpleFacets {
      fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
    }

    IndexSchema schema = searcher.getSchema();
    LeafReader r = searcher.getSlowAtomicReader();
    FieldType ft = schema.getFieldType(field);
    assert !ft.isPointField(): "Point Fields don't support enum method";

    LeafReader r = searcher.getSlowAtomicReader();

    boolean sortByCount = sort.equals("count") || sort.equals("true");
    final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1;

@@ -1082,6 +1100,9 @@ public class SimpleFacets {
    if (parsed.params.getBool(GroupParams.GROUP_FACET, false)) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Interval Faceting can't be used with " + GroupParams.GROUP_FACET);
    }
    if (schemaField.getType().isPointField() && !schemaField.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't use interval faceting on a PointField without docValues");
    }

    SimpleOrderedMap<Integer> fieldResults = new SimpleOrderedMap<Integer>();
    res.add(parsed.key, fieldResults);
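
getListedTermCounts above makes the same split: for point fields the per-value count comes from running the field's query instead of a TermQuery over an indexed term. Reduced to Lucene alone, counting one exact value can be sketched like this (the field name is illustrative):

import java.io.IOException;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.search.IndexSearcher;

// For a point field, "docFreq of a term" becomes "hit count of an exact-value query".
static int countExact(IndexSearcher searcher, String field, long value) throws IOException {
  return searcher.count(LongPoint.newExactQuery(field, value));
}
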
@@ -147,14 +147,16 @@ public class RequestUtil {
      newMap.putAll( MultiMapSolrParams.asMultiMap(invariants) );
    }

    String[] doMacrosStr = newMap.get("expandMacros");
    boolean doMacros = true;
    if (doMacrosStr != null) {
      doMacros = "true".equals(doMacrosStr[0]);
    }
    if (!isShard) { // Don't expand macros in shard requests
      String[] doMacrosStr = newMap.get("expandMacros");
      boolean doMacros = true;
      if (doMacrosStr != null) {
        doMacros = "true".equals(doMacrosStr[0]);
      }

    if (doMacros) {
      newMap = MacroExpander.expand(newMap);
      if (doMacros) {
        newMap = MacroExpander.expand(newMap);
      }
    }
    // Set these params as soon as possible so if there is an error processing later, things like
    // "wt=json" will take effect from the defaults.
@@ -31,8 +31,12 @@ import org.apache.solr.common.SolrException;
import org.apache.solr.response.transform.DocTransformer;
import org.apache.solr.schema.BinaryField;
import org.apache.solr.schema.BoolField;
import org.apache.solr.schema.DoublePointField;
import org.apache.solr.schema.FieldType;
import org.apache.solr.schema.FloatPointField;
import org.apache.solr.schema.IndexSchema;
import org.apache.solr.schema.IntPointField;
import org.apache.solr.schema.LongPointField;
import org.apache.solr.schema.SchemaField;
import org.apache.solr.schema.StrField;
import org.apache.solr.schema.TextField;

@@ -213,6 +217,10 @@ public class DocsStreamer implements Iterator<SolrDocument> {
    KNOWN_TYPES.add(TrieDoubleField.class);
    KNOWN_TYPES.add(TrieDateField.class);
    KNOWN_TYPES.add(BinaryField.class);
    KNOWN_TYPES.add(IntPointField.class);
    KNOWN_TYPES.add(LongPointField.class);
    KNOWN_TYPES.add(DoublePointField.class);
    KNOWN_TYPES.add(FloatPointField.class);
    // We do not add UUIDField because UUID object is not a supported type in JavaBinCodec
    // and if we write UUIDField.toObject, we wouldn't know how to handle it in the client side
  }
@@ -36,7 +36,6 @@ import org.apache.lucene.collation.CollationKeyAnalyzer;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TermRangeQuery;

@@ -242,7 +241,7 @@ public class CollationField extends FieldType {
    BytesRef low = part1 == null ? null : getCollationKey(f, part1);
    BytesRef high = part2 == null ? null : getCollationKey(f, part2);
    if (!field.indexed() && field.hasDocValues()) {
      return DocValuesRangeQuery.newBytesRefRange(
      return SortedSetDocValuesField.newRangeQuery(
          field.getName(), low, high, minInclusive, maxInclusive);
    } else {
      return new TermRangeQuery(field.getName(), low, high, minInclusive, maxInclusive);
@@ -0,0 +1,186 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.schema;

import java.lang.invoke.MethodHandles;
import java.util.Collection;

import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.legacy.LegacyNumericType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@code PointField} implementation for {@code Double} values.
 * @see PointField
 * @see DoublePoint
 */
public class DoublePointField extends PointField implements DoubleValueFieldType {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public DoublePointField() {
    type = NumberType.DOUBLE;
  }

  @Override
  public Object toNativeType(Object val) {
    if (val == null) return null;
    if (val instanceof Number) return ((Number) val).doubleValue();
    if (val instanceof String) return Double.parseDouble((String) val);
    return super.toNativeType(val);
  }

  @Override
  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
      boolean maxInclusive) {
    double actualMin, actualMax;
    if (min == null) {
      actualMin = Double.NEGATIVE_INFINITY;
    } else {
      actualMin = Double.parseDouble(min);
      if (!minInclusive) {
        actualMin = DoublePoint.nextUp(actualMin);
      }
    }
    if (max == null) {
      actualMax = Double.POSITIVE_INFINITY;
    } else {
      actualMax = Double.parseDouble(max);
      if (!maxInclusive) {
        actualMax = DoublePoint.nextDown(actualMax);
      }
    }
    return DoublePoint.newRangeQuery(field.getName(), actualMin, actualMax);
  }

  @Override
  public Object toObject(SchemaField sf, BytesRef term) {
    return DoublePoint.decodeDimension(term.bytes, term.offset);
  }

  @Override
  public Object toObject(IndexableField f) {
    final Number val = f.numericValue();
    if (val != null) {
      if (f.fieldType().stored() == false && f.fieldType().docValuesType() == DocValuesType.NUMERIC) {
        return Double.longBitsToDouble(val.longValue());
      } else {
        return val;
      }
    } else {
      throw new AssertionError("Unexpected state. Field: '" + f + "'");
    }
  }

  @Override
  protected Query getExactQuery(SchemaField field, String externalVal) {
    return DoublePoint.newExactQuery(field.getName(), Double.parseDouble(externalVal));
  }

  @Override
  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
    assert externalVal.size() > 0;
    double[] values = new double[externalVal.size()];
    int i = 0;
    for (String val:externalVal) {
      values[i] = Double.parseDouble(val);
      i++;
    }
    return DoublePoint.newSetQuery(field.getName(), values);
  }

  @Override
  protected String indexedToReadable(BytesRef indexedForm) {
    return Double.toString(DoublePoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
  }

  @Override
  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
    result.grow(Double.BYTES);
    result.setLength(Double.BYTES);
    DoublePoint.encodeDimension(Double.parseDouble(val.toString()), result.bytes(), 0);
  }

  @Override
  public SortField getSortField(SchemaField field, boolean top) {
    field.checkSortability();

    Object missingValue = null;
    boolean sortMissingLast = field.sortMissingLast();
    boolean sortMissingFirst = field.sortMissingFirst();

    if (sortMissingLast) {
      missingValue = top ? Double.NEGATIVE_INFINITY : Double.POSITIVE_INFINITY;
    } else if (sortMissingFirst) {
      missingValue = top ? Double.POSITIVE_INFINITY : Double.NEGATIVE_INFINITY;
    }
    SortField sf = new SortField(field.getName(), SortField.Type.DOUBLE, top);
    sf.setMissingValue(missingValue);
    return sf;
  }

  @Override
  public Type getUninversionType(SchemaField sf) {
    if (sf.multiValued()) {
      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
      // return Type.SORTED_DOUBLE;
    } else {
      return Type.DOUBLE_POINT;
    }
  }

  @Override
  public ValueSource getValueSource(SchemaField field, QParser qparser) {
    field.checkFieldCacheSource();
    return new DoubleFieldSource(field.getName());
  }

  @Override
  public LegacyNumericType getNumericType() {
    // TODO: refactor this to not use LegacyNumericType
    return LegacyNumericType.DOUBLE;
  }

  @Override
  public IndexableField createField(SchemaField field, Object value, float boost) {
    if (!isFieldUsed(field)) return null;

    if (boost != 1.0 && log.isTraceEnabled()) {
      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
    }
    double doubleValue = (value instanceof Number) ? ((Number) value).doubleValue() : Double.parseDouble(value.toString());
    return new DoublePoint(field.getName(), doubleValue);
  }

  @Override
  protected StoredField getStoredField(SchemaField sf, Object value) {
    return new StoredField(sf.getName(), (Double) this.toNativeType(value));
  }
}
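
The new DoublePointField delegates all query construction to Lucene's DoublePoint, including the nextUp/nextDown stepping used above to express exclusive bounds, since point range queries are always inclusive. A short usage sketch (the field name is hypothetical):

import org.apache.lucene.document.DoublePoint;
import org.apache.lucene.search.Query;

static Query[] priceQueries() {
  double min = 1.0, max = 10.0;
  Query inclusive = DoublePoint.newRangeQuery("price", min, max); // 1.0 <= v <= 10.0
  // Exclusive bounds step to the adjacent representable double:
  Query exclusive = DoublePoint.newRangeQuery("price", DoublePoint.nextUp(min), DoublePoint.nextDown(max));
  Query exact = DoublePoint.newExactQuery("price", 9.99);
  return new Query[] { inclusive, exclusive, exact };
}
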
@@ -43,7 +43,6 @@ import org.apache.lucene.legacy.LegacyNumericUtils;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.EnumFieldSource;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;

@@ -253,10 +252,21 @@ public class EnumField extends PrimitiveFieldType {
    Query query = null;
    final boolean matchOnly = field.hasDocValues() && !field.indexed();
    if (matchOnly) {
      query = new ConstantScoreQuery(DocValuesRangeQuery.newLongRange(field.getName(),
          min == null ? null : minValue.longValue(),
          max == null ? null : maxValue.longValue(),
          minInclusive, maxInclusive));
      long lowerValue = Long.MIN_VALUE;
      long upperValue = Long.MAX_VALUE;
      if (minValue != null) {
        lowerValue = minValue.longValue();
        if (minInclusive == false) {
          ++lowerValue;
        }
      }
      if (maxValue != null) {
        upperValue = maxValue.longValue();
        if (maxInclusive == false) {
          --upperValue;
        }
      }
      query = new ConstantScoreQuery(NumericDocValuesField.newRangeQuery(field.getName(), lowerValue, upperValue));
    } else {
      query = LegacyNumericRangeQuery.newIntRange(field.getName(), DEFAULT_PRECISION_STEP,
          min == null ? null : minValue,
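
The EnumField rewrite above replaces the removed DocValuesRangeQuery with NumericDocValuesField.newRangeQuery, which takes inclusive long bounds only, so exclusive bounds are folded in by stepping the endpoint. The same adjustment in isolation:

// Fold exclusive bounds into the inclusive [lower, upper] form expected by the
// doc-values range factory; a null bound means unbounded on that side.
static long inclusiveLower(Long min, boolean minInclusive) {
  if (min == null) return Long.MIN_VALUE;
  return minInclusive ? min : min + 1; // assumes an exclusive min is < Long.MAX_VALUE
}

static long inclusiveUpper(Long max, boolean maxInclusive) {
  if (max == null) return Long.MAX_VALUE;
  return maxInclusive ? max : max - 1; // assumes an exclusive max is > Long.MIN_VALUE
}
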
@@ -36,13 +36,13 @@ import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.legacy.LegacyNumericType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.DocValuesRewriteMethod;
import org.apache.lucene.search.MultiTermQuery;
import org.apache.lucene.search.PrefixQuery;

@@ -126,6 +126,10 @@ public abstract class FieldType extends FieldProperties {
  public boolean isPolyField(){
    return false;
  }

  public boolean isPointField() {
    return false;
  }

  /**
   * Returns true if the fields' docValues should be used for obtaining stored value

@@ -395,7 +399,10 @@ public abstract class FieldType extends FieldProperties {
    return toInternal(val);
  }

  /** Given the readable value, return the term value that will match it. */
  /** Given the readable value, return the term value that will match it.
   * This method will modify the size and length of the {@code result}
   * parameter and write from offset 0
   */
  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
    final String internal = readableToIndexed(val.toString());
    result.copyChars(internal);

@@ -713,17 +720,17 @@ public abstract class FieldType extends FieldProperties {
   */
  public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
    // TODO: change these all to use readableToIndexed/bytes instead (e.g. for unicode collation)
    final BytesRef miValue = part1 == null ? null : new BytesRef(toInternal(part1));
    final BytesRef maxValue = part2 == null ? null : new BytesRef(toInternal(part2));
    if (field.hasDocValues() && !field.indexed()) {
      return DocValuesRangeQuery.newBytesRefRange(
          field.getName(),
          part1 == null ? null : new BytesRef(toInternal(part1)),
          part2 == null ? null : new BytesRef(toInternal(part2)),
          minInclusive, maxInclusive);
      return SortedSetDocValuesField.newRangeQuery(
          field.getName(),
          miValue, maxValue,
          minInclusive, maxInclusive);
    } else {
      SolrRangeQuery rangeQuery = new SolrRangeQuery(
          field.getName(),
          part1 == null ? null : new BytesRef(toInternal(part1)),
          part2 == null ? null : new BytesRef(toInternal(part2)),
          miValue, maxValue,
          minInclusive, maxInclusive);
      return rangeQuery;
    }
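
FieldType.getRangeQuery now hoists both bounds into BytesRef locals and, for docValues-only fields, builds the range with the SortedSetDocValuesField factory method introduced by LUCENE-7643. A standalone sketch over an unindexed string field (field name hypothetical; null bounds are open-ended):

import org.apache.lucene.document.SortedSetDocValuesField;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

static Query dvRange(String field, String lo, String hi, boolean incLo, boolean incHi) {
  BytesRef low = lo == null ? null : new BytesRef(lo);
  BytesRef high = hi == null ? null : new BytesRef(hi);
  // Evaluated against the field's sorted-set doc values rather than the terms index.
  return SortedSetDocValuesField.newRangeQuery(field, low, high, incLo, incHi);
}
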
@@ -0,0 +1,186 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.schema;

import java.lang.invoke.MethodHandles;
import java.util.Collection;

import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.legacy.LegacyNumericType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@code PointField} implementation for {@code Float} values.
 * @see PointField
 * @see FloatPoint
 */
public class FloatPointField extends PointField implements FloatValueFieldType {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public FloatPointField() {
    type = NumberType.FLOAT;
  }

  @Override
  public Object toNativeType(Object val) {
    if (val == null) return null;
    if (val instanceof Number) return ((Number) val).floatValue();
    if (val instanceof String) return Float.parseFloat((String) val);
    return super.toNativeType(val);
  }

  @Override
  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
      boolean maxInclusive) {
    float actualMin, actualMax;
    if (min == null) {
      actualMin = Float.NEGATIVE_INFINITY;
    } else {
      actualMin = Float.parseFloat(min);
      if (!minInclusive) {
        actualMin = FloatPoint.nextUp(actualMin);
      }
    }
    if (max == null) {
      actualMax = Float.POSITIVE_INFINITY;
    } else {
      actualMax = Float.parseFloat(max);
      if (!maxInclusive) {
        actualMax = FloatPoint.nextDown(actualMax);
      }
    }
    return FloatPoint.newRangeQuery(field.getName(), actualMin, actualMax);
  }

  @Override
  public Object toObject(SchemaField sf, BytesRef term) {
    return FloatPoint.decodeDimension(term.bytes, term.offset);
  }

  @Override
  public Object toObject(IndexableField f) {
    final Number val = f.numericValue();
    if (val != null) {
      if (f.fieldType().stored() == false && f.fieldType().docValuesType() == DocValuesType.NUMERIC) {
        return Float.intBitsToFloat(val.intValue());
      } else {
        return val;
      }
    } else {
      throw new AssertionError("Unexpected state. Field: '" + f + "'");
    }
  }

  @Override
  protected Query getExactQuery(SchemaField field, String externalVal) {
    return FloatPoint.newExactQuery(field.getName(), Float.parseFloat(externalVal));
  }

  @Override
  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
    assert externalVal.size() > 0;
    float[] values = new float[externalVal.size()];
    int i = 0;
    for (String val:externalVal) {
      values[i] = Float.parseFloat(val);
      i++;
    }
    return FloatPoint.newSetQuery(field.getName(), values);
  }

  @Override
  protected String indexedToReadable(BytesRef indexedForm) {
    return Float.toString(FloatPoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
  }

  @Override
  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
    result.grow(Float.BYTES);
    result.setLength(Float.BYTES);
    FloatPoint.encodeDimension(Float.parseFloat(val.toString()), result.bytes(), 0);
  }

  @Override
  public SortField getSortField(SchemaField field, boolean top) {
    field.checkSortability();

    Object missingValue = null;
    boolean sortMissingLast = field.sortMissingLast();
    boolean sortMissingFirst = field.sortMissingFirst();

    if (sortMissingLast) {
      missingValue = top ? Float.NEGATIVE_INFINITY : Float.POSITIVE_INFINITY;
    } else if (sortMissingFirst) {
      missingValue = top ? Float.POSITIVE_INFINITY : Float.NEGATIVE_INFINITY;
    }
    SortField sf = new SortField(field.getName(), SortField.Type.FLOAT, top);
    sf.setMissingValue(missingValue);
    return sf;
  }

  @Override
  public Type getUninversionType(SchemaField sf) {
    if (sf.multiValued()) {
      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
      // return Type.SORTED_FLOAT;
    } else {
      return Type.FLOAT_POINT;
    }
  }

  @Override
  public ValueSource getValueSource(SchemaField field, QParser qparser) {
    field.checkFieldCacheSource();
    return new FloatFieldSource(field.getName());
  }

  @Override
  public LegacyNumericType getNumericType() {
    // TODO: refactor this to not use LegacyNumericType
    return LegacyNumericType.FLOAT;
  }

  @Override
  public IndexableField createField(SchemaField field, Object value, float boost) {
    if (!isFieldUsed(field)) return null;

    if (boost != 1.0 && log.isTraceEnabled()) {
      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
    }
    float floatValue = (value instanceof Number) ? ((Number) value).floatValue() : Float.parseFloat(value.toString());
    return new FloatPoint(field.getName(), floatValue);
  }

  @Override
  protected StoredField getStoredField(SchemaField sf, Object value) {
    return new StoredField(sf.getName(), (Float) this.toNativeType(value));
  }
}
@ -0,0 +1,186 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.solr.schema;
|
||||
|
||||
import java.lang.invoke.MethodHandles;
|
||||
import java.util.Collection;
|
||||
|
||||
import org.apache.lucene.document.IntPoint;
|
||||
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.legacy.LegacyNumericType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.IntFieldSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@code PointField} implementation for {@code Integer} values.
 * @see PointField
 * @see IntPoint
 */
public class IntPointField extends PointField implements IntValueFieldType {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public IntPointField() {
    type = NumberType.INTEGER;
  }

  @Override
  public Object toNativeType(Object val) {
    if (val == null) return null;
    if (val instanceof Number) return ((Number) val).intValue();
    try {
      if (val instanceof String) return Integer.parseInt((String) val);
    } catch (NumberFormatException e) {
      Float v = Float.parseFloat((String) val);
      return v.intValue();
    }
    return super.toNativeType(val);
  }

  @Override
  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
      boolean maxInclusive) {
    int actualMin, actualMax;
    if (min == null) {
      actualMin = Integer.MIN_VALUE;
    } else {
      actualMin = Integer.parseInt(min);
      if (!minInclusive) {
        actualMin++;
      }
    }
    if (max == null) {
      actualMax = Integer.MAX_VALUE;
    } else {
      actualMax = Integer.parseInt(max);
      if (!maxInclusive) {
        actualMax--;
      }
    }
    return IntPoint.newRangeQuery(field.getName(), actualMin, actualMax);
  }

  @Override
  public Object toObject(SchemaField sf, BytesRef term) {
    return IntPoint.decodeDimension(term.bytes, term.offset);
  }

  @Override
  public Object toObject(IndexableField f) {
    final Number val = f.numericValue();
    if (val != null) {
      return val.intValue();
    } else {
      throw new AssertionError("Unexpected state. Field: '" + f + "'");
    }
  }

  @Override
  protected Query getExactQuery(SchemaField field, String externalVal) {
    return IntPoint.newExactQuery(field.getName(), Integer.parseInt(externalVal));
  }

  @Override
  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
    assert externalVal.size() > 0;
    int[] values = new int[externalVal.size()];
    int i = 0;
    for (String val : externalVal) {
      values[i] = Integer.parseInt(val);
      i++;
    }
    return IntPoint.newSetQuery(field.getName(), values);
  }

  @Override
  protected String indexedToReadable(BytesRef indexedForm) {
    return Integer.toString(IntPoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
  }

  @Override
  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
    result.grow(Integer.BYTES);
    result.setLength(Integer.BYTES);
    IntPoint.encodeDimension(Integer.parseInt(val.toString()), result.bytes(), 0);
  }

  @Override
  public SortField getSortField(SchemaField field, boolean top) {
    field.checkSortability();

    Object missingValue = null;
    boolean sortMissingLast = field.sortMissingLast();
    boolean sortMissingFirst = field.sortMissingFirst();

    if (sortMissingLast) {
      missingValue = top ? Integer.MIN_VALUE : Integer.MAX_VALUE;
    } else if (sortMissingFirst) {
      missingValue = top ? Integer.MAX_VALUE : Integer.MIN_VALUE;
    }
    SortField sf = new SortField(field.getName(), SortField.Type.INT, top);
    sf.setMissingValue(missingValue);
    return sf;
  }

  @Override
  public Type getUninversionType(SchemaField sf) {
    if (sf.multiValued()) {
      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
      // return Type.SORTED_INTEGER;
    } else {
      return Type.INTEGER_POINT;
    }
  }

  @Override
  public ValueSource getValueSource(SchemaField field, QParser qparser) {
    field.checkFieldCacheSource();
    return new IntFieldSource(field.getName());
  }

  @Override
  public LegacyNumericType getNumericType() {
    return LegacyNumericType.INT;
  }

  @Override
  public IndexableField createField(SchemaField field, Object value, float boost) {
    if (!isFieldUsed(field)) return null;

    if (boost != 1.0 && log.isTraceEnabled()) {
      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
    }
    int intValue = (value instanceof Number) ? ((Number) value).intValue() : Integer.parseInt(value.toString());
    return new IntPoint(field.getName(), intValue);
  }

  @Override
  protected StoredField getStoredField(SchemaField sf, Object value) {
    return new StoredField(sf.getName(), (Integer) this.toNativeType(value));
  }

}
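A minimal standalone sketch (illustrative, not from the patch; the field name "price" is hypothetical) of the Lucene query that getPointRangeQuery builds once exclusive bounds have been folded into the endpoints, e.g. for the Solr range {10 TO 100}:

    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.search.Query;

    public class IntPointRangeExample {
      public static void main(String[] args) {
        // {10 TO 100} (both ends exclusive) becomes the inclusive range [11 TO 99]
        Query q = IntPoint.newRangeQuery("price", 10 + 1, 100 - 1);
        System.out.println(q); // expected to print something like: price:[11 TO 99]
      }
    }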

@@ -0,0 +1,185 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.solr.schema;

import java.lang.invoke.MethodHandles;
import java.util.Collection;

import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.legacy.LegacyNumericType;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.LongFieldSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.solr.search.QParser;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * {@code PointField} implementation for {@code Long} values.
 * @see PointField
 * @see LongPoint
 */
public class LongPointField extends PointField implements LongValueFieldType {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public LongPointField() {
    type = NumberType.LONG;
  }

  @Override
  public Object toNativeType(Object val) {
    if (val == null) return null;
    if (val instanceof Number) return ((Number) val).longValue();
    try {
      if (val instanceof String) return Long.parseLong((String) val);
    } catch (NumberFormatException e) {
      Double v = Double.parseDouble((String) val);
      return v.longValue();
    }
    return super.toNativeType(val);
  }

  @Override
  public Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
      boolean maxInclusive) {
    long actualMin, actualMax;
    if (min == null) {
      actualMin = Long.MIN_VALUE;
    } else {
      actualMin = Long.parseLong(min);
      if (!minInclusive) {
        actualMin++;
      }
    }
    if (max == null) {
      actualMax = Long.MAX_VALUE;
    } else {
      actualMax = Long.parseLong(max);
      if (!maxInclusive) {
        actualMax--;
      }
    }
    return LongPoint.newRangeQuery(field.getName(), actualMin, actualMax);
  }

  @Override
  public Object toObject(SchemaField sf, BytesRef term) {
    return LongPoint.decodeDimension(term.bytes, term.offset);
  }

  @Override
  public Object toObject(IndexableField f) {
    final Number val = f.numericValue();
    if (val != null) {
      return val;
    } else {
      throw new AssertionError("Unexpected state. Field: '" + f + "'");
    }
  }

  @Override
  protected Query getExactQuery(SchemaField field, String externalVal) {
    return LongPoint.newExactQuery(field.getName(), Long.parseLong(externalVal));
  }

  @Override
  public Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVal) {
    assert externalVal.size() > 0;
    long[] values = new long[externalVal.size()];
    int i = 0;
    for (String val : externalVal) {
      values[i] = Long.parseLong(val);
      i++;
    }
    return LongPoint.newSetQuery(field.getName(), values);
  }

  @Override
  protected String indexedToReadable(BytesRef indexedForm) {
    return Long.toString(LongPoint.decodeDimension(indexedForm.bytes, indexedForm.offset));
  }

  @Override
  public void readableToIndexed(CharSequence val, BytesRefBuilder result) {
    result.grow(Long.BYTES);
    result.setLength(Long.BYTES);
    LongPoint.encodeDimension(Long.parseLong(val.toString()), result.bytes(), 0);
  }

  @Override
  public SortField getSortField(SchemaField field, boolean top) {
    field.checkSortability();

    Object missingValue = null;
    boolean sortMissingLast = field.sortMissingLast();
    boolean sortMissingFirst = field.sortMissingFirst();

    if (sortMissingLast) {
      missingValue = top ? Long.MIN_VALUE : Long.MAX_VALUE;
    } else if (sortMissingFirst) {
      missingValue = top ? Long.MAX_VALUE : Long.MIN_VALUE;
    }
    SortField sf = new SortField(field.getName(), SortField.Type.LONG, top);
    sf.setMissingValue(missingValue);
    return sf;
  }

  @Override
  public Type getUninversionType(SchemaField sf) {
    if (sf.multiValued()) {
      throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
      // return Type.SORTED_LONG;
    } else {
      return Type.LONG_POINT;
    }
  }

  @Override
  public ValueSource getValueSource(SchemaField field, QParser qparser) {
    field.checkFieldCacheSource();
    return new LongFieldSource(field.getName());
  }

  @Override
  public LegacyNumericType getNumericType() {
    return LegacyNumericType.LONG;
  }

  @Override
  public IndexableField createField(SchemaField field, Object value, float boost) {
    if (!isFieldUsed(field)) return null;

    if (boost != 1.0 && log.isTraceEnabled()) {
      log.trace("Can't use document/field boost for PointField. Field: " + field.getName() + ", boost: " + boost);
    }
    long longValue = (value instanceof Number) ? ((Number) value).longValue() : Long.parseLong(value.toString());
    return new LongPoint(field.getName(), longValue);
  }

  @Override
  protected StoredField getStoredField(SchemaField sf, Object value) {
    return new StoredField(sf.getName(), (Long) this.toNativeType(value));
  }
}
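As a sanity check on the byte encoding behind readableToIndexed and indexedToReadable, a small standalone sketch (not from the patch) showing that the Lucene 6.x LongPoint helpers round-trip a value:

    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.util.BytesRefBuilder;

    public class LongPointCodecExample {
      public static void main(String[] args) {
        BytesRefBuilder b = new BytesRefBuilder();
        b.grow(Long.BYTES);
        b.setLength(Long.BYTES);
        LongPoint.encodeDimension(42L, b.bytes(), 0);                    // sortable big-endian bytes
        System.out.println(LongPoint.decodeDimension(b.get().bytes, 0)); // 42
      }
    }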

@@ -0,0 +1,151 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.schema;

import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.solr.common.SolrException;
import org.apache.solr.search.FunctionRangeQuery;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.ValueSourceRangeFilter;
import org.apache.solr.util.DateMathParser;

public abstract class NumericFieldType extends PrimitiveFieldType {

  public static enum NumberType {
    INTEGER,
    LONG,
    FLOAT,
    DOUBLE,
    DATE
  }

  protected NumberType type;

  /**
   * @return the type of this field
   */
  final public NumberType getType() {
    return type;
  }

  private static long FLOAT_NEGATIVE_INFINITY_BITS = (long) Float.floatToIntBits(Float.NEGATIVE_INFINITY);
  private static long DOUBLE_NEGATIVE_INFINITY_BITS = Double.doubleToLongBits(Double.NEGATIVE_INFINITY);
  private static long FLOAT_POSITIVE_INFINITY_BITS = (long) Float.floatToIntBits(Float.POSITIVE_INFINITY);
  private static long DOUBLE_POSITIVE_INFINITY_BITS = Double.doubleToLongBits(Double.POSITIVE_INFINITY);
  private static long FLOAT_MINUS_ZERO_BITS = (long) Float.floatToIntBits(-0f);
  private static long DOUBLE_MINUS_ZERO_BITS = Double.doubleToLongBits(-0d);
  private static long FLOAT_ZERO_BITS = (long) Float.floatToIntBits(0f);
  private static long DOUBLE_ZERO_BITS = Double.doubleToLongBits(0d);

  protected Query getDocValuesRangeQuery(QParser parser, SchemaField field, String min, String max,
      boolean minInclusive, boolean maxInclusive) {
    assert field.hasDocValues() && !field.multiValued();

    switch (getType()) {
      case INTEGER:
        return numericDocValuesRangeQuery(field.getName(),
            min == null ? null : (long) Integer.parseInt(min),
            max == null ? null : (long) Integer.parseInt(max),
            minInclusive, maxInclusive);
      case FLOAT:
        return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
      case LONG:
        return numericDocValuesRangeQuery(field.getName(),
            min == null ? null : Long.parseLong(min),
            max == null ? null : Long.parseLong(max),
            minInclusive, maxInclusive);
      case DOUBLE:
        return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
      case DATE:
        return numericDocValuesRangeQuery(field.getName(),
            min == null ? null : DateMathParser.parseMath(null, min).getTime(),
            max == null ? null : DateMathParser.parseMath(null, max).getTime(),
            minInclusive, maxInclusive);
      default:
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for numeric field");
    }
  }

  protected Query getRangeQueryForFloatDoubleDocValues(SchemaField sf, String min, String max, boolean minInclusive, boolean maxInclusive) {
    Query query;
    String fieldName = sf.getName();

    Number minVal = min == null ? null : getType() == NumberType.FLOAT ? Float.parseFloat(min) : Double.parseDouble(min);
    Number maxVal = max == null ? null : getType() == NumberType.FLOAT ? Float.parseFloat(max) : Double.parseDouble(max);

    Long minBits =
        min == null ? null : getType() == NumberType.FLOAT ? (long) Float.floatToIntBits(minVal.floatValue()) : Double.doubleToLongBits(minVal.doubleValue());
    Long maxBits =
        max == null ? null : getType() == NumberType.FLOAT ? (long) Float.floatToIntBits(maxVal.floatValue()) : Double.doubleToLongBits(maxVal.doubleValue());

    long negativeInfinityBits = getType() == NumberType.FLOAT ? FLOAT_NEGATIVE_INFINITY_BITS : DOUBLE_NEGATIVE_INFINITY_BITS;
    long positiveInfinityBits = getType() == NumberType.FLOAT ? FLOAT_POSITIVE_INFINITY_BITS : DOUBLE_POSITIVE_INFINITY_BITS;
    long minusZeroBits = getType() == NumberType.FLOAT ? FLOAT_MINUS_ZERO_BITS : DOUBLE_MINUS_ZERO_BITS;
    long zeroBits = getType() == NumberType.FLOAT ? FLOAT_ZERO_BITS : DOUBLE_ZERO_BITS;

    // If min is negative (or -0d) and max is positive (or +0d), then issue a FunctionRangeQuery
    if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
        (maxVal == null || (maxVal.doubleValue() > 0d || maxBits == zeroBits))) {

      ValueSource vs = getValueSource(sf, null);
      query = new FunctionRangeQuery(new ValueSourceRangeFilter(vs, min, max, minInclusive, maxInclusive));

    } else { // If both max and min are negative (or -0d), then issue range query with max and min reversed
      if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
          (maxVal != null && (maxVal.doubleValue() < 0d || maxBits == minusZeroBits))) {
        query = numericDocValuesRangeQuery
            (fieldName, maxBits, (min == null ? negativeInfinityBits : minBits), maxInclusive, minInclusive);
      } else { // If both max and min are positive, then issue range query
        query = numericDocValuesRangeQuery
            (fieldName, minBits, (max == null ? positiveInfinityBits : maxBits), minInclusive, maxInclusive);
      }
    }
    return query;
  }

  public static Query numericDocValuesRangeQuery(
      String field,
      Number lowerValue, Number upperValue,
      boolean lowerInclusive, boolean upperInclusive) {

    long actualLowerValue = Long.MIN_VALUE;
    if (lowerValue != null) {
      actualLowerValue = lowerValue.longValue();
      if (lowerInclusive == false) {
        if (actualLowerValue == Long.MAX_VALUE) {
          return new MatchNoDocsQuery();
        }
        ++actualLowerValue;
      }
    }

    long actualUpperValue = Long.MAX_VALUE;
    if (upperValue != null) {
      actualUpperValue = upperValue.longValue();
      if (upperInclusive == false) {
        if (actualUpperValue == Long.MIN_VALUE) {
          return new MatchNoDocsQuery();
        }
        --actualUpperValue;
      }
    }
    return NumericDocValuesField.newRangeQuery(field, actualLowerValue, actualUpperValue);
  }
}
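The sign-based branching in getRangeQueryForFloatDoubleDocValues exists because Float.floatToIntBits (and Double.doubleToLongBits) preserves numeric order only within a single sign: negative values sort in reverse in bit space. A small standalone demonstration (not from the patch):

    public class FloatBitsOrderExample {
      public static void main(String[] args) {
        long p1 = Float.floatToIntBits(1f);
        long p2 = Float.floatToIntBits(2f);
        long n1 = Float.floatToIntBits(-1f);
        long n2 = Float.floatToIntBits(-2f);
        System.out.println(p1 < p2); // true: positives keep numeric order in bit space
        System.out.println(n2 < n1); // false: -2f < -1f numerically, but its bits compare higher
      }
    }

This is why an all-negative range is issued with min and max swapped, while a range straddling zero falls back to a FunctionRangeQuery.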

@@ -0,0 +1,233 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.schema;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.CharsRef;
import org.apache.lucene.util.CharsRefBuilder;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.QParser;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Provides field types to support Lucene's {@link
 * org.apache.lucene.document.IntPoint}, {@link org.apache.lucene.document.LongPoint}, {@link org.apache.lucene.document.FloatPoint} and
 * {@link org.apache.lucene.document.DoublePoint}.
 * See {@link org.apache.lucene.search.PointRangeQuery} for more details.
 * It supports integer, float, long and double types. See subclasses for details.
 * <br>
 * {@code DocValues} are supported for single-value cases ({@code NumericDocValues}).
 * {@code FieldCache} is not supported for {@code PointField}s, so sorting, faceting, etc. on these fields require the use of {@code docValues="true"} in the schema.
 */
public abstract class PointField extends NumericFieldType {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  @Override
  public boolean isPointField() {
    return true;
  }

  @Override
  public final ValueSource getSingleValueSource(MultiValueSelector choice, SchemaField field, QParser parser) {
    // trivial base case
    if (!field.multiValued()) {
      // single value matches any selector
      return getValueSource(field, parser);
    }

    // Point fields don't support UninvertingReader. See SOLR-9202
    if (!field.hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "docValues='true' is required to select '" + choice.toString() +
          "' value from multivalued field (" + field.getName() + ") at query time");
    }

    // multivalued Point fields all use SortedSetDocValues, so we give a clean error if that's
    // not supported by the specified choice, else we delegate to a helper
    SortedSetSelector.Type selectorType = choice.getSortedSetSelectorType();
    if (null == selectorType) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          choice.toString() + " is not a supported option for picking a single value"
          + " from the multivalued field: " + field.getName() +
          " (type: " + this.getTypeName() + ")");
    }

    return getSingleValueSource(selectorType, field);
  }

  /**
   * Helper method that will only be called for multivalued Point fields that have doc values.
   * Default impl throws an error indicating that selecting a single value from this multivalued
   * field is not supported for this field type.
   *
   * @param choice the selector Type to use, will never be null
   * @param field the field to use, guaranteed to be multivalued.
   * @see #getSingleValueSource(MultiValueSelector,SchemaField,QParser)
   */
  protected ValueSource getSingleValueSource(SortedSetSelector.Type choice, SchemaField field) {
    throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported");
  }

  @Override
  public boolean isTokenized() {
    return false;
  }

  @Override
  public boolean multiValuedFieldCache() {
    return false;
  }

  @Override
  public abstract Query getSetQuery(QParser parser, SchemaField field, Collection<String> externalVals);

  @Override
  public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
    if (!field.indexed() && field.hasDocValues()) {
      // currently implemented as singleton range
      return getRangeQuery(parser, field, externalVal, externalVal, true, true);
    } else {
      return getExactQuery(field, externalVal);
    }
  }

  protected abstract Query getExactQuery(SchemaField field, String externalVal);

  public abstract Query getPointRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
      boolean maxInclusive);

  @Override
  public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
      boolean maxInclusive) {
    if (!field.indexed() && field.hasDocValues() && !field.multiValued()) {
      return getDocValuesRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
    } else {
      return getPointRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
    }
  }

  @Override
  public String storedToReadable(IndexableField f) {
    return toExternal(f);
  }

  @Override
  public String toInternal(String val) {
    throw new UnsupportedOperationException("Can't generate internal string in PointField. Use PointField.toInternalByteRef");
  }

  public BytesRef toInternalByteRef(String val) {
    final BytesRefBuilder bytes = new BytesRefBuilder();
    readableToIndexed(val, bytes);
    return bytes.get();
  }

  @Override
  public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
    writer.writeVal(name, toObject(f));
  }

  @Override
  public String storedToIndexed(IndexableField f) {
    throw new UnsupportedOperationException("Not supported with PointFields");
  }

  @Override
  public CharsRef indexedToReadable(BytesRef indexedForm, CharsRefBuilder charsRef) {
    final String value = indexedToReadable(indexedForm);
    charsRef.grow(value.length());
    charsRef.setLength(value.length());
    value.getChars(0, charsRef.length(), charsRef.chars(), 0);
    return charsRef.get();
  }

  @Override
  public String indexedToReadable(String indexedForm) {
    return indexedToReadable(new BytesRef(indexedForm));
  }

  protected abstract String indexedToReadable(BytesRef indexedForm);

  protected boolean isFieldUsed(SchemaField field) {
    boolean indexed = field.indexed();
    boolean stored = field.stored();
    boolean docValues = field.hasDocValues();

    if (!indexed && !stored && !docValues) {
      if (log.isTraceEnabled()) {
        log.trace("Ignoring unindexed/unstored field: " + field);
      }
      return false;
    }
    return true;
  }

  @Override
  public List<IndexableField> createFields(SchemaField sf, Object value, float boost) {
    if (!(sf.hasDocValues() || sf.stored())) {
      return Collections.singletonList(createField(sf, value, boost));
    }
    List<IndexableField> fields = new ArrayList<>();
    final IndexableField field = createField(sf, value, boost);
    fields.add(field);

    if (sf.hasDocValues()) {
      if (sf.multiValued()) {
        throw new UnsupportedOperationException("MultiValued Point fields with DocValues is not currently supported. Field: '" + sf.getName() + "'");
      } else {
        final long bits;
        if (field.numericValue() instanceof Integer || field.numericValue() instanceof Long) {
          bits = field.numericValue().longValue();
        } else if (field.numericValue() instanceof Float) {
          bits = Float.floatToIntBits(field.numericValue().floatValue());
        } else {
          assert field.numericValue() instanceof Double;
          bits = Double.doubleToLongBits(field.numericValue().doubleValue());
        }
        fields.add(new NumericDocValuesField(sf.getName(), bits));
      }
    }
    if (sf.stored()) {
      fields.add(getStoredField(sf, value));
    }
    return fields;
  }

  protected abstract StoredField getStoredField(SchemaField sf, Object value);

  @Override
  public void checkSchemaField(final SchemaField field) {
    // PointFields support DocValues
  }
}
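To make the createFields contract concrete, a hedged standalone sketch (field name and value are hypothetical) of the three Lucene fields that an indexed, stored, docValues-enabled int field contributes per document:

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.document.StoredField;

    public class PointFieldsExample {
      public static void main(String[] args) {
        Document doc = new Document();
        doc.add(new IntPoint("price", 42));                // indexed point: range/exact queries
        doc.add(new NumericDocValuesField("price", 42L));  // doc values: sorting, faceting, functions
        doc.add(new StoredField("price", 42));             // stored value: returned with results
        System.out.println(doc.getFields().size());        // 3
      }
    }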

@@ -170,6 +170,11 @@ public final class SchemaField extends FieldProperties implements IndexableField
          "can not sort on multivalued field: "
          + getName());
    }
    if (this.type.isPointField() && !hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "can not sort on a PointField without doc values: "
          + getName());
    }
  }

  /**

@@ -191,6 +196,11 @@ public final class SchemaField extends FieldProperties implements IndexableField
          "can not use FieldCache on multivalued field: "
          + getName());
    }
    if (this.type.isPointField() && !hasDocValues()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Point fields can't use FieldCache. Use docValues=true for field: "
          + getName());
    }

  }
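These checks guard the sort and FieldCache paths: for point fields, Lucene reads per-document sort values from doc values. A rough standalone sketch (assumptions: Lucene 6.x with StandardAnalyzer from analyzers-common on the classpath; all names illustrative) of the index layout and sort that the docValues requirement enables:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.IntPoint;
    import org.apache.lucene.document.NumericDocValuesField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.SortField;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.RAMDirectory;

    public class PointSortExample {
      public static void main(String[] args) throws Exception {
        RAMDirectory dir = new RAMDirectory();
        IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
        for (int price : new int[] {30, 10, 20}) {
          Document doc = new Document();
          doc.add(new IntPoint("price", price));              // for queries
          doc.add(new NumericDocValuesField("price", price)); // for sorting
          w.addDocument(doc);
        }
        w.close();

        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
        SortField sf = new SortField("price", SortField.Type.INT);
        sf.setMissingValue(Integer.MAX_VALUE); // documents without a value sort last
        TopDocs hits = searcher.search(new MatchAllDocsQuery(), 10, new Sort(sf));
        System.out.println(hits.totalHits);    // 3, ordered by price: 10, 20, 30
      }
    }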

@@ -84,7 +84,7 @@ import org.apache.solr.util.DateMathParser;
 */
public class TrieDateField extends TrieField implements DateValueFieldType {
  {
    this.type = TrieTypes.DATE;
    this.type = NumberType.DATE;
  }

  @Override

@@ -52,7 +52,7 @@ import org.apache.lucene.util.mutable.MutableValueDouble;
 */
public class TrieDoubleField extends TrieField implements DoubleValueFieldType {
  {
    type=TrieTypes.DOUBLE;
    type = NumberType.DOUBLE;
  }

  @Override

@@ -43,7 +43,6 @@ import org.apache.lucene.queries.function.valuesource.DoubleFieldSource;
import org.apache.lucene.queries.function.valuesource.FloatFieldSource;
import org.apache.lucene.queries.function.valuesource.IntFieldSource;
import org.apache.lucene.queries.function.valuesource.LongFieldSource;
import org.apache.lucene.search.DocValuesRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSelector;

@@ -56,9 +55,7 @@ import org.apache.lucene.util.mutable.MutableValueDate;
import org.apache.lucene.util.mutable.MutableValueLong;
import org.apache.solr.common.SolrException;
import org.apache.solr.response.TextResponseWriter;
import org.apache.solr.search.FunctionRangeQuery;
import org.apache.solr.search.QParser;
import org.apache.solr.search.function.ValueSourceRangeFilter;
import org.apache.solr.uninverting.UninvertingReader.Type;
import org.apache.solr.util.DateMathParser;
import org.slf4j.Logger;

@@ -84,12 +81,11 @@ import org.slf4j.LoggerFactory;
 * @see org.apache.lucene.legacy.LegacyNumericRangeQuery
 * @since solr 1.4
 */
public class TrieField extends PrimitiveFieldType {
public class TrieField extends NumericFieldType {
  public static final int DEFAULT_PRECISION_STEP = 8;

  protected int precisionStepArg = TrieField.DEFAULT_PRECISION_STEP; // the one passed in or defaulted
  protected int precisionStep;   // normalized
  protected TrieTypes type;

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());


@@ -107,7 +103,7 @@ public class TrieField extends PrimitiveFieldType {

    if (t != null) {
      try {
        type = TrieTypes.valueOf(t.toUpperCase(Locale.ROOT));
        type = NumberType.valueOf(t.toUpperCase(Locale.ROOT));
      } catch (IllegalArgumentException e) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
            "Invalid type specified in schema.xml for field: " + args.get("name"), e);

@@ -139,7 +135,7 @@ public class TrieField extends PrimitiveFieldType {
      }

      // normal stored case
      return (type == TrieTypes.DATE) ? new Date(val.longValue()) : val;
      return (type == NumberType.DATE) ? new Date(val.longValue()) : val;
    } else {
      // multi-valued numeric docValues currently use SortedSet on the indexed terms.
      BytesRef term = f.binaryValue();

@@ -340,13 +336,6 @@ public class TrieField extends PrimitiveFieldType {
    return precisionStepArg;
  }

  /**
   * @return the type of this field
   */
  public TrieTypes getType() {
    return type;
  }

  @Override
  public LegacyNumericType getNumericType() {
    switch (type) {

@@ -372,66 +361,41 @@ public class TrieField extends PrimitiveFieldType {
    }
    int ps = precisionStep;
    Query query;
    final boolean matchOnly = field.hasDocValues() && !field.indexed();

    if (field.hasDocValues() && !field.indexed()) {
      return getDocValuesRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
    }

    switch (type) {
      case INTEGER:
        if (matchOnly) {
          query = DocValuesRangeQuery.newLongRange(field.getName(),
              min == null ? null : (long) Integer.parseInt(min),
              max == null ? null : (long) Integer.parseInt(max),
              minInclusive, maxInclusive);
        } else {
          query = LegacyNumericRangeQuery.newIntRange(field.getName(), ps,
              min == null ? null : Integer.parseInt(min),
              max == null ? null : Integer.parseInt(max),
              minInclusive, maxInclusive);
        }
        query = LegacyNumericRangeQuery.newIntRange(field.getName(), ps,
            min == null ? null : Integer.parseInt(min),
            max == null ? null : Integer.parseInt(max),
            minInclusive, maxInclusive);
        break;
      case FLOAT:
        if (matchOnly) {
          return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
        } else {
          query = LegacyNumericRangeQuery.newFloatRange(field.getName(), ps,
              min == null ? null : Float.parseFloat(min),
              max == null ? null : Float.parseFloat(max),
              minInclusive, maxInclusive);
        }
        query = LegacyNumericRangeQuery.newFloatRange(field.getName(), ps,
            min == null ? null : Float.parseFloat(min),
            max == null ? null : Float.parseFloat(max),
            minInclusive, maxInclusive);
        break;
      case LONG:
        if (matchOnly) {
          query = DocValuesRangeQuery.newLongRange(field.getName(),
              min == null ? null : Long.parseLong(min),
              max == null ? null : Long.parseLong(max),
              minInclusive, maxInclusive);
        } else {
          query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
              min == null ? null : Long.parseLong(min),
              max == null ? null : Long.parseLong(max),
              minInclusive, maxInclusive);
        }
        query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
            min == null ? null : Long.parseLong(min),
            max == null ? null : Long.parseLong(max),
            minInclusive, maxInclusive);
        break;
      case DOUBLE:
        if (matchOnly) {
          return getRangeQueryForFloatDoubleDocValues(field, min, max, minInclusive, maxInclusive);
        } else {
          query = LegacyNumericRangeQuery.newDoubleRange(field.getName(), ps,
              min == null ? null : Double.parseDouble(min),
              max == null ? null : Double.parseDouble(max),
              minInclusive, maxInclusive);
        }
        query = LegacyNumericRangeQuery.newDoubleRange(field.getName(), ps,
            min == null ? null : Double.parseDouble(min),
            max == null ? null : Double.parseDouble(max),
            minInclusive, maxInclusive);
        break;
      case DATE:
        if (matchOnly) {
          query = DocValuesRangeQuery.newLongRange(field.getName(),
              min == null ? null : DateMathParser.parseMath(null, min).getTime(),
              max == null ? null : DateMathParser.parseMath(null, max).getTime(),
              minInclusive, maxInclusive);
        } else {
          query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
              min == null ? null : DateMathParser.parseMath(null, min).getTime(),
              max == null ? null : DateMathParser.parseMath(null, max).getTime(),
              minInclusive, maxInclusive);
        }
        query = LegacyNumericRangeQuery.newLongRange(field.getName(), ps,
            min == null ? null : DateMathParser.parseMath(null, min).getTime(),
            max == null ? null : DateMathParser.parseMath(null, max).getTime(),
            minInclusive, maxInclusive);
        break;
      default:
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field");

@@ -440,52 +404,6 @@ public class TrieField extends PrimitiveFieldType {
    return query;
  }

  private static long FLOAT_NEGATIVE_INFINITY_BITS = (long) Float.floatToIntBits(Float.NEGATIVE_INFINITY);
  private static long DOUBLE_NEGATIVE_INFINITY_BITS = Double.doubleToLongBits(Double.NEGATIVE_INFINITY);
  private static long FLOAT_POSITIVE_INFINITY_BITS = (long) Float.floatToIntBits(Float.POSITIVE_INFINITY);
  private static long DOUBLE_POSITIVE_INFINITY_BITS = Double.doubleToLongBits(Double.POSITIVE_INFINITY);
  private static long FLOAT_MINUS_ZERO_BITS = (long) Float.floatToIntBits(-0f);
  private static long DOUBLE_MINUS_ZERO_BITS = Double.doubleToLongBits(-0d);
  private static long FLOAT_ZERO_BITS = (long) Float.floatToIntBits(0f);
  private static long DOUBLE_ZERO_BITS = Double.doubleToLongBits(0d);

  private Query getRangeQueryForFloatDoubleDocValues(SchemaField sf, String min, String max, boolean minInclusive, boolean maxInclusive) {
    Query query;
    String fieldName = sf.getName();

    Number minVal = min == null ? null : type == TrieTypes.FLOAT ? Float.parseFloat(min) : Double.parseDouble(min);
    Number maxVal = max == null ? null : type == TrieTypes.FLOAT ? Float.parseFloat(max) : Double.parseDouble(max);

    Long minBits =
        min == null ? null : type == TrieTypes.FLOAT ? (long) Float.floatToIntBits(minVal.floatValue()) : Double.doubleToLongBits(minVal.doubleValue());
    Long maxBits =
        max == null ? null : type == TrieTypes.FLOAT ? (long) Float.floatToIntBits(maxVal.floatValue()) : Double.doubleToLongBits(maxVal.doubleValue());

    long negativeInfinityBits = type == TrieTypes.FLOAT ? FLOAT_NEGATIVE_INFINITY_BITS : DOUBLE_NEGATIVE_INFINITY_BITS;
    long positiveInfinityBits = type == TrieTypes.FLOAT ? FLOAT_POSITIVE_INFINITY_BITS : DOUBLE_POSITIVE_INFINITY_BITS;
    long minusZeroBits = type == TrieTypes.FLOAT ? FLOAT_MINUS_ZERO_BITS : DOUBLE_MINUS_ZERO_BITS;
    long zeroBits = type == TrieTypes.FLOAT ? FLOAT_ZERO_BITS : DOUBLE_ZERO_BITS;

    // If min is negative (or -0d) and max is positive (or +0d), then issue a FunctionRangeQuery
    if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
        (maxVal == null || (maxVal.doubleValue() > 0d || maxBits == zeroBits))) {

      ValueSource vs = getValueSource(sf, null);
      query = new FunctionRangeQuery(new ValueSourceRangeFilter(vs, min, max, minInclusive, maxInclusive));

    } else { // If both max and min are negative (or -0d), then issue range query with max and min reversed
      if ((minVal == null || minVal.doubleValue() < 0d || minBits == minusZeroBits) &&
          (maxVal != null && (maxVal.doubleValue() < 0d || maxBits == minusZeroBits))) {
        query = DocValuesRangeQuery.newLongRange
            (fieldName, maxBits, (min == null ? negativeInfinityBits : minBits), maxInclusive, minInclusive);
      } else { // If both max and min are positive, then issue range query
        query = DocValuesRangeQuery.newLongRange
            (fieldName, minBits, (max == null ? positiveInfinityBits : maxBits), minInclusive, maxInclusive);
      }
    }
    return query;
  }

  @Override
  public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
    if (!field.indexed() && field.hasDocValues()) {

@@ -550,7 +468,7 @@ public class TrieField extends PrimitiveFieldType {

  @Override
  public String toExternal(IndexableField f) {
    return (type == TrieTypes.DATE)
    return (type == NumberType.DATE)
        ? ((Date) toObject(f)).toInstant().toString()
        : toObject(f).toString();
  }

@@ -763,15 +681,6 @@ public class TrieField extends PrimitiveFieldType {
    }
  }

  public enum TrieTypes {
    INTEGER,
    LONG,
    FLOAT,
    DOUBLE,
    DATE
  }


  static final String INT_PREFIX = new String(new char[]{LegacyNumericUtils.SHIFT_START_INT});
  static final String LONG_PREFIX = new String(new char[]{LegacyNumericUtils.SHIFT_START_LONG});


@@ -834,7 +743,6 @@ class TrieDateFieldSource extends LongFieldSource {
  public long externalToLong(String extVal) {
    return DateMathParser.parseMath(null, extVal).getTime();
  }

}
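Since externalToLong delegates to Solr's date-math parsing, a small standalone sketch (the expression is illustrative) of converting an external date-math string into the epoch milliseconds used as numeric range bounds:

    import java.util.Date;
    import org.apache.solr.util.DateMathParser;

    public class DateMathExample {
      public static void main(String[] args) {
        // "NOW/DAY" rounds the current time down to the start of the day (UTC)
        Date d = DateMathParser.parseMath(null, "NOW/DAY");
        System.out.println(d.getTime()); // epoch millis, as used by externalToLong
      }
    }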

@@ -52,7 +52,7 @@ import org.apache.lucene.util.mutable.MutableValueFloat;
 */
public class TrieFloatField extends TrieField implements FloatValueFieldType {
  {
    type=TrieTypes.FLOAT;
    type = NumberType.FLOAT;
  }

  @Override