mirror of https://github.com/apache/lucene.git

commit 6264baa060

SOLR-2452: merged with trunk up to r1136777

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/branches/solr2452@1136783 13f79535-47bb-0310-9956-ffa450edef68

@@ -21,18 +21,7 @@ A. How to use nightly Jenkins-built Lucene/Solr Maven artifacts

B. How to generate Lucene Maven artifacts

   1. Prerequisites: JDK 1.5+, Ant 1.7.X, and maven-ant-tasks-2.1.1.jar

      In order to generate Maven artifacts for Lucene/Solr, you must first
      download the Maven ant tasks JAR (maven-ant-tasks-2.1.1.jar), e.g.
      from <http://maven.apache.org/ant-tasks/download.html>, and add it
      to any one of the following:

      a. Your $HOME/.ant/lib/ directory (C:\Users\username\.ant\lib\ under
         Windows Vista/7); or
      b. Your $ANT_HOME/lib/ directory (%ANT_HOME%\lib\ under Windows); or
      c. Your $CLASSPATH (%CLASSPATH% under Windows); or
      d. Your ant command line: "-lib /path/to/maven-ant-tasks-2.1.1.jar".

   1. Prerequisites: JDK 1.5+ and Ant 1.7.X

   2. Run the following command from the lucene/ directory:

@@ -47,8 +36,7 @@ B. How to generate Lucene Maven artifacts

C. How to generate Solr Maven artifacts

   1. Prerequisites: JDK 1.6+; Ant 1.7.X; and maven-ant-tasks-2.1.1.jar
      (see item A.1. above for where to put the Maven ant tasks jar).
   1. Prerequisites: JDK 1.6+ and Ant 1.7.X

   2. Run the following from the solr/ directory:

@@ -473,6 +473,11 @@ Changes in backwards compatibility policy
  of IndexInput) as its first argument. (Robert Muir, Dawid Weiss,
  Mike McCandless)

* LUCENE-3208: Made deprecated methods Query.weight(Searcher) and
  Searcher.createWeight() final to prevent override. If you have
  overridden one of these methods, cut over to the non-deprecated
  implementation. (Uwe Schindler, Robert Muir, Yonik Seeley)

Changes in runtime behavior

* LUCENE-2834: the hash used to compute the lock file name when the

@@ -511,6 +516,21 @@ Bug fixes
  ArrayIndexOutOfBoundsException (selckin, Robert Muir, Mike
  McCandless)

* LUCENE-3208: IndexSearcher had its own private similarity field
  and corresponding get/setter overriding Searcher's implementation. If you
  set a different Similarity instance on IndexSearcher, methods implemented
  in the superclass Searcher were not using it, leading to strange bugs.
  (Uwe Schindler, Robert Muir)

API Changes

* LUCENE-3208: Renamed protected IndexSearcher.createWeight() to the expert
  public method IndexSearcher.createNormalizedWeight(), as this better describes
  what this method does. The old method is still there for backwards
  compatibility. Query.weight() was deprecated and simply delegates to
  IndexSearcher. Both deprecated methods will be removed in Lucene 4.0.
  (Uwe Schindler, Robert Muir, Yonik Seeley)
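The CHANGES entry above describes the cut-over from Query.weight(Searcher) to IndexSearcher.createNormalizedWeight(Query). The sketch below is not part of the commit; it only illustrates the migration, assuming an open IndexReader and a simple term query:

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight;

public class WeightMigrationExample {
  static Weight buildWeight(IndexReader reader) throws Exception {
    IndexSearcher searcher = new IndexSearcher(reader);
    Query query = new TermQuery(new Term("body", "lucene"));

    // Before this change (deprecated, now final):
    // Weight weight = query.weight(searcher);

    // After this change: the searcher rewrites the query, creates the
    // Weight, and normalizes it in a single call.
    Weight weight = searcher.createNormalizedWeight(query);
    return weight;
  }
}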

New Features

* LUCENE-3140: Added experimental FST implementation to Lucene.

@@ -520,10 +540,23 @@ New Features
  algorithm over objects that implement the new TwoPhaseCommit interface (such
  as IndexWriter). (Shai Erera)

* LUCENE-3191: Added TopDocs.merge, to facilitate merging results from
  different shards (Uwe Schindler, Mike McCandless)
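LUCENE-3191's TopDocs.merge combines the per-shard results described above. The following is only a hedged sketch of the intended call pattern; the static merge(Sort, int, TopDocs[]) signature is an assumption based on this entry and the MergeSortQueue code later in this commit, not something shown verbatim here:

import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;

public class ShardMergeExample {
  // Merge the top-10 hits from several shards into one ranked list.
  // Passing a null Sort is assumed to mean "merge by relevance score".
  static TopDocs mergeShards(TopDocs[] shardHits) throws java.io.IOException {
    TopDocs merged = TopDocs.merge(null, 10, shardHits);
    // After merging, each ScoreDoc.shardIndex records which shard the hit
    // came from (see the ScoreDoc change later in this commit).
    return merged;
  }
}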

Build

* LUCENE-1344: Create OSGi bundle using dev-tools/maven.
  (Nicolas Lalevée, Luca Stancapiano via ryan)

* LUCENE-3204: The maven-ant-tasks jar is now included in the source tree;
  users of the generate-maven-artifacts target no longer have to manually
  place this jar in the Ant classpath. NOTE: when Ant looks for the
  maven-ant-tasks jar, it looks first in its pre-existing classpath, so
  any copies it finds will be used instead of the copy included in the
  Lucene/Solr source tree. For this reason, it is recommended to remove
  any copies of the maven-ant-tasks jar in the Ant classpath, e.g. under
  ~/.ant/lib/ or under the Ant installation's lib/ directory. (Steve Rowe)

======================= Lucene 3.2.0 =======================

@@ -444,7 +444,10 @@

  <target name="generate-maven-artifacts"
          depends="maven.ant.tasks-check, package, jar-src, jar-test-framework-src, javadocs">
          depends="package, jar-src, jar-test-framework-src, javadocs">
    <taskdef resource="org/apache/maven/artifact/ant/antlib.xml"
             uri="antlib:org.apache.maven.artifact.ant"
             classpathref="maven-ant-tasks.classpath"/>
    <sequential>
      <ant target="get-maven-poms" dir=".."/>

@@ -64,6 +64,11 @@
    <pathelement location="${common.dir}/build/classes/tools"/>
  </path>

  <path id="maven-ant-tasks.classpath">
    <fileset dir="${common.dir}/lib">
      <include name="maven-ant-tasks-*.jar"/>
    </fileset>
  </path>

  <!-- default arguments to pass to JVM executing tests -->
  <property name="testmethod" value=""/>

@@ -196,11 +201,6 @@
    </and>
  </condition>

  <available
    property="maven.ant.tasks.present"
    classname="org.apache.maven.artifact.ant.Pom"
  />

  <target name="clean"
          description="Removes contents of build and dist directories">
    <delete dir="${build.dir}"/>

@@ -322,20 +322,6 @@
    <jarify/>
  </target>

  <target name="maven.ant.tasks-check">
    <fail unless="maven.ant.tasks.present">#
  ##########################################################################
  Maven ant tasks not found.

  Please download the Maven ant tasks JAR (maven-ant-tasks-2.1.1.jar)
  from http://maven.apache.org/ant-tasks/download.html and add it to your
  $$HOME/.ant/lib/ directory, or to your $$ANT_HOME/lib/ directory, or
  to your $$CLASSPATH, or add "-lib /path/to/maven-ant-tasks-2.1.1.jar"
  to the ant command.
  ##########################################################################
    </fail>
  </target>

  <macrodef name="m2-deploy" description="Builds a Maven artifact">
    <element name="artifact-attachments" optional="yes"/>
    <attribute name="pom.xml" default="pom.xml"/>

@@ -75,6 +75,10 @@ New Features
  allow an app to control which indexing changes must be visible to
  which search requests. (Mike McCandless)

* LUCENE-3191: Added SearchGroup.merge and TopGroups.merge, to
  facilitate doing grouping in a distributed environment (Uwe
  Schindler, Mike McCandless)

API Changes

* LUCENE-3141: add getter method to access fragInfos in FieldFragList.

@@ -76,6 +76,9 @@
  </available>

  <target name="dist-maven" if="pom.xml.present" depends="compile-core, jar-src">
    <taskdef resource="org/apache/maven/artifact/ant/antlib.xml"
             uri="antlib:org.apache.maven.artifact.ant"
             classpathref="maven-ant-tasks.classpath"/>
    <sequential>
      <m2-deploy>
        <artifact-attachments>

@@ -502,12 +502,12 @@ public class TestNRTManager extends LuceneTestCase {
        int seenTermCount = 0;
        int shift;
        int trigger;
        if (totTermCount.get() == 0) {
        if (totTermCount.get() < 10) {
          shift = 0;
          trigger = 1;
        } else {
          shift = random.nextInt(totTermCount.get()/10);
          trigger = totTermCount.get()/10;
          shift = random.nextInt(trigger);
        }

        while(System.currentTimeMillis() < stopTime) {

@@ -518,13 +518,13 @@ public class TestNRTManager extends LuceneTestCase {
            }
            totTermCount.set(seenTermCount);
            seenTermCount = 0;
            if (totTermCount.get() == 0) {
            if (totTermCount.get() < 10) {
              shift = 0;
              trigger = 1;
            } else {
              trigger = totTermCount.get()/10;
              //System.out.println("trigger " + trigger);
              shift = random.nextInt(totTermCount.get()/10);
              shift = random.nextInt(trigger);
            }
            termsEnum.seek(new BytesRef(""));
            continue;

@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
 * This class will be removed in Lucene 5.0
 */
@Deprecated
public final class SlowCollatedStringComparator extends FieldComparator {
public final class SlowCollatedStringComparator extends FieldComparator<BytesRef> {

  private final String[] values;
  private DocTerms currentDocTerms;

@@ -99,8 +99,22 @@ public final class SlowCollatedStringComparator extends FieldComparator {
  }

  @Override
  public Comparable<?> value(int slot) {
  public BytesRef value(int slot) {
    final String s = values[slot];
    return s == null ? null : new BytesRef(values[slot]);
  }

  @Override
  public int compareValues(BytesRef first, BytesRef second) {
    if (first == null) {
      if (second == null) {
        return 0;
      }
      return -1;
    } else if (second == null) {
      return 1;
    } else {
      return collator.compare(first, second);
    }
  }
}

@@ -31,94 +31,91 @@ import org.apache.lucene.search.FieldComparatorSource;
 */
public class DistanceFieldComparatorSource extends FieldComparatorSource {

  private DistanceFilter distanceFilter;
  private DistanceScoreDocLookupComparator dsdlc;

  public DistanceFieldComparatorSource(Filter distanceFilter) {
    this.distanceFilter = (DistanceFilter) distanceFilter;
  }

  public void cleanUp() {
    distanceFilter = null;

    if (dsdlc != null) {
      dsdlc.cleanUp();
    }

    dsdlc = null;
  }

  @Override
  public FieldComparator newComparator(String fieldname, int numHits,
      int sortPos, boolean reversed) throws IOException {
    dsdlc = new DistanceScoreDocLookupComparator(numHits);
    return dsdlc;
  }

  private class DistanceScoreDocLookupComparator extends FieldComparator {
  private class DistanceScoreDocLookupComparator extends FieldComparator<Double> {

    private double[] values;
    private double bottom;
    private int offset =0;

    public DistanceScoreDocLookupComparator(int numHits) {
      values = new double[numHits];
      return;
    }

    @Override
    public int compare(int slot1, int slot2) {
      double a = values[slot1];
      double b = values[slot2];
      if (a > b)
        return 1;
      if (a < b)
        return -1;

      return 0;
    }

    public void cleanUp() {
      distanceFilter = null;
    }

    @Override
    public int compareBottom(int doc) {
      double v2 = distanceFilter.getDistance(doc+ offset);

      if (bottom > v2) {
        return 1;
      } else if (bottom < v2) {
        return -1;
      }
      return 0;
    }

    @Override
    public void copy(int slot, int doc) {
      values[slot] = distanceFilter.getDistance(doc + offset);
    }

    @Override
    public void setBottom(int slot) {
      this.bottom = values[slot];
    }

    @Override
    public FieldComparator setNextReader(AtomicReaderContext context)
        throws IOException {
      // each reader in a segmented base
      // has an offset based on the maxDocs of previous readers
      offset = context.docBase;
      return this;
    }

    @Override
    public Comparable<Double> value(int slot) {
    public Double value(int slot) {
      return values[slot];
    }
  }
}

@@ -0,0 +1,2 @@
AnyObjectId[7810a541b8350775d61aea353538560817cce06e] was removed in git history.
Apache SVN contains full history.

@@ -0,0 +1,202 @@
[New file: the full text of the Apache License, Version 2.0 (January 2004,
http://www.apache.org/licenses/), added verbatim alongside the bundled
maven-ant-tasks jar. The standard license boilerplate is omitted here.]

@@ -0,0 +1,8 @@
Maven Ant Tasks
Copyright 2002-2010 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

@@ -28,10 +28,10 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.search.QueryWrapperFilter;

/* Tracks the stream of {@link BufferedDeletes}.
 * When DocumentsWriterPerThread flushes, its buffered

@@ -434,18 +434,16 @@ class BufferedDeletesStream {
  // Delete by query
  private synchronized long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, SegmentReader reader) throws IOException {
    long delCount = 0;
    IndexSearcher searcher = new IndexSearcher(reader);
    assert searcher.getTopReaderContext().isAtomic;
    final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
    try {
      for (QueryAndLimit ent : queriesIter) {
        Query query = ent.query;
        int limit = ent.limit;
        Weight weight = query.weight(searcher);
        Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
        if (scorer != null) {
    final AtomicReaderContext readerContext = (AtomicReaderContext) reader.getTopReaderContext();
    for (QueryAndLimit ent : queriesIter) {
      Query query = ent.query;
      int limit = ent.limit;
      final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext);
      if (docs != null) {
        final DocIdSetIterator it = docs.iterator();
        if (it != null) {
          while(true) {
            int doc = scorer.nextDoc();
            int doc = it.nextDoc();
            if (doc >= limit)
              break;

@@ -459,8 +457,6 @@ class BufferedDeletesStream {
        }
      }
    }
    } finally {
      searcher.close();
    }

    return delCount;

@@ -97,9 +97,25 @@ public final class DocumentsWriterFlushControl {
      // for this assert we must be tolerant to ram buffer changes!
      maxConfiguredRamBuffer = Math.max(maxRamMB, maxConfiguredRamBuffer);
      final long ram = flushBytes + activeBytes;
      final long ramBufferBytes = (long) (maxConfiguredRamBuffer * 1024 * 1024);
      // take peakDelta into account - worst case is that all flushing, pending and blocked DWPT had maxMem and the last doc had the peakDelta
      final long expected = (long)(2 * (maxConfiguredRamBuffer * 1024 * 1024)) + ((numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta);
      assert ram <= expected : "ram was " + ram + " expected: " + expected + " flush mem: " + flushBytes + " active: " + activeBytes + " pending: " + numPending + " flushing: " + numFlushingDWPT() + " blocked: " + numBlockedFlushes() + " peakDelta: " + peakDelta ;
      final long expected = (long)(2 * (ramBufferBytes)) + ((numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta);
      if (peakDelta < (ramBufferBytes >> 1)) {
        /*
         * if we are indexing with a very low maxRamBuffer like 0.1MB, memory can
         * easily overflow if we check out some DWPT based on docCount and have
         * several DWPT in flight indexing large documents (compared to the ram
         * buffer). This means that those DWPT and their threads will not hit
         * the stall control before asserting the memory, which would in turn
         * fail. To prevent this we only assert if the largest document seen
         * is smaller than 1/2 of the maxRamBufferMB.
         */
        assert ram <= expected : "ram was " + ram + " expected: " + expected
            + " flush mem: " + flushBytes + " activeMem: " + activeBytes
            + " pendingMem: " + numPending + " flushingMem: "
            + numFlushingDWPT() + " blockedMem: " + numBlockedFlushes()
            + " peakDeltaMem: " + peakDelta;
      }
    }
    return true;
  }

@@ -60,6 +60,11 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
    readerContext = new AtomicReaderContext(this); // emulate atomic reader!
  }

  @Override
  public String toString() {
    return "SlowMultiReaderWrapper(" + in + ")";
  }

  @Override
  public Fields fields() throws IOException {
    return MultiFields.getFields(in);

@@ -92,7 +92,7 @@ public class PreFlexFields extends FieldsProducer {

    // make sure that all index files have been read or are kept open
    // so that if an index update removes them we'll still have them
    freqStream = dir.openInput(info.name + ".frq", readBufferSize);
    freqStream = dir.openInput(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.FREQ_EXTENSION), readBufferSize);
    boolean anyProx = false;
    for (FieldInfo fi : fieldInfos) {
      if (fi.isIndexed) {

@@ -105,7 +105,7 @@ public class PreFlexFields extends FieldsProducer {
    }

    if (anyProx) {
      proxStream = dir.openInput(info.name + ".prx", readBufferSize);
      proxStream = dir.openInput(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.PROX_EXTENSION), readBufferSize);
    } else {
      proxStream = null;
    }

@@ -96,7 +96,7 @@ import org.apache.lucene.util.packed.PackedInts;
 *
 * @lucene.experimental
 */
public abstract class FieldComparator {
public abstract class FieldComparator<T> {

  /**
   * Compare hit at slot1 with hit at slot2.

@@ -176,13 +176,21 @@ public abstract class FieldComparator {
   * Return the actual value in the slot.
   *
   * @param slot the value
   * @return value in this slot upgraded to Comparable
   * @return value in this slot
   */
  public abstract Comparable<?> value(int slot);
  public abstract T value(int slot);

  /** Returns -1 if first is less than second.  Default
   *  impl to assume the type implements Comparable and
   *  invoke .compareTo; be sure to override this method if
   *  your FieldComparator's type isn't a Comparable or
   *  if your values may sometimes be null */
  @SuppressWarnings("unchecked")
  public int compareValues(T first, T second) {
    return ((Comparable<T>) first).compareTo(second);
  }

  public static abstract class NumericComparator<T extends CachedArray> extends FieldComparator {
  public static abstract class NumericComparator<T extends CachedArray, U extends Number> extends FieldComparator<U> {
    protected final CachedArrayCreator<T> creator;
    protected T cached;
    protected final boolean checkMissing;

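To make the effect of the new generic signature concrete, here is a hedged sketch of a trivial FieldComparator<Integer> written against the generified API, modeled on the DistanceScoreDocLookupComparator shown earlier in this commit; the class name and sort key (docBase-adjusted doc id) are illustrative only, not part of the change:

import java.io.IOException;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.FieldComparator;

// Illustrative only: sorts every document by its global doc id.
public class ExampleIntComparator extends FieldComparator<Integer> {
  private final int[] slots;
  private int docBase;
  private int bottom;

  public ExampleIntComparator(int numHits) {
    slots = new int[numHits];
  }

  @Override
  public int compare(int slot1, int slot2) {
    return slots[slot1] - slots[slot2];
  }

  @Override
  public int compareBottom(int doc) {
    return bottom - (docBase + doc);
  }

  @Override
  public void copy(int slot, int doc) {
    slots[slot] = docBase + doc;
  }

  @Override
  public void setBottom(int slot) {
    bottom = slots[slot];
  }

  @Override
  public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
    // each segment's docs are offset by the docBase of previous readers
    docBase = context.docBase;
    return this;
  }

  @Override
  public Integer value(int slot) {
    // The override is now covariantly typed; callers such as
    // FieldValueHitQueue.fillFields simply store it as an Object.
    return Integer.valueOf(slots[slot]);
  }
}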
@@ -203,7 +211,7 @@ public abstract class FieldComparator {

  /** Parses field's values as byte (using {@link
   *  FieldCache#getBytes} and sorts by ascending value */
  public static final class ByteComparator extends NumericComparator<ByteValues> {
  public static final class ByteComparator extends NumericComparator<ByteValues,Byte> {
    private byte[] docValues;
    private final byte[] values;
    private final byte missingValue;

@@ -252,7 +260,7 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Byte value(int slot) {
      return Byte.valueOf(values[slot]);
    }
  }

@@ -260,13 +268,12 @@ public abstract class FieldComparator {

  /** Parses field's values as double (using {@link
   *  FieldCache#getDoubles} and sorts by ascending value */
  public static final class DoubleComparator extends NumericComparator<DoubleValues> {
  public static final class DoubleComparator extends NumericComparator<DoubleValues,Double> {
    private double[] docValues;
    private final double[] values;
    private final double missingValue;
    private double bottom;

    DoubleComparator(int numHits, DoubleValuesCreator creator, Double missingValue ) {
      super( creator, missingValue != null );
      values = new double[numHits];

@@ -324,13 +331,13 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Double value(int slot) {
      return Double.valueOf(values[slot]);
    }
  }

  /** Uses float index values to sort by ascending value */
  public static final class FloatDocValuesComparator extends FieldComparator {
  public static final class FloatDocValuesComparator extends FieldComparator<Double> {
    private final double[] values;
    private Source currentReaderValues;
    private final String field;

@@ -386,14 +393,14 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<Double> value(int slot) {
    public Double value(int slot) {
      return Double.valueOf(values[slot]);
    }
  }

  /** Parses field's values as float (using {@link
   *  FieldCache#getFloats} and sorts by ascending value */
  public static final class FloatComparator extends NumericComparator<FloatValues> {
  public static final class FloatComparator extends NumericComparator<FloatValues,Float> {
    private float[] docValues;
    private final float[] values;
    private final float missingValue;

@@ -460,14 +467,14 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Float value(int slot) {
      return Float.valueOf(values[slot]);
    }
  }

  /** Parses field's values as short (using {@link
   *  FieldCache#getShorts} and sorts by ascending value */
  public static final class ShortComparator extends NumericComparator<ShortValues> {
  public static final class ShortComparator extends NumericComparator<ShortValues,Short> {
    private short[] docValues;
    private final short[] values;
    private short bottom;

@@ -516,14 +523,14 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Short value(int slot) {
      return Short.valueOf(values[slot]);
    }
  }

  /** Parses field's values as int (using {@link
   *  FieldCache#getInts} and sorts by ascending value */
  public static final class IntComparator extends NumericComparator<IntValues> {
  public static final class IntComparator extends NumericComparator<IntValues,Integer> {
    private int[] docValues;
    private final int[] values;
    private int bottom;                           // Value of bottom of queue

@@ -594,13 +601,13 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Integer value(int slot) {
      return Integer.valueOf(values[slot]);
    }
  }

  /** Loads int index values and sorts by ascending value. */
  public static final class IntDocValuesComparator extends FieldComparator {
  public static final class IntDocValuesComparator extends FieldComparator<Long> {
    private final long[] values;
    private Source currentReaderValues;
    private final String field;

@@ -660,14 +667,14 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<Long> value(int slot) {
    public Long value(int slot) {
      return Long.valueOf(values[slot]);
    }
  }

  /** Parses field's values as long (using {@link
   *  FieldCache#getLongs} and sorts by ascending value */
  public static final class LongComparator extends NumericComparator<LongValues> {
  public static final class LongComparator extends NumericComparator<LongValues,Long> {
    private long[] docValues;
    private final long[] values;
    private long bottom;

@@ -735,7 +742,7 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Long value(int slot) {
      return Long.valueOf(values[slot]);
    }
  }

@@ -746,7 +753,7 @@ public abstract class FieldComparator {
   *  using {@link TopScoreDocCollector} directly (which {@link
   *  IndexSearcher#search} uses when no {@link Sort} is
   *  specified). */
  public static final class RelevanceComparator extends FieldComparator {
  public static final class RelevanceComparator extends FieldComparator<Float> {
    private final float[] scores;
    private float bottom;
    private Scorer scorer;

@@ -791,15 +798,21 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Float value(int slot) {
      return Float.valueOf(scores[slot]);
    }

    // Override because we sort reverse of natural Float order:
    @Override
    public int compareValues(Float first, Float second) {
      // Reversed intentionally because relevance by default
      // sorts descending:
      return second.compareTo(first);
    }
  }

  /** Sorts by ascending docID */
  public static final class DocComparator extends FieldComparator {
  public static final class DocComparator extends FieldComparator<Integer> {
    private final int[] docIDs;
    private int docBase;
    private int bottom;

@@ -840,7 +853,7 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public Integer value(int slot) {
      return Integer.valueOf(docIDs[slot]);
    }
  }

@@ -854,7 +867,7 @@ public abstract class FieldComparator {
   *  to large results, this comparator will be much faster
   *  than {@link TermValComparator}.  For very small
   *  result sets it may be slower. */
  public static final class TermOrdValComparator extends FieldComparator {
  public static final class TermOrdValComparator extends FieldComparator<BytesRef> {
    /** @lucene.internal */
    final int[] ords;
    /** @lucene.internal */

@@ -920,7 +933,7 @@ public abstract class FieldComparator {
     *  the underlying array access when looking up doc->ord
     *  @lucene.internal
     */
    abstract class PerSegmentComparator extends FieldComparator {
    abstract class PerSegmentComparator extends FieldComparator<BytesRef> {

      @Override
      public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {

@@ -938,9 +951,22 @@ public abstract class FieldComparator {
      }

      @Override
      public Comparable<?> value(int slot) {
      public BytesRef value(int slot) {
        return TermOrdValComparator.this.value(slot);
      }

      @Override
      public int compareValues(BytesRef val1, BytesRef val2) {
        if (val1 == null) {
          if (val2 == null) {
            return 0;
          }
          return -1;
        } else if (val2 == null) {
          return 1;
        }
        return val1.compareTo(val2);
      }
    }

    // Used per-segment when bit width of doc->ord is 8:

@@ -1244,7 +1270,7 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public BytesRef value(int slot) {
      return values[slot];
    }
  }

@@ -1253,7 +1279,7 @@ public abstract class FieldComparator {
   *  comparisons are done using BytesRef.compareTo, which is
   *  slow for medium to large result sets but possibly
   *  very fast for very small result sets. */
  public static final class TermValComparator extends FieldComparator {
  public static final class TermValComparator extends FieldComparator<BytesRef> {

    private BytesRef[] values;
    private DocTerms docTerms;

@@ -1316,9 +1342,22 @@ public abstract class FieldComparator {
    }

    @Override
    public Comparable<?> value(int slot) {
    public BytesRef value(int slot) {
      return values[slot];
    }

    @Override
    public int compareValues(BytesRef val1, BytesRef val2) {
      if (val1 == null) {
        if (val2 == null) {
          return 0;
        }
        return -1;
      } else if (val2 == null) {
        return 1;
      }
      return val1.compareTo(val2);
    }
  }

  final protected static int binarySearch(BytesRef br, DocTermsIndex a, BytesRef key) {

@@ -40,24 +40,31 @@ public class FieldDoc extends ScoreDoc {

  /** Expert: The values which are used to sort the referenced document.
   * The order of these will match the original sort criteria given by a
   * Sort object.  Each Object will be either an Integer, Float or String,
   * depending on the type of values in the terms of the original field.
   * Sort object.  Each Object will have been returned from
   * the <code>value</code> method of the corresponding
   * FieldComparator used to sort this field.
   * @see Sort
   * @see IndexSearcher#search(Query,Filter,int,Sort)
   */
  public Comparable[] fields;
  public Object[] fields;

  /** Expert: Creates one of these objects with empty sort information. */
  public FieldDoc (int doc, float score) {
  public FieldDoc(int doc, float score) {
    super (doc, score);
  }

  /** Expert: Creates one of these objects with the given sort information. */
  public FieldDoc (int doc, float score, Comparable[] fields) {
  public FieldDoc(int doc, float score, Object[] fields) {
    super (doc, score);
    this.fields = fields;
  }

  /** Expert: Creates one of these objects with the given sort information. */
  public FieldDoc(int doc, float score, Object[] fields, int shardIndex) {
    super (doc, score, shardIndex);
    this.fields = fields;
  }

  // A convenience method for debugging.
  @Override
  public String toString() {

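Since FieldDoc.fields is now an Object[] holding whatever each FieldComparator's value(slot) returned, consumers cast per field. A small hedged sketch follows; the field layout (an int field, then a term field) and names are illustrative assumptions only:

import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.util.BytesRef;

public class SortValueExample {
  // Reads the per-field sort values filled in by IndexSearcher when
  // fillFields=true; the element types follow the comparators used.
  static void printSortValues(TopFieldDocs hits) {
    for (ScoreDoc sd : hits.scoreDocs) {
      FieldDoc fd = (FieldDoc) sd;
      Integer population = (Integer) fd.fields[0];  // from IntComparator
      BytesRef city = (BytesRef) fd.fields[1];      // from TermOrdValComparator
      System.out.println(fd.doc + " -> " + population + ", " + city.utf8ToString());
    }
  }
}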
@@ -200,7 +200,7 @@ public abstract class FieldValueHitQueue extends PriorityQueue<FieldValueHitQueu
   */
  FieldDoc fillFields(final Entry entry) {
    final int n = comparators.length;
    final Comparable<?>[] fields = new Comparable[n];
    final Object[] fields = new Object[n];
    for (int i = 0; i < n; ++i) {
      fields[i] = comparators[i].value(entry.slot);
    }

@@ -289,7 +289,7 @@ public class IndexSearcher {
   */
  public TopDocs search(Query query, Filter filter, int n)
    throws IOException {
    return search(createWeight(query), filter, n);
    return search(createNormalizedWeight(query), filter, n);
  }

  /** Lower-level search API.

@@ -310,7 +310,7 @@ public class IndexSearcher {
   */
  public void search(Query query, Filter filter, Collector results)
    throws IOException {
    search(leafContexts, createWeight(query), filter, results);
    search(leafContexts, createNormalizedWeight(query), filter, results);
  }

  /** Lower-level search API.

@@ -328,7 +328,7 @@ public class IndexSearcher {
   */
  public void search(Query query, Collector results)
    throws IOException {
    search(leafContexts, createWeight(query), null, results);
    search(leafContexts, createNormalizedWeight(query), null, results);
  }

  /** Search implementation with arbitrary sorting.  Finds

@@ -344,7 +344,7 @@ public class IndexSearcher {
   */
  public TopFieldDocs search(Query query, Filter filter, int n,
                             Sort sort) throws IOException {
    return search(createWeight(query), filter, n, sort);
    return search(createNormalizedWeight(query), filter, n, sort);
  }

  /**

@@ -357,7 +357,7 @@ public class IndexSearcher {
   */
  public TopFieldDocs search(Query query, int n,
                             Sort sort) throws IOException {
    return search(createWeight(query), null, n, sort);
    return search(createNormalizedWeight(query), null, n, sort);
  }

  /** Expert: Low-level search implementation.  Finds the top <code>n</code>

@@ -443,7 +443,7 @@ public class IndexSearcher {
   * Collector)}.</p>
   */
  protected TopFieldDocs search(Weight weight, Filter filter, int nDocs,
      Sort sort, boolean fillFields)
      Sort sort, boolean fillFields)
      throws IOException {

    if (sort == null) throw new NullPointerException();

@@ -623,7 +623,7 @@ public class IndexSearcher {
   * entire index.
   */
  public Explanation explain(Query query, int doc) throws IOException {
    return explain(createWeight(query), doc);
    return explain(createNormalizedWeight(query), doc);
  }

  /** Expert: low-level implementation method

@@ -665,13 +665,23 @@ public class IndexSearcher {
  }

  /**
   * creates a weight for <code>query</code>
   * @return new weight
   * Creates a normalized weight for a top-level {@link Query}.
   * The query is rewritten by this method and {@link Query#createWeight} called;
   * afterwards the {@link Weight} is normalized. The returned {@code Weight}
   * can then directly be used to get a {@link Scorer}.
   * @lucene.internal
   */
  protected Weight createWeight(Query query) throws IOException {
    return query.weight(this);
  public Weight createNormalizedWeight(Query query) throws IOException {
    query = rewrite(query);
    Weight weight = query.createWeight(this);
    float sum = weight.sumOfSquaredWeights();
    float norm = getSimilarityProvider().queryNorm(sum);
    if (Float.isInfinite(norm) || Float.isNaN(norm))
      norm = 1.0f;
    weight.normalize(norm);
    return weight;
  }

  /**
   * Returns this searcher's top-level {@link ReaderContext}.
   * @see IndexReader#getTopReaderContext()

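The new createNormalizedWeight javadoc above says the returned Weight can be used directly to get a Scorer, which is exactly what the QueryWrapperFilter change further below does. The following is only a hedged, self-contained sketch of that expert pattern for a single atomic (one-segment) reader, reusing calls that appear elsewhere in this commit:

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

public class NormalizedWeightSketch {
  // Assumes `reader` is atomic (a single segment), as asserted in the
  // BufferedDeletesStream and QueryWrapperFilter hunks of this commit.
  static int countMatches(IndexReader reader, Query query) throws Exception {
    final AtomicReaderContext context = (AtomicReaderContext) reader.getTopReaderContext();
    final IndexSearcher searcher = new IndexSearcher(context);
    final Weight weight = searcher.createNormalizedWeight(query);
    final Scorer scorer = weight.scorer(context, Weight.ScorerContext.def());
    int count = 0;
    if (scorer != null) {
      while (scorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        count++;
      }
    }
    return count;
  }
}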
@@ -91,21 +91,6 @@ public abstract class Query implements Cloneable {
    throw new UnsupportedOperationException();
  }

  /**
   * Expert: Constructs and initializes a Weight for a top-level query.
   */
  public Weight weight(IndexSearcher searcher) throws IOException {
    Query query = searcher.rewrite(this);
    Weight weight = query.createWeight(searcher);
    float sum = weight.sumOfSquaredWeights();
    float norm = searcher.getSimilarityProvider().queryNorm(sum);
    if (Float.isInfinite(norm) || Float.isNaN(norm))
      norm = 1.0f;
    weight.normalize(norm);
    return weight;
  }

  /** Expert: called to re-write queries into primitive queries. For example,
   * a PrefixQuery will be rewritten into a BooleanQuery that consists
   * of TermQuerys.

@@ -52,7 +52,7 @@ public class QueryWrapperFilter extends Filter {
    // get a private context that is used to rewrite, createWeight and score eventually
    assert context.reader.getTopReaderContext().isAtomic;
    final AtomicReaderContext privateContext = (AtomicReaderContext) context.reader.getTopReaderContext();
    final Weight weight = query.weight(new IndexSearcher(privateContext));
    final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
    return new DocIdSet() {
      @Override
      public DocIdSetIterator iterator() throws IOException {

@@ -17,21 +17,30 @@ package org.apache.lucene.search;
 * limitations under the License.
 */

/** Expert: Returned by low-level search implementations.
 * @see TopDocs */
/** Holds one hit in {@link TopDocs}. */
public class ScoreDoc {

  /** Expert: The score of this document for the query. */
  /** The score of this document for the query. */
  public float score;

  /** Expert: A hit document's number.
   * @see IndexSearcher#doc(int)
   */
  /** A hit document's number.
   * @see IndexSearcher#doc(int) */
  public int doc;

  /** Expert: Constructs a ScoreDoc. */
  /** Only set by {@link TopDocs#merge} */
  public int shardIndex;

  /** Constructs a ScoreDoc. */
  public ScoreDoc(int doc, float score) {
    this(doc, score, -1);
  }

  /** Constructs a ScoreDoc. */
  public ScoreDoc(int doc, float score, int shardIndex) {
    this.doc = doc;
    this.score = score;
    this.shardIndex = shardIndex;
  }

  // A convenience method for debugging.

@@ -39,5 +48,4 @@ public class ScoreDoc {
  public String toString() {
    return "doc=" + doc + " score=" + score;
  }
}

@@ -91,10 +91,10 @@ public class SortField {
  public static final int BYTES = 12;

  /** Represents sorting by document score (relevance). */
  public static final SortField FIELD_SCORE = new SortField (null, SCORE);
  public static final SortField FIELD_SCORE = new SortField(null, SCORE);

  /** Represents sorting by document number (index order). */
  public static final SortField FIELD_DOC = new SortField (null, DOC);
  public static final SortField FIELD_DOC = new SortField(null, DOC);

  private String field;
  private int type;  // defaults to determining type dynamically

@@ -111,7 +111,7 @@ public class SortField {
   *  <code>type</code> is SCORE or DOC.
   * @param type  Type of values in the terms.
   */
  public SortField (String field, int type) {
  public SortField(String field, int type) {
    initFieldType(field, type);
  }

@@ -122,7 +122,7 @@ public class SortField {
   * @param type  Type of values in the terms.
   * @param reverse True if natural order should be reversed.
   */
  public SortField (String field, int type, boolean reverse) {
  public SortField(String field, int type, boolean reverse) {
    initFieldType(field, type);
    this.reverse = reverse;
  }

@@ -140,7 +140,7 @@ public class SortField {
   * @deprecated (4.0) use EntryCreator version
   */
  @Deprecated
  public SortField (String field, FieldCache.Parser parser) {
  public SortField(String field, FieldCache.Parser parser) {
    this(field, parser, false);
  }

@@ -158,7 +158,7 @@ public class SortField {
   * @deprecated (4.0) use EntryCreator version
   */
  @Deprecated
  public SortField (String field, FieldCache.Parser parser, boolean reverse) {
  public SortField(String field, FieldCache.Parser parser, boolean reverse) {
    if (field == null) {
      throw new IllegalArgumentException("field can only be null when type is SCORE or DOC");
    }

@@ -225,7 +225,7 @@ public class SortField {
   * @param field Name of field to sort by; cannot be <code>null</code>.
   * @param comparator Returns a comparator for sorting hits.
   */
  public SortField (String field, FieldComparatorSource comparator) {
  public SortField(String field, FieldComparatorSource comparator) {
    initFieldType(field, CUSTOM);
    this.comparatorSource = comparator;
  }

@@ -235,7 +235,7 @@ public class SortField {
   * @param comparator Returns a comparator for sorting hits.
   * @param reverse True if natural order should be reversed.
   */
  public SortField (String field, FieldComparatorSource comparator, boolean reverse) {
  public SortField(String field, FieldComparatorSource comparator, boolean reverse) {
    initFieldType(field, CUSTOM);
    this.reverse = reverse;
    this.comparatorSource = comparator;

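The SortField cleanups above are cosmetic, but together with the FieldDoc and TopDocs changes they are exercised through the sorted search methods shown in the IndexSearcher hunks. A hedged usage sketch follows; the field name "price", the varargs Sort constructor, and the INT type constant are assumptions drawn from the surrounding code rather than from this hunk:

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;

public class SortedSearchSketch {
  // Sort primarily by an int field, breaking ties by relevance score.
  static TopFieldDocs topByPrice(IndexSearcher searcher, Query query) throws Exception {
    Sort sort = new Sort(new SortField("price", SortField.INT),
                         SortField.FIELD_SCORE);
    // Uses the search(Query, int, Sort) overload shown in the
    // IndexSearcher hunk above.
    return searcher.search(query, 10, sort);
  }
}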
@@ -89,7 +89,7 @@ public class TermQuery extends Query {
    public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
      final String field = term.field();
      final IndexReader reader = context.reader;
      assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight is not the same as the current reader's top-reader";
      assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
      final TermState state = termStates
          .get(context.ord);
      if (state == null) { // term is not present in that reader

@@ -17,15 +17,21 @@ package org.apache.lucene.search;
 * limitations under the License.
 */

import java.io.IOException;

import org.apache.lucene.util.PriorityQueue;

/** Represents hits returned by {@link
 * IndexSearcher#search(Query,Filter,int)} and {@link
 * IndexSearcher#search(Query,int)}. */
public class TopDocs {

  /** The total number of hits for the query.
  */
  /** The total number of hits for the query. */
  public int totalHits;

  /** The top hits for the query. */
  public ScoreDoc[] scoreDocs;

  /** Stores the maximum score value encountered, needed for normalizing. */
  private float maxScore;

@@ -34,12 +40,12 @@ public class TopDocs {
   * scores are not tracked, this returns {@link Float#NaN}.
   */
  public float getMaxScore() {
    return maxScore;
    return maxScore;
  }

  /** Sets the maximum score value encountered. */
  public void setMaxScore(float maxScore) {
    this.maxScore=maxScore;
    this.maxScore=maxScore;
  }

  /** Constructs a TopDocs with a default maxScore=Float.NaN. */

@@ -52,4 +58,199 @@ public class TopDocs {
    this.scoreDocs = scoreDocs;
    this.maxScore = maxScore;
  }

  // Refers to one hit:
  private static class ShardRef {
    // Which shard (index into shardHits[]):
    final int shardIndex;

    // Which hit within the shard:
    int hitIndex;

    public ShardRef(int shardIndex) {
      this.shardIndex = shardIndex;
    }

    @Override
    public String toString() {
      return "ShardRef(shardIndex=" + shardIndex + " hitIndex=" + hitIndex + ")";
    }
  };

  // Specialized MergeSortQueue that just merges by
  // relevance score, descending:
  private static class ScoreMergeSortQueue extends PriorityQueue<ShardRef> {
    final ScoreDoc[][] shardHits;

    public ScoreMergeSortQueue(TopDocs[] shardHits) {
      super(shardHits.length);
      this.shardHits = new ScoreDoc[shardHits.length][];
      for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
        this.shardHits[shardIDX] = shardHits[shardIDX].scoreDocs;
      }
    }

    // Returns true if first is < second
    public boolean lessThan(ShardRef first, ShardRef second) {
      assert first != second;
      final float firstScore = shardHits[first.shardIndex][first.hitIndex].score;
      final float secondScore = shardHits[second.shardIndex][second.hitIndex].score;

      if (firstScore < secondScore) {
        return false;
      } else if (firstScore > secondScore) {
        return true;
      } else {
        // Tie break: earlier shard wins
        if (first.shardIndex < second.shardIndex) {
          return true;
        } else if (first.shardIndex > second.shardIndex) {
          return false;
        } else {
          // Tie break in same shard: resolve however the
          // shard had resolved it:
          assert first.hitIndex != second.hitIndex;
          return first.hitIndex < second.hitIndex;
        }
      }
    }
  }

  private static class MergeSortQueue extends PriorityQueue<ShardRef> {
    // These are really FieldDoc instances:
    final ScoreDoc[][] shardHits;
    final FieldComparator[] comparators;
    final int[] reverseMul;

    public MergeSortQueue(Sort sort, TopDocs[] shardHits) throws IOException {
      super(shardHits.length);
      this.shardHits = new ScoreDoc[shardHits.length][];
      for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
        final ScoreDoc[] shard = shardHits[shardIDX].scoreDocs;
        //System.out.println("  init shardIdx=" + shardIDX + " hits=" + shard);
        if (shard != null) {
          this.shardHits[shardIDX] = shard;
          // Fail gracefully if API is misused:
          for(int hitIDX=0;hitIDX<shard.length;hitIDX++) {
            final ScoreDoc sd = shard[hitIDX];
            if (!(sd instanceof FieldDoc)) {
              throw new IllegalArgumentException("shard " + shardIDX + " was not sorted by the provided Sort (expected FieldDoc but got ScoreDoc)");
            }
            final FieldDoc fd = (FieldDoc) sd;
            if (fd.fields == null) {
              throw new IllegalArgumentException("shard " + shardIDX + " did not set sort field values (FieldDoc.fields is null); you must pass fillFields=true to IndexSearcher.search on each shard");
            }
          }
        }
      }

      final SortField[] sortFields = sort.getSort();
      comparators = new FieldComparator[sortFields.length];
      reverseMul = new int[sortFields.length];
      for(int compIDX=0;compIDX<sortFields.length;compIDX++) {
        final SortField sortField = sortFields[compIDX];
        comparators[compIDX] = sortField.getComparator(1, compIDX);
        reverseMul[compIDX] = sortField.getReverse() ? -1 : 1;
      }
    }

    // Returns true if first is < second
    @SuppressWarnings("unchecked")
    public boolean lessThan(ShardRef first, ShardRef second) {
      assert first != second;
      final FieldDoc firstFD = (FieldDoc) shardHits[first.shardIndex][first.hitIndex];
      final FieldDoc secondFD = (FieldDoc) shardHits[second.shardIndex][second.hitIndex];
      //System.out.println("  lessThan:\n     first=" + first + " doc=" + firstFD.doc + " score=" + firstFD.score + "\n    second=" + second + " doc=" + secondFD.doc + " score=" + secondFD.score);
|
||||
|
||||
for(int compIDX=0;compIDX<comparators.length;compIDX++) {
|
||||
final FieldComparator comp = comparators[compIDX];
|
||||
//System.out.println(" cmp idx=" + compIDX + " cmp1=" + firstFD.fields[compIDX] + " cmp2=" + secondFD.fields[compIDX] + " reverse=" + reverseMul[compIDX]);
|
||||
|
||||
final int cmp = reverseMul[compIDX] * comp.compareValues(firstFD.fields[compIDX], secondFD.fields[compIDX]);
|
||||
|
||||
if (cmp != 0) {
|
||||
//System.out.println(" return " + (cmp < 0));
|
||||
return cmp < 0;
|
||||
}
|
||||
}
|
||||
|
||||
// Tie break: earlier shard wins
|
||||
if (first.shardIndex < second.shardIndex) {
|
||||
//System.out.println(" return tb true");
|
||||
return true;
|
||||
} else if (first.shardIndex > second.shardIndex) {
|
||||
//System.out.println(" return tb false");
|
||||
return false;
|
||||
} else {
|
||||
// Tie break in same shard: resolve however the
|
||||
// shard had resolved it:
|
||||
//System.out.println(" return tb " + (first.hitIndex < second.hitIndex));
|
||||
assert first.hitIndex != second.hitIndex;
|
||||
return first.hitIndex < second.hitIndex;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Returns a new TopDocs, containing topN results across
 *  the provided TopDocs, sorting by the specified {@link
 *  Sort}.  Each of the TopDocs must have been sorted by
 *  the same Sort, and sort field values must have been
 *  filled (ie, <code>fillFields=true</code> must be
 *  passed to {@link
 *  TopFieldCollector#create}.
 *
 *  <p>Pass sort=null to merge sort by score descending.
 *
 *  @lucene.experimental */
public static TopDocs merge(Sort sort, int topN, TopDocs[] shardHits) throws IOException {

  final PriorityQueue<ShardRef> queue;
  if (sort == null) {
    queue = new ScoreMergeSortQueue(shardHits);
  } else {
    queue = new MergeSortQueue(sort, shardHits);
  }

  int totalHitCount = 0;
  float maxScore = Float.MIN_VALUE;
  for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
    final TopDocs shard = shardHits[shardIDX];
    if (shard.scoreDocs != null && shard.scoreDocs.length > 0) {
      totalHitCount += shard.totalHits;
      queue.add(new ShardRef(shardIDX));
      maxScore = Math.max(maxScore, shard.getMaxScore());
      //System.out.println("  maxScore now " + maxScore + " vs " + shard.getMaxScore());
    }
  }

  final ScoreDoc[] hits = new ScoreDoc[Math.min(topN, totalHitCount)];

  int hitUpto = 0;
  while(hitUpto < hits.length) {
    assert queue.size() > 0;
    ShardRef ref = queue.pop();
    final ScoreDoc hit = shardHits[ref.shardIndex].scoreDocs[ref.hitIndex++];
    if (sort == null) {
      hits[hitUpto] = new ScoreDoc(hit.doc, hit.score, ref.shardIndex);
    } else {
      hits[hitUpto] = new FieldDoc(hit.doc, hit.score, ((FieldDoc) hit).fields, ref.shardIndex);
    }

    //System.out.println("  hitUpto=" + hitUpto);
    //System.out.println("    doc=" + hits[hitUpto].doc + " score=" + hits[hitUpto].score);

    hitUpto++;

    if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) {
      // Not done with this these TopDocs yet:
      queue.add(ref);
    }
  }

  if (sort == null) {
    return new TopDocs(totalHitCount, hits, maxScore);
  } else {
    return new TopFieldDocs(totalHitCount, hits, sort.getSort(), maxScore);
  }
}
}

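A minimal sketch of how a caller might drive the new merge API when sorting by score; the shard searchers, query, and page size below are assumptions for illustration, not part of this change:

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;

    class ShardSearchSketch {
      // Run the same Query against each shard's IndexSearcher, then combine
      // the per-shard top hits by descending score (sort == null).
      static TopDocs searchShards(IndexSearcher[] shards, Query query, int topN) throws IOException {
        final TopDocs[] shardHits = new TopDocs[shards.length];
        for (int i = 0; i < shards.length; i++) {
          shardHits[i] = shards[i].search(query, topN);
        }
        // Each merged ScoreDoc carries shardIndex, so the caller can fetch
        // the stored document from the shard it came from.
        return TopDocs.merge(null, topN, shardHits);
      }
    }
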
@@ -187,7 +187,7 @@ public class CustomScoreQuery extends Query {
boolean qStrict;

public CustomWeight(IndexSearcher searcher) throws IOException {
  this.subQueryWeight = subQuery.weight(searcher);
  this.subQueryWeight = subQuery.createWeight(searcher);
  this.valSrcWeights = new Weight[valSrcQueries.length];
  for(int i = 0; i < valSrcQueries.length; i++) {
    this.valSrcWeights[i] = valSrcQueries[i].createWeight(searcher);

@ -308,16 +308,24 @@ public class RandomIndexWriter implements Closeable {
|
|||
return getReader(true);
|
||||
}
|
||||
|
||||
private boolean doRandomOptimize = true;
|
||||
|
||||
public void setDoRandomOptimize(boolean v) {
|
||||
doRandomOptimize = v;
|
||||
}
|
||||
|
||||
private void doRandomOptimize() throws IOException {
|
||||
final int segCount = w.getSegmentCount();
|
||||
if (r.nextBoolean() || segCount == 0) {
|
||||
// full optimize
|
||||
w.optimize();
|
||||
} else {
|
||||
// partial optimize
|
||||
final int limit = _TestUtil.nextInt(r, 1, segCount);
|
||||
w.optimize(limit);
|
||||
assert w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
|
||||
if (doRandomOptimize) {
|
||||
final int segCount = w.getSegmentCount();
|
||||
if (r.nextBoolean() || segCount == 0) {
|
||||
// full optimize
|
||||
w.optimize();
|
||||
} else {
|
||||
// partial optimize
|
||||
final int limit = _TestUtil.nextInt(r, 1, segCount);
|
||||
w.optimize(limit);
|
||||
assert w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
|
||||
}
|
||||
}
|
||||
switchDoDocValues();
|
||||
}
|
||||
|
|
|
@ -22,6 +22,8 @@ import java.io.Closeable;
|
|||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.FieldInfos;
|
||||
import org.apache.lucene.index.IndexFileNames;
|
||||
import org.apache.lucene.index.codecs.preflex.PreFlexCodec;
|
||||
import org.apache.lucene.index.codecs.preflex.TermInfo;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
|
@ -102,7 +104,9 @@ final class TermInfosWriter implements Closeable {
|
|||
}
|
||||
|
||||
try {
|
||||
directory.deleteFile(segment + (isIndex ? ".tii" : ".tis"));
|
||||
directory.deleteFile(IndexFileNames.segmentFileName(segment, "",
|
||||
(isIndex ? PreFlexCodec.TERMS_INDEX_EXTENSION
|
||||
: PreFlexCodec.TERMS_EXTENSION)));
|
||||
} catch (IOException ignored) {
|
||||
}
|
||||
}
|
||||
|
@ -119,7 +123,9 @@ final class TermInfosWriter implements Closeable {
|
|||
indexInterval = interval;
|
||||
fieldInfos = fis;
|
||||
isIndex = isi;
|
||||
output = directory.createOutput(segment + (isIndex ? ".tii" : ".tis"));
|
||||
output = directory.createOutput(IndexFileNames.segmentFileName(segment, "",
|
||||
(isIndex ? PreFlexCodec.TERMS_INDEX_EXTENSION
|
||||
: PreFlexCodec.TERMS_EXTENSION)));
|
||||
boolean success = false;
|
||||
try {
|
||||
output.writeInt(FORMAT_CURRENT); // write format
|
||||
|
@ -139,7 +145,9 @@ final class TermInfosWriter implements Closeable {
|
|||
}
|
||||
|
||||
try {
|
||||
directory.deleteFile(segment + (isIndex ? ".tii" : ".tis"));
|
||||
directory.deleteFile(IndexFileNames.segmentFileName(segment, "",
|
||||
(isIndex ? PreFlexCodec.TERMS_INDEX_EXTENSION
|
||||
: PreFlexCodec.TERMS_EXTENSION)));
|
||||
} catch (IOException ignored) {
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,90 @@
|
|||
package org.apache.lucene.search;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
|
||||
import org.apache.lucene.index.IndexReader.ReaderContext;
|
||||
|
||||
/**
|
||||
* Helper class that adds some extra checks to ensure correct
|
||||
* usage of {@code IndexSearcher} and {@code Weight}.
|
||||
* TODO: Extend this by more checks, that's just a start.
|
||||
*/
|
||||
public class AssertingIndexSearcher extends IndexSearcher {
|
||||
public AssertingIndexSearcher(IndexReader r) {
|
||||
super(r);
|
||||
}
|
||||
|
||||
public AssertingIndexSearcher(ReaderContext context) {
|
||||
super(context);
|
||||
}
|
||||
|
||||
public AssertingIndexSearcher(IndexReader r, ExecutorService ex) {
|
||||
super(r, ex);
|
||||
}
|
||||
|
||||
public AssertingIndexSearcher(ReaderContext context, ExecutorService ex) {
|
||||
super(context, ex);
|
||||
}
|
||||
|
||||
/** Ensures, that the returned {@code Weight} is not normalized again, which may produce wrong scores. */
|
||||
@Override
|
||||
public Weight createNormalizedWeight(Query query) throws IOException {
|
||||
final Weight w = super.createNormalizedWeight(query);
|
||||
return new Weight() {
|
||||
@Override
|
||||
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
|
||||
return w.explain(context, doc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query getQuery() {
|
||||
return w.getQuery();
|
||||
}
|
||||
|
||||
@Override
|
||||
public float getValue() {
|
||||
return w.getValue();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void normalize(float norm) {
|
||||
throw new IllegalStateException("Weight already normalized.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
|
||||
return w.scorer(context, scorerContext);
|
||||
}
|
||||
|
||||
@Override
|
||||
public float sumOfSquaredWeights() throws IOException {
|
||||
throw new IllegalStateException("Weight already normalized.");
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean scoresDocsOutOfOrder() {
|
||||
return w.scoresDocsOutOfOrder();
|
||||
}
|
||||
};
|
||||
}
|
||||
}
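A minimal sketch of the misuse this wrapper is meant to catch; the reader and query variables are assumptions:

    // AssertingIndexSearcher hands back an already-normalized Weight, so a
    // second normalization attempt now fails loudly instead of silently
    // skewing scores.
    IndexSearcher searcher = new AssertingIndexSearcher(reader);
    Weight w = searcher.createNormalizedWeight(query);
    w.normalize(1.0f);  // throws IllegalStateException("Weight already normalized.")
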
@ -198,7 +198,7 @@ public class QueryUtils {
|
|||
public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
|
||||
//System.out.println("Checking "+q);
|
||||
final AtomicReaderContext[] readerContextArray = ReaderUtil.leaves(s.getTopReaderContext());
|
||||
if (q.weight(s).scoresDocsOutOfOrder()) return; // in this case order of skipTo() might differ from that of next().
|
||||
if (s.createNormalizedWeight(q).scoresDocsOutOfOrder()) return; // in this case order of skipTo() might differ from that of next().
|
||||
|
||||
final int skip_op = 0;
|
||||
final int next_op = 1;
|
||||
|
@ -241,7 +241,7 @@ public class QueryUtils {
|
|||
lastDoc[0] = doc;
|
||||
try {
|
||||
if (scorer == null) {
|
||||
Weight w = q.weight(s);
|
||||
Weight w = s.createNormalizedWeight(q);
|
||||
scorer = w.scorer(readerContextArray[leafPtr], ScorerContext.def());
|
||||
}
|
||||
|
||||
|
@ -286,7 +286,7 @@ public class QueryUtils {
|
|||
if (lastReader[0] != null) {
|
||||
final IndexReader previousReader = lastReader[0];
|
||||
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
|
||||
Weight w = q.weight(indexSearcher);
|
||||
Weight w = indexSearcher.createNormalizedWeight(q);
|
||||
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
|
||||
if (scorer != null) {
|
||||
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
|
||||
|
@ -312,7 +312,7 @@ public class QueryUtils {
|
|||
// previous reader, hits NO_MORE_DOCS
|
||||
final IndexReader previousReader = lastReader[0];
|
||||
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
|
||||
Weight w = q.weight(indexSearcher);
|
||||
Weight w = indexSearcher.createNormalizedWeight(q);
|
||||
Scorer scorer = w.scorer((AtomicReaderContext)previousReader.getTopReaderContext(), ScorerContext.def());
|
||||
if (scorer != null) {
|
||||
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
|
||||
|
@ -343,7 +343,7 @@ public class QueryUtils {
|
|||
try {
|
||||
long startMS = System.currentTimeMillis();
|
||||
for (int i=lastDoc[0]+1; i<=doc; i++) {
|
||||
Weight w = q.weight(s);
|
||||
Weight w = s.createNormalizedWeight(q);
|
||||
Scorer scorer = w.scorer(context[leafPtr], ScorerContext.def());
|
||||
Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
|
||||
Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
|
||||
|
@ -370,7 +370,7 @@ public class QueryUtils {
|
|||
if (lastReader[0] != null) {
|
||||
final IndexReader previousReader = lastReader[0];
|
||||
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
|
||||
Weight w = q.weight(indexSearcher);
|
||||
Weight w = indexSearcher.createNormalizedWeight(q);
|
||||
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
|
||||
if (scorer != null) {
|
||||
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
|
||||
|
@ -394,7 +394,7 @@ public class QueryUtils {
|
|||
// previous reader, hits NO_MORE_DOCS
|
||||
final IndexReader previousReader = lastReader[0];
|
||||
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
|
||||
Weight w = q.weight(indexSearcher);
|
||||
Weight w = indexSearcher.createNormalizedWeight(q);
|
||||
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
|
||||
if (scorer != null) {
|
||||
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
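Each hunk above makes the same substitution; as a short sketch of the caller-side change, with q and s standing for any Query and IndexSearcher:

    // Before this change (now deprecated): the Query built its own Weight.
    Weight before = q.weight(s);
    // After: the IndexSearcher creates and normalizes the Weight itself.
    Weight after = s.createNormalizedWeight(q);
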
@ -55,6 +55,7 @@ import org.apache.lucene.index.codecs.standard.StandardCodec;
|
|||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.FieldCache;
|
||||
import org.apache.lucene.search.FieldCache.CacheEntry;
|
||||
import org.apache.lucene.search.AssertingIndexSearcher;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
|
@ -1231,13 +1232,11 @@ public abstract class LuceneTestCase extends Assert {
|
|||
* with one that returns null for getSequentialSubReaders.
|
||||
*/
|
||||
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
if (maybeWrap && rarely()) {
|
||||
return new IndexSearcher(new SlowMultiReaderWrapper(r));
|
||||
} else {
|
||||
return new IndexSearcher(r);
|
||||
r = new SlowMultiReaderWrapper(r);
|
||||
}
|
||||
return random.nextBoolean() ? new AssertingIndexSearcher(r) : new AssertingIndexSearcher(r.getTopReaderContext());
|
||||
} else {
|
||||
int threads = 0;
|
||||
final ExecutorService ex = (random.nextBoolean()) ? null
|
||||
|
@ -1246,20 +1245,31 @@ public abstract class LuceneTestCase extends Assert {
|
|||
if (ex != null && VERBOSE) {
|
||||
System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads");
|
||||
}
|
||||
return new IndexSearcher(r.getTopReaderContext(), ex) {
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
if (ex != null) {
|
||||
ex.shutdown();
|
||||
try {
|
||||
ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
return random.nextBoolean() ?
|
||||
new AssertingIndexSearcher(r, ex) {
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
shutdownExecutorService(ex);
|
||||
}
|
||||
}
|
||||
};
|
||||
} : new AssertingIndexSearcher(r.getTopReaderContext(), ex) {
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
shutdownExecutorService(ex);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
static void shutdownExecutorService(ExecutorService ex) {
|
||||
if (ex != null) {
|
||||
ex.shutdown();
|
||||
try {
|
||||
ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
e.printStackTrace();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -27,10 +27,10 @@ import java.io.OutputStream;
|
|||
import java.io.PrintStream;
|
||||
import java.lang.reflect.Method;
|
||||
import java.util.Enumeration;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.Map;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipFile;
|
||||
|
||||
|
@ -46,6 +46,9 @@ import org.apache.lucene.index.MergeScheduler;
|
|||
import org.apache.lucene.index.TieredMergePolicy;
|
||||
import org.apache.lucene.index.codecs.Codec;
|
||||
import org.apache.lucene.index.codecs.CodecProvider;
|
||||
import org.apache.lucene.search.FieldDoc;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.junit.Assert;
|
||||
|
||||
|
@ -468,4 +471,24 @@ public class _TestUtil {
|
|||
newName.append(suffix);
|
||||
return new File(directory, newName.toString());
|
||||
}
|
||||
|
||||
public static void assertEquals(TopDocs expected, TopDocs actual) {
|
||||
Assert.assertEquals("wrong total hits", expected.totalHits, actual.totalHits);
|
||||
Assert.assertEquals("wrong maxScore", expected.getMaxScore(), actual.getMaxScore(), 0.0);
|
||||
Assert.assertEquals("wrong hit count", expected.scoreDocs.length, actual.scoreDocs.length);
|
||||
for(int hitIDX=0;hitIDX<expected.scoreDocs.length;hitIDX++) {
|
||||
final ScoreDoc expectedSD = expected.scoreDocs[hitIDX];
|
||||
final ScoreDoc actualSD = actual.scoreDocs[hitIDX];
|
||||
Assert.assertEquals("wrong hit docID", expectedSD.doc, actualSD.doc);
|
||||
Assert.assertEquals("wrong hit score", expectedSD.score, actualSD.score, 0.0);
|
||||
if (expectedSD instanceof FieldDoc) {
|
||||
Assert.assertTrue(actualSD instanceof FieldDoc);
|
||||
Assert.assertEquals("wrong sort field values",
|
||||
((FieldDoc) expectedSD).fields,
|
||||
((FieldDoc) actualSD).fields);
|
||||
} else {
|
||||
Assert.assertFalse(actualSD instanceof FieldDoc);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1375,6 +1375,11 @@ public class TestIndexWriter extends LuceneTestCase {
|
|||
IndexReader r2 = r.reopen();
|
||||
assertTrue(r != r2);
|
||||
files = Arrays.asList(dir.listAll());
|
||||
|
||||
// NOTE: here we rely on "Windows" behavior, ie, even
|
||||
// though IW wanted to delete _0.cfs since it was
|
||||
// optimized away, because we have a reader open
|
||||
// against this file, it should still be here:
|
||||
assertTrue(files.contains("_0.cfs"));
|
||||
// optimize created this
|
||||
//assertTrue(files.contains("_2.cfs"));
|
||||
|
|
|
@ -405,30 +405,30 @@ public class TestNRTThreads extends LuceneTestCase {
|
|||
for(int thread=0;thread<NUM_SEARCH_THREADS;thread++) {
|
||||
searchThreads[thread] = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
public void run() {
|
||||
try {
|
||||
TermsEnum termsEnum = MultiFields.getTerms(s.getIndexReader(), "body").iterator();
|
||||
int seenTermCount = 0;
|
||||
int shift;
|
||||
int trigger;
|
||||
if (totTermCount.get() == 0) {
|
||||
if (totTermCount.get() < 10) {
|
||||
shift = 0;
|
||||
trigger = 1;
|
||||
} else {
|
||||
shift = random.nextInt(totTermCount.get()/10);
|
||||
trigger = totTermCount.get()/10;
|
||||
shift = random.nextInt(trigger);
|
||||
}
|
||||
while(System.currentTimeMillis() < searchStopTime) {
|
||||
BytesRef term = termsEnum.next();
|
||||
if (term == null) {
|
||||
if (seenTermCount == 0) {
|
||||
if (seenTermCount < 10) {
|
||||
break;
|
||||
}
|
||||
totTermCount.set(seenTermCount);
|
||||
seenTermCount = 0;
|
||||
trigger = totTermCount.get()/10;
|
||||
//System.out.println("trigger " + trigger);
|
||||
shift = random.nextInt(totTermCount.get()/10);
|
||||
shift = random.nextInt(trigger);
|
||||
termsEnum.seek(new BytesRef(""));
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -103,7 +103,7 @@ final class JustCompileSearch {
|
|||
|
||||
}
|
||||
|
||||
static final class JustCompileFieldComparator extends FieldComparator {
|
||||
static final class JustCompileFieldComparator extends FieldComparator<Object> {
|
||||
|
||||
@Override
|
||||
public int compare(int slot1, int slot2) {
|
||||
|
@ -132,10 +132,10 @@ final class JustCompileSearch {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable<?> value(int slot) {
|
||||
public Object value(int slot) {
|
||||
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
static final class JustCompileFieldComparatorSource extends FieldComparatorSource {
|
||||
|
|
|
@ -173,7 +173,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
|
|||
|
||||
QueryUtils.check(random, dq, s);
|
||||
assertTrue(s.getTopReaderContext().isAtomic);
|
||||
final Weight dw = dq.weight(s);
|
||||
final Weight dw = s.createNormalizedWeight(dq);
|
||||
final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
|
||||
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
|
||||
if (skipOk) {
|
||||
|
@ -188,7 +188,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
|
|||
dq.add(tq("dek", "DOES_NOT_EXIST"));
|
||||
assertTrue(s.getTopReaderContext().isAtomic);
|
||||
QueryUtils.check(random, dq, s);
|
||||
final Weight dw = dq.weight(s);
|
||||
final Weight dw = s.createNormalizedWeight(dq);
|
||||
final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
|
||||
assertTrue("firsttime skipTo found no match",
|
||||
ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
|
||||
|
|
|
@ -139,7 +139,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
|
|||
|
||||
@Override
|
||||
public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
|
||||
return new FieldComparator() {
|
||||
return new FieldComparator<Integer>() {
|
||||
|
||||
FieldCache.DocTermsIndex idIndex;
|
||||
private final int[] values = new int[numHits];
|
||||
|
@ -184,7 +184,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable<?> value(int slot) {
|
||||
public Integer value(int slot) {
|
||||
return Integer.valueOf(values[slot]);
|
||||
}
|
||||
};
|
||||
|
|
|
@ -511,7 +511,7 @@ public class TestSort extends LuceneTestCase {
|
|||
assertMatches (empty, queryX, sort, "");
|
||||
}
|
||||
|
||||
static class MyFieldComparator extends FieldComparator {
|
||||
static class MyFieldComparator extends FieldComparator<Integer> {
|
||||
int[] docValues;
|
||||
int[] slotValues;
|
||||
int bottomValue;
|
||||
|
@ -527,6 +527,7 @@ public class TestSort extends LuceneTestCase {
|
|||
|
||||
@Override
|
||||
public int compare(int slot1, int slot2) {
|
||||
// values are small enough that overflow won't happen
|
||||
return slotValues[slot1] - slotValues[slot2];
|
||||
}
|
||||
|
||||
|
@ -553,7 +554,7 @@ public class TestSort extends LuceneTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable<?> value(int slot) {
|
||||
public Integer value(int slot) {
|
||||
return Integer.valueOf(slotValues[slot]);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -73,7 +73,7 @@ public class TestTermScorer extends LuceneTestCase {
|
|||
Term allTerm = new Term(FIELD, "all");
|
||||
TermQuery termQuery = new TermQuery(allTerm);
|
||||
|
||||
Weight weight = termQuery.weight(indexSearcher);
|
||||
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
|
||||
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
|
||||
Scorer ts = weight.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
|
||||
// we have 2 documents with the term all in them, one document for all the
|
||||
|
@ -134,7 +134,7 @@ public class TestTermScorer extends LuceneTestCase {
|
|||
Term allTerm = new Term(FIELD, "all");
|
||||
TermQuery termQuery = new TermQuery(allTerm);
|
||||
|
||||
Weight weight = termQuery.weight(indexSearcher);
|
||||
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
|
||||
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
|
||||
Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
|
||||
assertTrue("next did not return a doc",
|
||||
|
@ -152,7 +152,7 @@ public class TestTermScorer extends LuceneTestCase {
|
|||
Term allTerm = new Term(FIELD, "all");
|
||||
TermQuery termQuery = new TermQuery(allTerm);
|
||||
|
||||
Weight weight = termQuery.weight(indexSearcher);
|
||||
Weight weight = indexSearcher.createNormalizedWeight(termQuery);
|
||||
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
|
||||
|
||||
Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
|
||||
|
|
|
@ -0,0 +1,244 @@
|
|||
package org.apache.lucene.search;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.NumericField;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.ReaderUtil;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
public class TestTopDocsMerge extends LuceneTestCase {
|
||||
|
||||
private static class ShardSearcher extends IndexSearcher {
|
||||
private final IndexReader.AtomicReaderContext[] ctx;
|
||||
|
||||
public ShardSearcher(IndexReader.AtomicReaderContext ctx, IndexReader.ReaderContext parent) {
|
||||
super(parent);
|
||||
this.ctx = new IndexReader.AtomicReaderContext[] {ctx};
|
||||
}
|
||||
|
||||
public void search(Weight weight, Collector collector) throws IOException {
|
||||
search(ctx, weight, null, collector);
|
||||
}
|
||||
|
||||
public TopDocs search(Weight weight, int topN) throws IOException {
|
||||
return search(ctx, weight, null, topN);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ShardSearcher(" + ctx[0] + ")";
|
||||
}
|
||||
}
|
||||
|
||||
public void testSort() throws Exception {
|
||||
|
||||
IndexReader reader = null;
|
||||
Directory dir = null;
|
||||
|
||||
final int numDocs = atLeast(1000);
|
||||
//final int numDocs = atLeast(50);
|
||||
|
||||
final String[] tokens = new String[] {"a", "b", "c", "d", "e"};
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: make index");
|
||||
}
|
||||
|
||||
{
|
||||
dir = newDirectory();
|
||||
final RandomIndexWriter w = new RandomIndexWriter(random, dir);
|
||||
// w.setDoRandomOptimize(false);
|
||||
|
||||
// w.w.getConfig().setMaxBufferedDocs(atLeast(100));
|
||||
|
||||
final String[] content = new String[atLeast(20)];
|
||||
|
||||
for(int contentIDX=0;contentIDX<content.length;contentIDX++) {
|
||||
final StringBuilder sb = new StringBuilder();
|
||||
final int numTokens = _TestUtil.nextInt(random, 1, 10);
|
||||
for(int tokenIDX=0;tokenIDX<numTokens;tokenIDX++) {
|
||||
sb.append(tokens[random.nextInt(tokens.length)]).append(' ');
|
||||
}
|
||||
content[contentIDX] = sb.toString();
|
||||
}
|
||||
|
||||
for(int docIDX=0;docIDX<numDocs;docIDX++) {
|
||||
final Document doc = new Document();
|
||||
doc.add(newField("string", _TestUtil.randomRealisticUnicodeString(random), Field.Index.NOT_ANALYZED));
|
||||
doc.add(newField("text", content[random.nextInt(content.length)], Field.Index.ANALYZED));
|
||||
doc.add(new NumericField("float").setFloatValue(random.nextFloat()));
|
||||
final int intValue;
|
||||
if (random.nextInt(100) == 17) {
|
||||
intValue = Integer.MIN_VALUE;
|
||||
} else if (random.nextInt(100) == 17) {
|
||||
intValue = Integer.MAX_VALUE;
|
||||
} else {
|
||||
intValue = random.nextInt();
|
||||
}
|
||||
doc.add(new NumericField("int").setIntValue(intValue));
|
||||
if (VERBOSE) {
|
||||
System.out.println(" doc=" + doc);
|
||||
}
|
||||
w.addDocument(doc);
|
||||
}
|
||||
|
||||
reader = w.getReader();
|
||||
w.close();
|
||||
}
|
||||
|
||||
// NOTE: sometimes reader has just one segment, which is
|
||||
// important to test
|
||||
final IndexSearcher searcher = newSearcher(reader);
|
||||
IndexReader[] subReaders = searcher.getIndexReader().getSequentialSubReaders();
|
||||
if (subReaders == null) {
|
||||
subReaders = new IndexReader[] {searcher.getIndexReader()};
|
||||
}
|
||||
final ShardSearcher[] subSearchers = new ShardSearcher[subReaders.length];
|
||||
final IndexReader.ReaderContext ctx = searcher.getTopReaderContext();
|
||||
|
||||
if (ctx instanceof IndexReader.AtomicReaderContext) {
|
||||
assert subSearchers.length == 1;
|
||||
subSearchers[0] = new ShardSearcher((IndexReader.AtomicReaderContext) ctx, ctx);
|
||||
} else {
|
||||
final IndexReader.CompositeReaderContext compCTX = (IndexReader.CompositeReaderContext) ctx;
|
||||
for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
|
||||
subSearchers[searcherIDX] = new ShardSearcher(compCTX.leaves[searcherIDX], compCTX);
|
||||
}
|
||||
}
|
||||
|
||||
final List<SortField> sortFields = new ArrayList<SortField>();
|
||||
sortFields.add(new SortField("string", SortField.STRING, true));
|
||||
sortFields.add(new SortField("string", SortField.STRING, false));
|
||||
sortFields.add(new SortField("int", SortField.INT, true));
|
||||
sortFields.add(new SortField("int", SortField.INT, false));
|
||||
sortFields.add(new SortField("float", SortField.FLOAT, true));
|
||||
sortFields.add(new SortField("float", SortField.FLOAT, false));
|
||||
sortFields.add(new SortField(null, SortField.SCORE, true));
|
||||
sortFields.add(new SortField(null, SortField.SCORE, false));
|
||||
sortFields.add(new SortField(null, SortField.DOC, true));
|
||||
sortFields.add(new SortField(null, SortField.DOC, false));
|
||||
|
||||
final int[] docStarts = new int[subSearchers.length];
|
||||
int docBase = 0;
|
||||
for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
|
||||
docStarts[subIDX] = docBase;
|
||||
docBase += subReaders[subIDX].maxDoc();
|
||||
//System.out.println("docStarts[" + subIDX + "]=" + docStarts[subIDX]);
|
||||
}
|
||||
|
||||
for(int iter=0;iter<1000*RANDOM_MULTIPLIER;iter++) {
|
||||
|
||||
// TODO: custom FieldComp...
|
||||
final Query query = new TermQuery(new Term("text", tokens[random.nextInt(tokens.length)]));
|
||||
|
||||
final Sort sort;
|
||||
if (random.nextInt(10) == 4) {
|
||||
// Sort by score
|
||||
sort = null;
|
||||
} else {
|
||||
final SortField[] randomSortFields = new SortField[_TestUtil.nextInt(random, 1, 3)];
|
||||
for(int sortIDX=0;sortIDX<randomSortFields.length;sortIDX++) {
|
||||
randomSortFields[sortIDX] = sortFields.get(random.nextInt(sortFields.size()));
|
||||
}
|
||||
sort = new Sort(randomSortFields);
|
||||
}
|
||||
|
||||
final int numHits = _TestUtil.nextInt(random, 1, numDocs+5);
|
||||
//final int numHits = 5;
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: search query=" + query + " sort=" + sort + " numHits=" + numHits);
|
||||
}
|
||||
|
||||
// First search on whole index:
|
||||
final TopDocs topHits;
|
||||
if (sort == null) {
|
||||
topHits = searcher.search(query, numHits);
|
||||
} else {
|
||||
final TopFieldCollector c = TopFieldCollector.create(sort, numHits, true, true, true, random.nextBoolean());
|
||||
searcher.search(query, c);
|
||||
topHits = c.topDocs(0, numHits);
|
||||
}
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" top search: " + topHits.totalHits + " totalHits; hits=" + (topHits.scoreDocs == null ? "null" : topHits.scoreDocs.length));
|
||||
if (topHits.scoreDocs != null) {
|
||||
for(int hitIDX=0;hitIDX<topHits.scoreDocs.length;hitIDX++) {
|
||||
final ScoreDoc sd = topHits.scoreDocs[hitIDX];
|
||||
System.out.println(" doc=" + sd.doc + " score=" + sd.score);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ... then all shards:
|
||||
final Weight w = searcher.createNormalizedWeight(query);
|
||||
|
||||
final TopDocs[] shardHits = new TopDocs[subSearchers.length];
|
||||
for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
|
||||
final TopDocs subHits;
|
||||
final ShardSearcher subSearcher = subSearchers[shardIDX];
|
||||
if (sort == null) {
|
||||
subHits = subSearcher.search(w, numHits);
|
||||
} else {
|
||||
final TopFieldCollector c = TopFieldCollector.create(sort, numHits, true, true, true, random.nextBoolean());
|
||||
subSearcher.search(w, c);
|
||||
subHits = c.topDocs(0, numHits);
|
||||
}
|
||||
|
||||
shardHits[shardIDX] = subHits;
|
||||
if (VERBOSE) {
|
||||
System.out.println(" shard=" + shardIDX + " " + subHits.totalHits + " totalHits hits=" + (subHits.scoreDocs == null ? "null" : subHits.scoreDocs.length));
|
||||
if (subHits.scoreDocs != null) {
|
||||
for(ScoreDoc sd : subHits.scoreDocs) {
|
||||
System.out.println(" doc=" + sd.doc + " score=" + sd.score);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Merge:
|
||||
final TopDocs mergedHits = TopDocs.merge(sort, numHits, shardHits);
|
||||
|
||||
if (mergedHits.scoreDocs != null) {
|
||||
// Make sure the returned shards are correct:
|
||||
for(int hitIDX=0;hitIDX<mergedHits.scoreDocs.length;hitIDX++) {
|
||||
final ScoreDoc sd = mergedHits.scoreDocs[hitIDX];
|
||||
assertEquals("doc=" + sd.doc + " wrong shard",
|
||||
ReaderUtil.subIndex(sd.doc, docStarts),
|
||||
sd.shardIndex);
|
||||
}
|
||||
}
|
||||
|
||||
_TestUtil.assertEquals(topHits, mergedHits);
|
||||
}
|
||||
searcher.close();
|
||||
reader.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
@ -170,22 +170,20 @@ public class TestNearSpansOrdered extends LuceneTestCase {
|
|||
*/
|
||||
public void testSpanNearScorerSkipTo1() throws Exception {
|
||||
SpanNearQuery q = makeQuery();
|
||||
Weight w = q.weight(searcher);
|
||||
Weight w = searcher.createNormalizedWeight(q);
|
||||
ReaderContext topReaderContext = searcher.getTopReaderContext();
|
||||
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
|
||||
Scorer s = w.scorer(leaves[0], ScorerContext.def());
|
||||
assertEquals(1, s.advance(1));
|
||||
}
|
||||
|
||||
/**
|
||||
* not a direct test of NearSpans, but a demonstration of how/when
|
||||
* this causes problems
|
||||
*/
|
||||
public void testSpanNearScorerExplain() throws Exception {
|
||||
SpanNearQuery q = makeQuery();
|
||||
ReaderContext topReaderContext = searcher.getTopReaderContext();
|
||||
AtomicReaderContext[] leaves = ReaderUtil.leaves(topReaderContext);
|
||||
|
||||
Explanation e = q.weight(searcher).explain(leaves[0], 1);
|
||||
Explanation e = searcher.explain(q, 1);
|
||||
assertTrue("Scorer explanation value for doc#1 isn't positive: "
|
||||
+ e.toString(),
|
||||
0.0f < e.getValue());
|
||||
|
|
|
@ -434,7 +434,7 @@ public class TestSpans extends LuceneTestCase {
|
|||
slop,
|
||||
ordered);
|
||||
|
||||
spanScorer = snq.weight(searcher).scorer(leaves[i], ScorerContext.def());
|
||||
spanScorer = searcher.createNormalizedWeight(snq).scorer(leaves[i], ScorerContext.def());
|
||||
} finally {
|
||||
searcher.setSimilarityProvider(oldSim);
|
||||
}
|
||||
|
|
|
@ -122,7 +122,7 @@ abstract public class AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> exten
|
|||
SearchGroup<GROUP_VALUE_TYPE> searchGroup = new SearchGroup<GROUP_VALUE_TYPE>();
|
||||
searchGroup.groupValue = group.groupValue;
|
||||
if (fillFields) {
|
||||
searchGroup.sortValues = new Comparable[sortFieldCount];
|
||||
searchGroup.sortValues = new Object[sortFieldCount];
|
||||
for(int sortFieldIDX=0;sortFieldIDX<sortFieldCount;sortFieldIDX++) {
|
||||
searchGroup.sortValues[sortFieldIDX] = comparators[sortFieldIDX].value(group.comparatorSlot);
|
||||
}
|
||||
|
|
|
@ -348,7 +348,7 @@ public class BlockGroupingCollector extends Collector {
|
|||
}
|
||||
totalGroupedHitCount += og.count;
|
||||
|
||||
final Comparable[] groupSortValues;
|
||||
final Object[] groupSortValues;
|
||||
|
||||
if (fillSortFields) {
|
||||
groupSortValues = new Comparable[comparators.length];
|
||||
|
|
|
@ -40,13 +40,13 @@ public class GroupDocs<GROUP_VALUE_TYPE> {
|
|||
|
||||
/** Matches the groupSort passed to {@link
|
||||
* AbstractFirstPassGroupingCollector}. */
|
||||
public final Comparable[] groupSortValues;
|
||||
public final Object[] groupSortValues;
|
||||
|
||||
public GroupDocs(float maxScore,
|
||||
int totalHits,
|
||||
ScoreDoc[] scoreDocs,
|
||||
GROUP_VALUE_TYPE groupValue,
|
||||
Comparable[] groupSortValues) {
|
||||
Object[] groupSortValues) {
|
||||
this.maxScore = maxScore;
|
||||
this.totalHits = totalHits;
|
||||
this.scoreDocs = scoreDocs;
|
||||
|
|
|
@ -17,6 +17,14 @@ package org.apache.lucene.search.grouping;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.lucene.search.FieldComparator;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
|
||||
/**
|
||||
* Represents a group that is found during the first pass search.
|
||||
*
|
||||
|
@@ -27,6 +35,287 @@ public class SearchGroup<GROUP_VALUE_TYPE> {
/** The value that defines this group  */
public GROUP_VALUE_TYPE groupValue;

/** The sort values used during sorting. Can be <code>null</code>. */
public Comparable[] sortValues;
/** The sort values used during sorting. These are the
 *  groupSort field values of the highest rank document
 *  (by the groupSort) within the group.  Can be
 *  <code>null</code> if <code>fillFields=false</code> had
 *  been passed to {@link AbstractFirstPassGroupingCollector#getTopGroups} */
public Object[] sortValues;

@Override
public String toString() {
  return("SearchGroup(groupValue=" + groupValue + " sortValues=" + Arrays.toString(sortValues) + ")");
}

private static class ShardIter<T> {
|
||||
public final Iterator<SearchGroup<T>> iter;
|
||||
public final int shardIndex;
|
||||
|
||||
public ShardIter(Collection<SearchGroup<T>> shard, int shardIndex) {
|
||||
this.shardIndex = shardIndex;
|
||||
iter = shard.iterator();
|
||||
assert iter.hasNext();
|
||||
}
|
||||
|
||||
public SearchGroup<T> next() {
|
||||
assert iter.hasNext();
|
||||
final SearchGroup<T> group = iter.next();
|
||||
if (group.sortValues == null) {
|
||||
throw new IllegalArgumentException("group.sortValues is null; you must pass fillFields=true to the first pass collector");
|
||||
}
|
||||
return group;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ShardIter(shard=" + shardIndex + ")";
|
||||
}
|
||||
}
|
||||
|
||||
// Holds all shards currently on the same group
|
||||
private static class MergedGroup<T> {
|
||||
|
||||
// groupValue may be null!
|
||||
public final T groupValue;
|
||||
|
||||
public Object[] topValues;
|
||||
public final List<ShardIter<T>> shards = new ArrayList<ShardIter<T>>();
|
||||
public int minShardIndex;
|
||||
public boolean processed;
|
||||
public boolean inQueue;
|
||||
|
||||
public MergedGroup(T groupValue) {
|
||||
this.groupValue = groupValue;
|
||||
}
|
||||
|
||||
// Only for assert
|
||||
private boolean neverEquals(Object _other) {
|
||||
if (_other instanceof MergedGroup) {
|
||||
MergedGroup other = (MergedGroup) _other;
|
||||
if (groupValue == null) {
|
||||
assert other.groupValue != null;
|
||||
} else {
|
||||
assert !groupValue.equals(other.groupValue);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object _other) {
|
||||
// We never have another MergedGroup instance with
|
||||
// same groupValue
|
||||
assert neverEquals(_other);
|
||||
|
||||
if (_other instanceof MergedGroup) {
|
||||
MergedGroup other = (MergedGroup) _other;
|
||||
if (groupValue == null) {
|
||||
return other == null;
|
||||
} else {
|
||||
return groupValue.equals(other);
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
if (groupValue == null) {
|
||||
return 0;
|
||||
} else {
|
||||
return groupValue.hashCode();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class GroupComparator<T> implements Comparator<MergedGroup<T>> {
|
||||
|
||||
public final FieldComparator[] comparators;
|
||||
public final int[] reversed;
|
||||
|
||||
public GroupComparator(Sort groupSort) throws IOException {
|
||||
final SortField[] sortFields = groupSort.getSort();
|
||||
comparators = new FieldComparator[sortFields.length];
|
||||
reversed = new int[sortFields.length];
|
||||
for (int compIDX = 0; compIDX < sortFields.length; compIDX++) {
|
||||
final SortField sortField = sortFields[compIDX];
|
||||
comparators[compIDX] = sortField.getComparator(1, compIDX);
|
||||
reversed[compIDX] = sortField.getReverse() ? -1 : 1;
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public int compare(MergedGroup<T> group, MergedGroup<T> other) {
|
||||
if (group == other) {
|
||||
return 0;
|
||||
}
|
||||
//System.out.println("compare group=" + group + " other=" + other);
|
||||
final Object[] groupValues = group.topValues;
|
||||
final Object[] otherValues = other.topValues;
|
||||
//System.out.println(" groupValues=" + groupValues + " otherValues=" + otherValues);
|
||||
for (int compIDX = 0;compIDX < comparators.length; compIDX++) {
|
||||
final int c = reversed[compIDX] * comparators[compIDX].compareValues(groupValues[compIDX],
|
||||
otherValues[compIDX]);
|
||||
if (c != 0) {
|
||||
return c;
|
||||
}
|
||||
}
|
||||
|
||||
// Tie break by min shard index:
|
||||
assert group.minShardIndex != other.minShardIndex;
|
||||
return group.minShardIndex - other.minShardIndex;
|
||||
}
|
||||
}
|
||||
|
||||
private static class GroupMerger<T> {
|
||||
|
||||
private final GroupComparator<T> groupComp;
|
||||
private final SortedSet<MergedGroup<T>> queue;
|
||||
private final Map<T,MergedGroup<T>> groupsSeen;
|
||||
|
||||
public GroupMerger(Sort groupSort) throws IOException {
|
||||
groupComp = new GroupComparator<T>(groupSort);
|
||||
queue = new TreeSet<MergedGroup<T>>(groupComp);
|
||||
groupsSeen = new HashMap<T,MergedGroup<T>>();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void updateNextGroup(int topN, ShardIter<T> shard) {
|
||||
while(shard.iter.hasNext()) {
|
||||
final SearchGroup<T> group = shard.next();
|
||||
MergedGroup<T> mergedGroup = groupsSeen.get(group.groupValue);
|
||||
final boolean isNew = mergedGroup == null;
|
||||
//System.out.println(" next group=" + (group.groupValue == null ? "null" : ((BytesRef) group.groupValue).utf8ToString()) + " sort=" + Arrays.toString(group.sortValues));
|
||||
|
||||
if (isNew) {
|
||||
// Start a new group:
|
||||
//System.out.println(" new");
|
||||
mergedGroup = new MergedGroup<T>(group.groupValue);
|
||||
mergedGroup.minShardIndex = shard.shardIndex;
|
||||
assert group.sortValues != null;
|
||||
mergedGroup.topValues = group.sortValues;
|
||||
groupsSeen.put(group.groupValue, mergedGroup);
|
||||
mergedGroup.inQueue = true;
|
||||
queue.add(mergedGroup);
|
||||
} else if (mergedGroup.processed) {
|
||||
// This shard produced a group that we already
|
||||
// processed; move on to next group...
|
||||
continue;
|
||||
} else {
|
||||
//System.out.println(" old");
|
||||
boolean competes = false;
|
||||
for(int compIDX=0;compIDX<groupComp.comparators.length;compIDX++) {
|
||||
final int cmp = groupComp.reversed[compIDX] * groupComp.comparators[compIDX].compareValues(group.sortValues[compIDX],
|
||||
mergedGroup.topValues[compIDX]);
|
||||
if (cmp < 0) {
|
||||
// Definitely competes
|
||||
competes = true;
|
||||
break;
|
||||
} else if (cmp > 0) {
|
||||
// Definitely does not compete
|
||||
break;
|
||||
} else if (compIDX == groupComp.comparators.length-1) {
|
||||
if (shard.shardIndex < mergedGroup.minShardIndex) {
|
||||
competes = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//System.out.println(" competes=" + competes);
|
||||
|
||||
if (competes) {
|
||||
// Group's sort changed -- remove & re-insert
|
||||
if (mergedGroup.inQueue) {
|
||||
queue.remove(mergedGroup);
|
||||
}
|
||||
mergedGroup.topValues = group.sortValues;
|
||||
mergedGroup.minShardIndex = shard.shardIndex;
|
||||
queue.add(mergedGroup);
|
||||
mergedGroup.inQueue = true;
|
||||
}
|
||||
}
|
||||
|
||||
mergedGroup.shards.add(shard);
|
||||
break;
|
||||
}
|
||||
|
||||
// Prune un-competitive groups:
|
||||
while(queue.size() > topN) {
|
||||
// TODO java 1.6: .pollLast
|
||||
final MergedGroup<T> group = queue.last();
|
||||
//System.out.println("PRUNE: " + group);
|
||||
queue.remove(group);
|
||||
group.inQueue = false;
|
||||
}
|
||||
}
|
||||
|
||||
public Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> shards, int offset, int topN) {
|
||||
|
||||
final int maxQueueSize = offset + topN;
|
||||
|
||||
//System.out.println("merge");
|
||||
// Init queue:
|
||||
for(int shardIDX=0;shardIDX<shards.size();shardIDX++) {
|
||||
final Collection<SearchGroup<T>> shard = shards.get(shardIDX);
|
||||
if (!shard.isEmpty()) {
|
||||
//System.out.println(" insert shard=" + shardIDX);
|
||||
updateNextGroup(maxQueueSize, new ShardIter<T>(shard, shardIDX));
|
||||
}
|
||||
}
|
||||
|
||||
// Pull merged topN groups:
|
||||
final List<SearchGroup<T>> newTopGroups = new ArrayList<SearchGroup<T>>();
|
||||
|
||||
int count = 0;
|
||||
|
||||
while(queue.size() != 0) {
|
||||
// TODO Java 1.6: pollFirst()
|
||||
final MergedGroup<T> group = queue.first();
|
||||
queue.remove(group);
|
||||
group.processed = true;
|
||||
//System.out.println(" pop: shards=" + group.shards + " group=" + (group.groupValue == null ? "null" : (((BytesRef) group.groupValue).utf8ToString())) + " sortValues=" + Arrays.toString(group.topValues));
|
||||
if (count++ >= offset) {
|
||||
final SearchGroup<T> newGroup = new SearchGroup<T>();
|
||||
newGroup.groupValue = group.groupValue;
|
||||
newGroup.sortValues = group.topValues;
|
||||
newTopGroups.add(newGroup);
|
||||
if (newTopGroups.size() == topN) {
|
||||
break;
|
||||
}
|
||||
//} else {
|
||||
// System.out.println(" skip < offset");
|
||||
}
|
||||
|
||||
// Advance all iters in this group:
|
||||
for(ShardIter<T> shardIter : group.shards) {
|
||||
updateNextGroup(maxQueueSize, shardIter);
|
||||
}
|
||||
}
|
||||
|
||||
if (newTopGroups.size() == 0) {
|
||||
return null;
|
||||
} else {
|
||||
return newTopGroups;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Merges multiple collections of top groups, for example
 *  obtained from separate index shards.  The provided
 *  groupSort must match how the groups were sorted, and
 *  the provided SearchGroups must have been computed
 *  with fillFields=true passed to {@link
 *  AbstractFirstPassGroupingCollector#getTopGroups}.
 *
 *  <p>NOTE: this returns null if the topGroups is empty.
 */
public static <T> Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> topGroups, int offset, int topN, Sort groupSort)
    throws IOException {
  if (topGroups.size() == 0) {
    return null;
  } else {
    return new GroupMerger<T>(groupSort).merge(topGroups, offset, topN);
  }
}
}

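A minimal, hypothetical sketch of the first-pass merge across shards; the per-shard collectors, group sort, and group count are assumptions:

    // Each shard runs its own first-pass collector with fillFields=true, then
    // the coordinating node merges the per-shard top groups into one ranked list.
    List<Collection<SearchGroup<BytesRef>>> perShardGroups = new ArrayList<Collection<SearchGroup<BytesRef>>>();
    for (AbstractFirstPassGroupingCollector<BytesRef> collector : firstPassCollectors) {
      Collection<SearchGroup<BytesRef>> groups = collector.getTopGroups(0, true);
      if (groups != null) {
        perShardGroups.add(groups);
      }
    }
    // May return null if every shard came back empty.
    Collection<SearchGroup<BytesRef>> topGroups = SearchGroup.merge(perShardGroups, 0, 10, groupSort);
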
@ -1,7 +1,5 @@
|
|||
package org.apache.lucene.search.grouping;
|
||||
|
||||
import org.apache.lucene.search.SortField;
|
||||
|
||||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
|
@ -19,6 +17,13 @@ import org.apache.lucene.search.SortField;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
|
||||
/** Represents result returned by a grouping search.
|
||||
*
|
||||
* @lucene.experimental */
|
||||
|
@ -58,4 +63,103 @@ public class TopGroups<GROUP_VALUE_TYPE> {
|
|||
this.groups = oldTopGroups.groups;
|
||||
this.totalGroupCount = totalGroupCount;
|
||||
}
|
||||
|
||||
/** Merges an array of TopGroups, for example obtained
 *  from the second-pass collector across multiple
 *  shards.  Each TopGroups must have been sorted by the
 *  same groupSort and docSort, and the top groups passed
 *  to all second-pass collectors must be the same.
 *
 *  <b>NOTE</b>: this cannot merge totalGroupCount; ie the
 *  returned TopGroups will have null totalGroupCount.
 *
 *  <b>NOTE</b>: the topDocs in each GroupDocs is actually
 *  an instance of TopDocsAndShards
 */
public static <T> TopGroups<T> merge(TopGroups<T>[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN)
    throws IOException {
|
||||
//System.out.println("TopGroups.merge");
|
||||
|
||||
if (shardGroups.length == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
int totalHitCount = 0;
|
||||
int totalGroupedHitCount = 0;
|
||||
|
||||
final int numGroups = shardGroups[0].groups.length;
|
||||
for(TopGroups<T> shard : shardGroups) {
|
||||
if (numGroups != shard.groups.length) {
|
||||
throw new IllegalArgumentException("number of groups differs across shards; you must pass same top groups to all shards' second-pass collector");
|
||||
}
|
||||
totalHitCount += shard.totalHitCount;
|
||||
totalGroupedHitCount += shard.totalGroupedHitCount;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
final GroupDocs<T>[] mergedGroupDocs = new GroupDocs[numGroups];
|
||||
|
||||
final TopDocs[] shardTopDocs = new TopDocs[shardGroups.length];
|
||||
|
||||
for(int groupIDX=0;groupIDX<numGroups;groupIDX++) {
|
||||
final T groupValue = shardGroups[0].groups[groupIDX].groupValue;
|
||||
//System.out.println(" merge groupValue=" + groupValue + " sortValues=" + Arrays.toString(shardGroups[0].groups[groupIDX].groupSortValues));
|
||||
float maxScore = Float.MIN_VALUE;
|
||||
int totalHits = 0;
|
||||
for(int shardIDX=0;shardIDX<shardGroups.length;shardIDX++) {
|
||||
//System.out.println(" shard=" + shardIDX);
|
||||
final TopGroups<T> shard = shardGroups[shardIDX];
|
||||
final GroupDocs shardGroupDocs = shard.groups[groupIDX];
|
||||
if (groupValue == null) {
|
||||
if (shardGroupDocs.groupValue != null) {
|
||||
throw new IllegalArgumentException("group values differ across shards; you must pass same top groups to all shards' second-pass collector");
|
||||
}
|
||||
} else if (!groupValue.equals(shardGroupDocs.groupValue)) {
|
||||
throw new IllegalArgumentException("group values differ across shards; you must pass same top groups to all shards' second-pass collector");
|
||||
}
|
||||
|
||||
/*
|
||||
for(ScoreDoc sd : shardGroupDocs.scoreDocs) {
|
||||
System.out.println(" doc=" + sd.doc);
|
||||
}
|
||||
*/
|
||||
|
||||
shardTopDocs[shardIDX] = new TopDocs(shardGroupDocs.totalHits,
|
||||
shardGroupDocs.scoreDocs,
|
||||
shardGroupDocs.maxScore);
|
||||
maxScore = Math.max(maxScore, shardGroupDocs.maxScore);
|
||||
totalHits += shardGroupDocs.totalHits;
|
||||
}
|
||||
|
||||
final TopDocs mergedTopDocs = TopDocs.merge(docSort, docOffset + docTopN, shardTopDocs);
|
||||
|
||||
// Slice;
|
||||
final ScoreDoc[] mergedScoreDocs;
|
||||
if (docOffset == 0) {
|
||||
mergedScoreDocs = mergedTopDocs.scoreDocs;
|
||||
} else if (docOffset >= mergedTopDocs.scoreDocs.length) {
|
||||
mergedScoreDocs = new ScoreDoc[0];
|
||||
} else {
|
||||
mergedScoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length - docOffset];
|
||||
System.arraycopy(mergedTopDocs.scoreDocs,
|
||||
docOffset,
|
||||
mergedScoreDocs,
|
||||
0,
|
||||
mergedTopDocs.scoreDocs.length - docOffset);
|
||||
}
|
||||
//System.out.println("SHARDS=" + Arrays.toString(mergedTopDocs.shardIndex));
|
||||
mergedGroupDocs[groupIDX] = new GroupDocs<T>(maxScore,
|
||||
totalHits,
|
||||
mergedScoreDocs,
|
||||
groupValue,
|
||||
shardGroups[0].groups[groupIDX].groupSortValues);
|
||||
}
|
||||
|
||||
return new TopGroups<T>(groupSort.getSort(),
|
||||
docSort == null ? null : docSort.getSort(),
|
||||
totalHitCount,
|
||||
totalGroupedHitCount,
|
||||
mergedGroupDocs);
|
||||
}
|
||||
}
|
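A minimal sketch of how this merge API is meant to be driven, mirroring the test code further below: each shard runs its own second-pass collector against the same merged first-pass groups, and the per-shard results are then combined with TopGroups.merge. This is not part of the patch; the helper class, the "group" field name and the per-shard searchers are placeholders for illustration.

    import java.io.IOException;
    import java.util.Collection;

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.SearchGroup;
    import org.apache.lucene.search.grouping.TermSecondPassGroupingCollector;
    import org.apache.lucene.search.grouping.TopGroups;
    import org.apache.lucene.util.BytesRef;

    // Hypothetical helper, not part of the patch: one IndexSearcher per shard,
    // first-pass groups already merged (e.g. via SearchGroup.merge).
    class ShardedGroupingExample {
      static TopGroups<BytesRef> mergeAcrossShards(IndexSearcher[] shardSearchers, Query query,
          Collection<SearchGroup<BytesRef>> mergedTopGroups, Sort groupSort, Sort docSort,
          int docOffset, int docTopN) throws IOException {
        @SuppressWarnings("unchecked")
        final TopGroups<BytesRef>[] perShard = new TopGroups[shardSearchers.length];
        for (int shardIDX = 0; shardIDX < shardSearchers.length; shardIDX++) {
          // Second pass on every shard uses the same merged top groups.
          final TermSecondPassGroupingCollector c = new TermSecondPassGroupingCollector(
              "group", mergedTopGroups, groupSort, docSort, docOffset + docTopN, true, true, true);
          shardSearchers[shardIDX].search(query, c);
          perShard[shardIDX] = c.getTopGroups(0);
        }
        // The merged TopGroups has a null totalGroupCount, as noted above.
        return TopGroups.merge(perShard, groupSort, docSort, docOffset, docTopN);
      }
    }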
||||
|
|
|
@ -17,6 +17,9 @@
|
|||
|
||||
package org.apache.lucene.search.grouping;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.lucene.analysis.MockAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
|
@ -28,11 +31,9 @@ import org.apache.lucene.search.*;
|
|||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.ReaderUtil;
|
||||
import org.apache.lucene.util._TestUtil;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
|
||||
// TODO
|
||||
// - should test relevance sort too
|
||||
// - test null
|
||||
|
@ -244,7 +245,6 @@ public class TestGrouping extends LuceneTestCase {
|
|||
return fields;
|
||||
}
|
||||
|
||||
/*
|
||||
private String groupToString(BytesRef b) {
|
||||
if (b == null) {
|
||||
return "null";
|
||||
|
@ -252,7 +252,6 @@ public class TestGrouping extends LuceneTestCase {
|
|||
return b.utf8ToString();
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
private TopGroups<BytesRef> slowGrouping(GroupDoc[] groupDocs,
|
||||
String searchTerm,
|
||||
|
@ -418,6 +417,38 @@ public class TestGrouping extends LuceneTestCase {
|
|||
return r;
|
||||
}
|
||||
|
||||
private static class ShardState {
|
||||
|
||||
public final ShardSearcher[] subSearchers;
|
||||
public final int[] docStarts;
|
||||
|
||||
public ShardState(IndexSearcher s) {
|
||||
IndexReader[] subReaders = s.getIndexReader().getSequentialSubReaders();
|
||||
if (subReaders == null) {
|
||||
subReaders = new IndexReader[] {s.getIndexReader()};
|
||||
}
|
||||
subSearchers = new ShardSearcher[subReaders.length];
|
||||
final IndexReader.ReaderContext ctx = s.getTopReaderContext();
|
||||
if (ctx instanceof IndexReader.AtomicReaderContext) {
|
||||
assert subSearchers.length == 1;
|
||||
subSearchers[0] = new ShardSearcher((IndexReader.AtomicReaderContext) ctx, ctx);
|
||||
} else {
|
||||
final IndexReader.CompositeReaderContext compCTX = (IndexReader.CompositeReaderContext) ctx;
|
||||
for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
|
||||
subSearchers[searcherIDX] = new ShardSearcher(compCTX.leaves[searcherIDX], compCTX);
|
||||
}
|
||||
}
|
||||
|
||||
docStarts = new int[subSearchers.length];
|
||||
int docBase = 0;
|
||||
for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
|
||||
docStarts[subIDX] = docBase;
|
||||
docBase += subReaders[subIDX].maxDoc();
|
||||
//System.out.println("docStarts[" + subIDX + "]=" + docStarts[subIDX]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testRandom() throws Exception {
|
||||
for(int iter=0;iter<3;iter++) {
|
||||
|
||||
|
@ -525,7 +556,8 @@ public class TestGrouping extends LuceneTestCase {
|
|||
Directory dir2 = null;
|
||||
|
||||
try {
|
||||
final IndexSearcher s = new IndexSearcher(r);
|
||||
final IndexSearcher s = newSearcher(r);
|
||||
final ShardState shards = new ShardState(s);
|
||||
|
||||
for(int contentID=0;contentID<3;contentID++) {
|
||||
final ScoreDoc[] hits = s.search(new TermQuery(new Term("content", "real"+contentID)), numDocs).scoreDocs;
|
||||
|
@ -549,7 +581,8 @@ public class TestGrouping extends LuceneTestCase {
|
|||
final Filter lastDocInBlock = new CachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("groupend", "x"))));
|
||||
final int[] docIDToID2 = FieldCache.DEFAULT.getInts(r2, "id");
|
||||
|
||||
final IndexSearcher s2 = new IndexSearcher(r2);
|
||||
final IndexSearcher s2 = newSearcher(r2);
|
||||
final ShardState shards2 = new ShardState(s2);
|
||||
|
||||
// Reader2 only increases maxDoc() vs reader, which
|
||||
// means a monotonic shift in scores, so we can
|
||||
|
@ -604,7 +637,7 @@ public class TestGrouping extends LuceneTestCase {
|
|||
}
|
||||
|
||||
final int topNGroups = _TestUtil.nextInt(random, 1, 30);
|
||||
//final int topNGroups = 4;
|
||||
//final int topNGroups = 10;
|
||||
final int docsPerGroup = _TestUtil.nextInt(random, 1, 50);
|
||||
|
||||
final int groupOffset = _TestUtil.nextInt(random, 0, (topNGroups-1)/2);
|
||||
|
@ -658,7 +691,9 @@ public class TestGrouping extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
s.search(new TermQuery(new Term("content", searchTerm)), c);
|
||||
// Search top reader:
|
||||
final Query q = new TermQuery(new Term("content", searchTerm));
|
||||
s.search(q, c);
|
||||
|
||||
if (doCache && !useWrappingCollector) {
|
||||
if (cCache.isCached()) {
|
||||
|
@ -679,6 +714,18 @@ public class TestGrouping extends LuceneTestCase {
|
|||
|
||||
final Collection<SearchGroup<BytesRef>> topGroups = c1.getTopGroups(groupOffset, fillFields);
|
||||
final TopGroups groupsResult;
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: topGroups:");
|
||||
if (topGroups == null) {
|
||||
System.out.println(" null");
|
||||
} else {
|
||||
for(SearchGroup<BytesRef> groupx : topGroups) {
|
||||
System.out.println(" " + groupToString(groupx.groupValue) + " sort=" + Arrays.toString(groupx.sortValues));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, q, groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores);
|
||||
|
||||
if (topGroups != null) {
|
||||
|
||||
|
@ -734,7 +781,13 @@ public class TestGrouping extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
assertEquals(docIDToID, expectedGroups, groupsResult, true, getScores);
|
||||
assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores);
|
||||
|
||||
// Confirm merged shards match:
|
||||
assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores);
|
||||
if (topGroupsShards != null) {
|
||||
verifyShards(shards.docStarts, topGroupsShards);
|
||||
}
|
||||
|
||||
final boolean needsScores = getScores || getMaxScores || docSort == null;
|
||||
final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
|
||||
|
@ -758,6 +811,8 @@ public class TestGrouping extends LuceneTestCase {
|
|||
groupsResult2 = tempTopGroups2;
|
||||
}
|
||||
|
||||
final TopGroups<BytesRef> topGroupsBlockShards = searchShards(s2, shards2.subSearchers, q, groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores);
|
||||
|
||||
if (expectedGroups != null) {
|
||||
// Fixup scores for reader2
|
||||
for (GroupDocs groupDocsHits : expectedGroups.groups) {
|
||||
|
@ -799,8 +854,11 @@ public class TestGrouping extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
assertEquals(docIDToID2, expectedGroups, groupsResult2, false, getScores);
|
||||
assertEquals(docIDToID2, expectedGroups, groupsResult2, false, true, true, getScores);
|
||||
assertEquals(docIDToID2, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores);
|
||||
}
|
||||
s.close();
|
||||
s2.close();
|
||||
} finally {
|
||||
FieldCache.DEFAULT.purge(r);
|
||||
if (r2 != null) {
|
||||
|
@ -816,7 +874,93 @@ public class TestGrouping extends LuceneTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues, boolean testScores) {
|
||||
private void verifyShards(int[] docStarts, TopGroups<BytesRef> topGroups) {
|
||||
for(GroupDocs group : topGroups.groups) {
|
||||
for(int hitIDX=0;hitIDX<group.scoreDocs.length;hitIDX++) {
|
||||
final ScoreDoc sd = group.scoreDocs[hitIDX];
|
||||
assertEquals("doc=" + sd.doc + " wrong shard",
|
||||
ReaderUtil.subIndex(sd.doc, docStarts),
|
||||
sd.shardIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void assertEquals(Collection<SearchGroup<BytesRef>> groups1, Collection<SearchGroup<BytesRef>> groups2, boolean doSortValues) {
|
||||
assertEquals(groups1.size(), groups2.size());
|
||||
final Iterator<SearchGroup<BytesRef>> iter1 = groups1.iterator();
|
||||
final Iterator<SearchGroup<BytesRef>> iter2 = groups2.iterator();
|
||||
|
||||
while(iter1.hasNext()) {
|
||||
assertTrue(iter2.hasNext());
|
||||
|
||||
SearchGroup<BytesRef> group1 = iter1.next();
|
||||
SearchGroup<BytesRef> group2 = iter2.next();
|
||||
|
||||
assertEquals(group1.groupValue, group2.groupValue);
|
||||
if (doSortValues) {
|
||||
assertEquals(group1.sortValues, group2.sortValues);
|
||||
}
|
||||
}
|
||||
assertFalse(iter2.hasNext());
|
||||
}
|
||||
|
||||
  private TopGroups<BytesRef> searchShards(IndexSearcher topSearcher, ShardSearcher[] subSearchers, Query query, Sort groupSort, Sort docSort, int groupOffset, int topNGroups, int docOffset,
                                           int topNDocs, boolean getScores, boolean getMaxScores) throws Exception {

    // TODO: swap in caching, all groups collector here
    // too...
    if (VERBOSE) {
      System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers));
    }
    // Run 1st pass collector to get top groups per shard
    final Weight w = topSearcher.createNormalizedWeight(query);
    final List<Collection<SearchGroup<BytesRef>>> shardGroups = new ArrayList<Collection<SearchGroup<BytesRef>>>();
    for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
      final TermFirstPassGroupingCollector c = new TermFirstPassGroupingCollector("group", groupSort, groupOffset+topNGroups);
      subSearchers[shardIDX].search(w, c);
      final Collection<SearchGroup<BytesRef>> topGroups = c.getTopGroups(0, true);
      if (topGroups != null) {
        if (VERBOSE) {
          System.out.println("  shard " + shardIDX + " s=" + subSearchers[shardIDX] + " " + topGroups.size() + " groups:");
          for(SearchGroup<BytesRef> group : topGroups) {
            System.out.println("    " + groupToString(group.groupValue) + " sort=" + Arrays.toString(group.sortValues));
          }
        }
        shardGroups.add(topGroups);
      }
    }

    final Collection<SearchGroup<BytesRef>> mergedTopGroups = SearchGroup.merge(shardGroups, groupOffset, topNGroups, groupSort);
    if (VERBOSE) {
      System.out.println(" merged:");
      if (mergedTopGroups == null) {
        System.out.println("  null");
      } else {
        for(SearchGroup<BytesRef> group : mergedTopGroups) {
          System.out.println("  " + groupToString(group.groupValue) + " sort=" + Arrays.toString(group.sortValues));
        }
      }
    }

    if (mergedTopGroups != null) {

      // Now 2nd pass:
      @SuppressWarnings("unchecked")
      final TopGroups<BytesRef>[] shardTopGroups = new TopGroups[subSearchers.length];
      for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
        final TermSecondPassGroupingCollector c = new TermSecondPassGroupingCollector("group", mergedTopGroups, groupSort, docSort,
                                                                                      docOffset + topNDocs, getScores, getMaxScores, true);
        subSearchers[shardIDX].search(w, c);
        shardTopGroups[shardIDX] = c.getTopGroups(0);
      }

      return TopGroups.merge(shardTopGroups, groupSort, docSort, docOffset, topNDocs);
    } else {
      return null;
    }
  }
|
||||
|
||||
private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores) {
|
||||
if (expected == null) {
|
||||
assertNull(actual);
|
||||
return;
|
||||
|
@ -826,7 +970,7 @@ public class TestGrouping extends LuceneTestCase {
|
|||
assertEquals(expected.groups.length, actual.groups.length);
|
||||
assertEquals(expected.totalHitCount, actual.totalHitCount);
|
||||
assertEquals(expected.totalGroupedHitCount, actual.totalGroupedHitCount);
|
||||
if (expected.totalGroupCount != null) {
|
||||
if (expected.totalGroupCount != null && verifyTotalGroupCount) {
|
||||
assertEquals(expected.totalGroupCount, actual.totalGroupCount);
|
||||
}
|
||||
|
||||
|
@ -839,7 +983,9 @@ public class TestGrouping extends LuceneTestCase {
|
|||
if (verifyGroupValues) {
|
||||
assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
|
||||
}
|
||||
assertArrayEquals(expectedGroup.groupSortValues, actualGroup.groupSortValues);
|
||||
if (verifySortValues) {
|
||||
assertArrayEquals(expectedGroup.groupSortValues, actualGroup.groupSortValues);
|
||||
}
|
||||
|
||||
// TODO
|
||||
// assertEquals(expectedGroup.maxScore, actualGroup.maxScore);
|
||||
|
@ -860,8 +1006,32 @@ public class TestGrouping extends LuceneTestCase {
|
|||
// TODO: too anal for now
|
||||
//assertEquals(Float.NaN, actualFD.score);
|
||||
}
|
||||
assertArrayEquals(expectedFD.fields, actualFD.fields);
|
||||
if (verifySortValues) {
|
||||
assertArrayEquals(expectedFD.fields, actualFD.fields);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
  private static class ShardSearcher extends IndexSearcher {
    private final IndexReader.AtomicReaderContext[] ctx;

    public ShardSearcher(IndexReader.AtomicReaderContext ctx, IndexReader.ReaderContext parent) {
      super(parent);
      this.ctx = new IndexReader.AtomicReaderContext[] {ctx};
    }

    public void search(Weight weight, Collector collector) throws IOException {
      search(ctx, weight, null, collector);
    }

    public TopDocs search(Weight weight, int topN) throws IOException {
      return search(ctx, weight, null, topN);
    }

    @Override
    public String toString() {
      return "ShardSearcher(" + ctx[0] + ")";
    }
  }
}
|
||||
|
|
|
@ -268,7 +268,16 @@ Other Changes

* SOLR-2576: Remove deprecated SpellingResult.add(Token, int).
  (James Dyer via rmuir)

* LUCENE-3204: The maven-ant-tasks jar is now included in the source tree;
  users of the generate-maven-artifacts target no longer have to manually
  place this jar in the Ant classpath.  NOTE: when Ant looks for the
  maven-ant-tasks jar, it looks first in its pre-existing classpath, so
  any copies it finds will be used instead of the copy included in the
  Lucene/Solr source tree.  For this reason, it is recommended to remove
  any copies of the maven-ant-tasks jar in the Ant classpath, e.g. under
  ~/.ant/lib/ or under the Ant installation's lib/ directory. (Steve Rowe)

Documentation
----------------------
|
|
@ -14,7 +14,8 @@ $Id$

==================  3.3.0-dev ==============

(No Changes)
* SOLR-2551: Check dataimport.properties for write access (if delta-import is supported
  in DIH configuration) before starting an import (C S, shalin)

==================  3.2.0 ==================
@ -29,7 +29,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.5
|
||||
*/
|
||||
|
||||
|
|
|
@ -37,7 +37,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.5
|
||||
*/
|
||||
|
||||
|
|
|
@ -32,7 +32,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.5
|
||||
*/
|
||||
public class BinURLDataSource extends DataSource<InputStream>{
|
||||
|
|
|
@ -32,7 +32,6 @@ import java.util.Map;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class CachedSqlEntityProcessor extends SqlEntityProcessor {
|
||||
|
|
|
@ -33,7 +33,6 @@ import java.util.Map;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
*/
|
||||
public class ClobTransformer extends Transformer {
|
||||
|
|
|
@ -31,7 +31,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
*/
|
||||
public class ContentStreamDataSource extends DataSource<Reader> {
|
||||
|
|
|
@ -35,7 +35,6 @@ import java.util.Map;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public abstract class Context {
|
||||
|
|
|
@ -30,7 +30,6 @@ import java.util.concurrent.ConcurrentHashMap;
|
|||
* </p>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class ContextImpl extends Context {
|
||||
|
|
|
@ -40,7 +40,6 @@ import java.util.*;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class DataConfig {
|
||||
|
|
|
@ -63,7 +63,6 @@ import org.xml.sax.InputSource;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class DataImportHandler extends RequestHandlerBase implements
|
||||
|
|
|
@ -39,6 +39,7 @@ import org.apache.commons.io.IOUtils;
|
|||
|
||||
import javax.xml.parsers.DocumentBuilder;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
import java.io.File;
|
||||
import java.io.StringReader;
|
||||
import java.text.SimpleDateFormat;
|
||||
import java.util.*;
|
||||
|
@ -51,7 +52,6 @@ import java.util.concurrent.ConcurrentHashMap;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class DataImporter {
|
||||
|
@ -85,6 +85,8 @@ public class DataImporter {

  private final Map<String , Object> coreScopeSession;

  private boolean isDeltaImportSupported = false;

  /**
   * Only for testing purposes
   */
@ -113,7 +115,9 @@ public class DataImporter {
        initEntity(e, fields, false);
        verifyWithSchema(fields);
        identifyPk(e);
      }
      if (e.allAttributes.containsKey(SqlEntityProcessor.DELTA_QUERY))
        isDeltaImportSupported = true;
    }
  }

  private void verifyWithSchema(Map<String, DataConfig.Field> fields) {
@ -350,6 +354,7 @@ public class DataImporter {

    try {
      docBuilder = new DocBuilder(this, writer, requestParams);
      checkWritablePersistFile(writer);
      docBuilder.execute();
      if (!requestParams.debug)
        cumulativeStatistics.add(docBuilder.importStatistics);
@ -364,6 +369,15 @@ public class DataImporter {

  }

  private void checkWritablePersistFile(SolrWriter writer) {
    File persistFile = writer.getPersistFile();
    boolean isWritable = persistFile.exists() ? persistFile.canWrite() : persistFile.getParentFile().canWrite();
    if (isDeltaImportSupported && !isWritable) {
      throw new DataImportHandlerException(SEVERE, persistFile.getAbsolutePath() +
          " is not writable. Delta imports are supported by data config but will not work.");
    }
  }

  public void doDeltaImport(SolrWriter writer, RequestParams requestParams) {
    LOG.info("Starting Delta Import");
    setStatus(Status.RUNNING_DELTA_DUMP);
@ -371,6 +385,7 @@ public class DataImporter {
    try {
      setIndexStartTime(new Date());
      docBuilder = new DocBuilder(this, writer, requestParams);
      checkWritablePersistFile(writer);
      docBuilder.execute();
      if (!requestParams.debug)
        cumulativeStatistics.add(docBuilder.importStatistics);
|
|
@ -36,7 +36,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public abstract class DataSource<T> {
|
||||
|
|
|
@ -37,7 +37,6 @@ import org.slf4j.LoggerFactory;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class DateFormatTransformer extends Transformer {
|
||||
|
|
|
@ -39,7 +39,6 @@ import java.util.Stack;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
class DebugLogger {
|
||||
|
|
|
@ -37,7 +37,6 @@ import java.util.concurrent.*;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class DocBuilder {
|
||||
|
|
|
@ -36,7 +36,6 @@ import java.util.Map;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public abstract class EntityProcessor {
|
||||
|
|
|
@ -29,7 +29,6 @@ import java.util.*;
|
|||
* <p/>
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class EntityProcessorBase extends EntityProcessor {
|
||||
|
|
|
@ -32,7 +32,6 @@ import java.util.Map;
|
|||
/**
|
||||
* A Wrapper over {@link EntityProcessor} instance which performs transforms and handles multi-row outputs correctly.
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
*/
|
||||
public class EntityProcessorWrapper extends EntityProcessor {
|
||||
|
|
|
@ -30,7 +30,6 @@ package org.apache.solr.handler.dataimport;
|
|||
* </p>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public abstract class Evaluator {
|
||||
|
|
|
@ -41,7 +41,6 @@ import java.util.regex.Pattern;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class EvaluatorBag {
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.apache.solr.handler.dataimport;
|
|||
*
|
||||
* <b>This API is experimental and subject to change</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
*/
|
||||
public interface EventListener {
|
||||
|
|
|
@ -42,7 +42,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* Supports String, BLOB, CLOB data types and there is an extra field (in the entity) 'encoding' for BLOB types
|
||||
*
|
||||
*
|
||||
* @since 1.4
|
||||
*/
|
||||
public class FieldReaderDataSource extends DataSource<Reader> {
|
||||
|
|
|
@ -43,7 +43,6 @@ import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVE
|
|||
* This may be used with any {@link EntityProcessor} which uses a {@link DataSource}<{@link InputStream}> eg: {@link TikaEntityProcessor}
|
||||
* <p/>
|
||||
*
|
||||
*
|
||||
* @since 3.1
|
||||
*/
|
||||
public class FieldStreamDataSource extends DataSource<InputStream> {
|
||||
|
|
|
@ -41,7 +41,6 @@ import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVE
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class FileDataSource extends DataSource<Reader> {
|
||||
|
|
|
@ -50,7 +50,6 @@ import java.util.regex.Pattern;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
* @see Pattern
|
||||
*/
|
||||
|
|
|
@ -30,7 +30,6 @@ import java.util.Map;
|
|||
* A {@link Transformer} implementation which strip off HTML tags using {@link HTMLStripCharFilter} This is useful
|
||||
* in case you don't need this HTML anyway.
|
||||
*
|
||||
*
|
||||
* @see HTMLStripCharFilter
|
||||
* @since solr 1.4
|
||||
*/
|
||||
|
|
|
@ -30,7 +30,6 @@ package org.apache.solr.handler.dataimport;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
* @deprecated use {@link org.apache.solr.handler.dataimport.URLDataSource} instead
|
||||
*/
|
||||
|
|
|
@ -33,7 +33,6 @@ import java.util.concurrent.Callable;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class JdbcDataSource extends
|
||||
|
|
|
@ -52,7 +52,6 @@ import org.apache.commons.io.IOUtils;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
* @see Pattern
|
||||
*/
|
||||
|
|
|
@ -29,7 +29,6 @@ import java.util.Map;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
*/
|
||||
public class LogTransformer extends Transformer {
|
||||
|
|
|
@ -28,7 +28,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class MockDataSource extends
|
||||
|
|
|
@ -42,7 +42,6 @@ import java.util.regex.Pattern;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class NumberFormatTransformer extends Transformer {
|
||||
|
|
|
@ -33,7 +33,6 @@ import java.util.Map;
|
|||
* <p>An implementation of {@link EntityProcessor} which reads data from a url/file and give out a row which contains one String
|
||||
* value. The name of the field is 'plainText'.
|
||||
*
|
||||
*
|
||||
* @since solr 1.4
|
||||
*/
|
||||
public class PlainTextEntityProcessor extends EntityProcessorBase {
|
||||
|
|
|
@ -36,7 +36,6 @@ import java.util.regex.Pattern;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
* @see Pattern
|
||||
*/
|
||||
|
|
|
@ -37,7 +37,6 @@ import java.util.Map;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class ScriptTransformer extends Transformer {
|
||||
|
|
|
@ -34,7 +34,6 @@ import java.util.Properties;
|
|||
* <p/>
|
||||
* <b>This API is experimental and may change in the future.</b>
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class SolrWriter {
|
||||
|
@ -100,13 +99,10 @@ public class SolrWriter {

    try {
      props.putAll(p);
      String filePath = configDir;
      if (configDir != null && !configDir.endsWith(File.separator))
        filePath += File.separator;
      filePath += persistFilename;
      propOutput = new FileOutputStream(filePath);
      File persistFile = getPersistFile();
      propOutput = new FileOutputStream(persistFile);
      props.store(propOutput, null);
      log.info("Wrote last indexed time to " + persistFilename);
      log.info("Wrote last indexed time to " + persistFile.getAbsolutePath());
    } catch (FileNotFoundException e) {
      throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
          "Unable to persist Index Start Time", e);
@ -123,6 +119,14 @@ public class SolrWriter {
    }
  }

  File getPersistFile() {
    String filePath = configDir;
    if (configDir != null && !configDir.endsWith(File.separator))
      filePath += File.separator;
    filePath += persistFilename;
    return new File(filePath);
  }

  void finish() {
    try {
      processor.finish();
|
|
@ -20,6 +20,8 @@ import org.junit.Before;
|
|||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileOutputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
@ -92,7 +94,37 @@ public class TestSqlEntityProcessorDelta extends AbstractDataImportHandlerTestCa
|
|||
public void testCompositePk_FullImport() throws Exception {
|
||||
add1document();
|
||||
}
|
||||
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testNonWritablePersistFile() throws Exception {
|
||||
// See SOLR-2551
|
||||
String configDir = h.getCore().getResourceLoader().getConfigDir();
|
||||
String filePath = configDir;
|
||||
if (configDir != null && !configDir.endsWith(File.separator))
|
||||
filePath += File.separator;
|
||||
filePath += "dataimport.properties";
|
||||
File f = new File(filePath);
|
||||
// execute the test only if we are able to set file to read only mode
|
||||
if ((f.exists() || f.createNewFile()) && f.setReadOnly()) {
|
||||
try {
|
||||
List parentRow = new ArrayList();
|
||||
parentRow.add(createMap("id", "1"));
|
||||
MockDataSource.setIterator(FULLIMPORT_QUERY, parentRow.iterator());
|
||||
|
||||
List childRow = new ArrayList();
|
||||
childRow.add(createMap("desc", "hello"));
|
||||
MockDataSource.setIterator("select * from y where y.A='1'", childRow
|
||||
.iterator());
|
||||
|
||||
runFullImport(dataConfig_delta);
|
||||
assertQ(req("id:1"), "//*[@numFound='0']");
|
||||
} finally {
|
||||
f.setWritable(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WORKS
|
||||
|
||||
@Test
|
||||
|
|
|
@ -24,9 +24,7 @@
|
|||
https://issues.apache.org/jira/browse/SOLR-1167
|
||||
-->
|
||||
<config xmlns:xi="http://www.w3.org/2001/XInclude">
|
||||
|
||||
<luceneMatchVersion>LUCENE_40</luceneMatchVersion>
|
||||
|
||||
<luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
|
||||
<!--
|
||||
lib directives can be used to instruct Solr to load an Jars
|
||||
identified and use them to resolve any "plugins" specified in your
|
||||
|
@ -1064,6 +1062,8 @@
|
|||
</lst>
|
||||
<str name="analysisEngine">/TestExceptionAE.xml</str>
|
||||
<bool name="ignoreErrors">true</bool>
|
||||
<!-- This is optional. It is used for logging when text processing fails. Usually, set uniqueKey field name -->
|
||||
<str name="logField">id</str>
|
||||
<lst name="analyzeFields">
|
||||
<bool name="merge">false</bool>
|
||||
<arr name="fields">
|
||||
|
|
|
@ -476,8 +476,8 @@ class ElevationComparatorSource extends FieldComparatorSource {
|
|||
}
|
||||
|
||||
@Override
|
||||
public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
|
||||
return new FieldComparator() {
|
||||
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
|
||||
return new FieldComparator<Integer>() {
|
||||
|
||||
FieldCache.DocTermsIndex idIndex;
|
||||
private final int[] values = new int[numHits];
|
||||
|
@ -517,7 +517,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable value(int slot) {
|
||||
public Integer value(int slot) {
|
||||
return values[slot];
|
||||
}
|
||||
};
|
||||
|
|
|
@ -59,12 +59,17 @@ public class ValueSourceAugmenter extends DocTransformer
|
|||
|
||||
@Override
|
||||
public void setContext( TransformContext context ) {
|
||||
IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
|
||||
readerContexts = reader.getTopReaderContext().leaves();
|
||||
docValuesArr = new DocValues[readerContexts.length];
|
||||
try {
|
||||
IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
|
||||
readerContexts = reader.getTopReaderContext().leaves();
|
||||
docValuesArr = new DocValues[readerContexts.length];
|
||||
|
||||
searcher = qparser.getReq().getSearcher();
|
||||
this.fcontext = ValueSource.newContext(searcher);
|
||||
searcher = qparser.getReq().getSearcher();
|
||||
fcontext = ValueSource.newContext(searcher);
|
||||
this.valueSource.createWeight(fcontext, searcher);
|
||||
} catch (IOException e) {
|
||||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -102,8 +102,8 @@ public class RandomSortField extends FieldType {
|
|||
|
||||
private static FieldComparatorSource randomComparatorSource = new FieldComparatorSource() {
|
||||
@Override
|
||||
public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
|
||||
return new FieldComparator() {
|
||||
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
|
||||
return new FieldComparator<Integer>() {
|
||||
int seed;
|
||||
private final int[] values = new int[numHits];
|
||||
int bottomVal;
|
||||
|
@ -135,7 +135,7 @@ public class RandomSortField extends FieldType {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable value(int slot) {
|
||||
public Integer value(int slot) {
|
||||
return values[slot];
|
||||
}
|
||||
};
|
||||
|
|
|
@ -56,7 +56,7 @@ public class MissingStringLastComparatorSource extends FieldComparatorSource {
|
|||
|
||||
// Copied from Lucene's TermOrdValComparator and modified since the Lucene version couldn't
|
||||
// be extended.
|
||||
class TermOrdValComparator_SML extends FieldComparator {
|
||||
class TermOrdValComparator_SML extends FieldComparator<BytesRef> {
|
||||
private static final int NULL_ORD = Integer.MAX_VALUE;
|
||||
|
||||
private final int[] ords;
|
||||
|
@ -98,7 +98,7 @@ class TermOrdValComparator_SML extends FieldComparator {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable<?> value(int slot) {
|
||||
public BytesRef value(int slot) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
|
@ -111,7 +111,7 @@ class TermOrdValComparator_SML extends FieldComparator {
|
|||
// ords) per-segment comparator. NOTE: this is messy;
|
||||
// we do this only because hotspot can't reliably inline
|
||||
// the underlying array access when looking up doc->ord
|
||||
private static abstract class PerSegmentComparator extends FieldComparator {
|
||||
private static abstract class PerSegmentComparator extends FieldComparator<BytesRef> {
|
||||
protected TermOrdValComparator_SML parent;
|
||||
protected final int[] ords;
|
||||
protected final BytesRef[] values;
|
||||
|
@ -199,7 +199,7 @@ class TermOrdValComparator_SML extends FieldComparator {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable<?> value(int slot) {
|
||||
public BytesRef value(int slot) {
|
||||
return values==null ? parent.NULL_VAL : values[slot];
|
||||
}
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ public class BoostedQuery extends Query {
|
|||
|
||||
public BoostedWeight(IndexSearcher searcher) throws IOException {
|
||||
this.searcher = searcher;
|
||||
this.qWeight = q.weight(searcher);
|
||||
this.qWeight = q.createWeight(searcher);
|
||||
this.fcontext = boostVal.newContext(searcher);
|
||||
boostVal.createWeight(fcontext,searcher);
|
||||
}
|
||||
|
|
|
@ -68,7 +68,7 @@ public class QueryValueSource extends ValueSource {
|
|||
|
||||
@Override
|
||||
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
|
||||
Weight w = q.weight(searcher);
|
||||
Weight w = searcher.createNormalizedWeight(q);
|
||||
context.put(this, w);
|
||||
}
|
||||
}
|
||||
|
@ -98,7 +98,7 @@ class QueryDocValues extends FloatDocValues {
|
|||
this.q = vs.q;
|
||||
this.fcontext = fcontext;
|
||||
|
||||
Weight w = fcontext==null ? null : (Weight)fcontext.get(q);
|
||||
Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
|
||||
if (w == null) {
|
||||
IndexSearcher weightSearcher;
|
||||
if(fcontext == null) {
|
||||
|
@ -109,7 +109,8 @@ class QueryDocValues extends FloatDocValues {
|
|||
weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
|
||||
}
|
||||
}
|
||||
w = q.weight(weightSearcher);
|
||||
vs.createWeight(fcontext, weightSearcher);
|
||||
w = (Weight)fcontext.get(vs);
|
||||
}
|
||||
weight = w;
|
||||
}
|
||||
|
|
|
@ -141,7 +141,7 @@ public abstract class ValueSource implements Serializable {
|
|||
* off of the {@link org.apache.solr.search.function.DocValues} for a ValueSource
|
||||
* instead of the normal Lucene FieldComparator that works off of a FieldCache.
|
||||
*/
|
||||
class ValueSourceComparator extends FieldComparator {
|
||||
class ValueSourceComparator extends FieldComparator<Double> {
|
||||
private final double[] values;
|
||||
private DocValues docVals;
|
||||
private double bottom;
|
||||
|
@ -195,7 +195,7 @@ public abstract class ValueSource implements Serializable {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Comparable value(int slot) {
|
||||
public Double value(int slot) {
|
||||
return values[slot];
|
||||
}
|
||||
}
|