diff --git a/dev-tools/maven/README.maven b/dev-tools/maven/README.maven
index 65954abf1d8..cbc9f75bdd2 100644
--- a/dev-tools/maven/README.maven
+++ b/dev-tools/maven/README.maven
@@ -21,18 +21,7 @@ A. How to use nightly Jenkins-built Lucene/Solr Maven artifacts
B. How to generate Lucene Maven artifacts
- 1. Prerequisites: JDK 1.5+, Ant 1.7.X, and maven-ant-tasks-2.1.1.jar
-
- In order to generate Maven artifacts for Lucene/Solr, you must first
- download the Maven ant tasks JAR (maven-ant-tasks-2.1.1.jar), e.g.
- from http://maven.apache.org/ant-tasks/download.html, and add it
- to any one of the following:
-
- a. Your $HOME/.ant/lib/ directory (C:\Users\username\.ant\lib\ under
- Windows Vista/7); or
- b. Your $ANT_HOME/lib/ directory (%ANT_HOME%\lib\ under Windows); or
- c. Your $CLASSPATH (%CLASSPATH% under Windows); or
- d. Your ant commond line: "-lib /path/to/maven-ant-tasks-2.1.1.jar".
+ 1. Prerequisites: JDK 1.5+ and Ant 1.7.X
2. Run the following command from the lucene/ directory:
@@ -47,8 +36,7 @@ B. How to generate Lucene Maven artifacts
C. How to generate Solr Maven artifacts
- 1. Prerequisites: JDK 1.6+; Ant 1.7.X; and maven-ant-tasks-2.1.1.jar
- (see item A.1. above for where to put the Maven ant tasks jar).
+ 1. Prerequisites: JDK 1.6+ and Ant 1.7.X
2. Run the following from the solr/ directory:
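(Per the LUCENE-3204 entry below, the target in both cases is presumably
generate-maven-artifacts, i.e. "ant generate-maven-artifacts".)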
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index d1b60b78479..e3605378746 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -473,6 +473,11 @@ Changes in backwards compatibility policy
of IndexInput) as its first argument. (Robert Muir, Dawid Weiss,
Mike McCandless)
+* LUCENE-3208: Made deprecated methods Query.weight(Searcher) and
+ Searcher.createWeight() final to prevent override. If you have
+ overridden one of these methods, cut over to the non-deprecated
+ implementation. (Uwe Schindler, Robert Muir, Yonik Seeley)
+
Changes in runtime behavior
* LUCENE-2834: the hash used to compute the lock file name when the
@@ -511,6 +516,21 @@ Bug fixes
ArrayIndexOutOfBoundsException (selckin, Robert Muir, Mike
McCandless)
+* LUCENE-3208: IndexSearcher had its own private similarity field
+ and corresponding getter/setter overriding Searcher's implementation. If you
+ set a different Similarity instance on IndexSearcher, methods implemented
+ in the superclass Searcher did not use it, leading to strange bugs.
+ (Uwe Schindler, Robert Muir)
+
+API Changes
+
+* LUCENE-3208: Renamed protected IndexSearcher.createWeight() to expert
+ public method IndexSearcher.createNormalizedWeight() as this better describes
+ what this method does. The old method is still there for backwards
+ compatibility. Query.weight() was deprecated and simply delegates to
+ IndexSearcher. Both deprecated methods will be removed in Lucene 4.0.
+ (Uwe Schindler, Robert Muir, Yonik Seeley)
+
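A minimal migration sketch for the entry above; searcher and query stand for
whatever IndexSearcher and Query the caller already has, and only calls shown
elsewhere in this patch are assumed:

    // Before (now final and deprecated):
    //   Weight w = query.weight(searcher);   // or the protected searcher.createWeight(query)
    // After: rewrite, Weight creation and normalization in one call:
    Weight w = searcher.createNormalizedWeight(query);
    Scorer s = w.scorer(readerContext, Weight.ScorerContext.def()); // readerContext: an AtomicReaderContext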
New Features
* LUCENE-3140: Added experimental FST implementation to Lucene.
@@ -520,10 +540,23 @@ New Features
algorithm over objects that implement the new TwoPhaseCommit interface (such
as IndexWriter). (Shai Erera)
+* LUCENE-3191: Added TopDocs.merge, to facilitate merging results from
+ different shards. (Uwe Schindler, Mike McCandless)
+
Build
* LUCENE-1344: Create OSGi bundle using dev-tools/maven.
(Nicolas Lalevée, Luca Stancapiano via ryan)
+
+* LUCENE-3204: The maven-ant-tasks jar is now included in the source tree;
+ users of the generate-maven-artifacts target no longer have to manually
+ place this jar in the Ant classpath. NOTE: when Ant looks for the
+ maven-ant-tasks jar, it looks first in its pre-existing classpath, so
+ any copies it finds will be used instead of the copy included in the
+ Lucene/Solr source tree. For this reason, it is recommended to remove
+ any copies of the maven-ant-tasks jar in the Ant classpath, e.g. under
+ ~/.ant/lib/ or under the Ant installation's lib/ directory. (Steve Rowe)
+
======================= Lucene 3.2.0 =======================
diff --git a/lucene/build.xml b/lucene/build.xml
index a582941caec..434e8f178e7 100644
--- a/lucene/build.xml
+++ b/lucene/build.xml
@@ -444,7 +444,10 @@
+ depends="package, jar-src, jar-test-framework-src, javadocs">
+
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index c69c0eb311a..a8a553f387d 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -64,6 +64,11 @@
+
+
+
+
+
@@ -196,11 +201,6 @@
-
-
@@ -322,20 +322,6 @@
-
- #
- ##########################################################################
- Maven ant tasks not found.
-
- Please download the Maven ant tasks JAR (maven-ant-tasks-2.1.1.jar)
- from http://maven.apache.org/ant-tasks/download.html and add it to your
- $$HOME/.ant/lib/ directory, or to your $$ANT_HOME/lib/ directory, or
- to your $$CLASSPATH, or add "-lib /path/to/maven-ant-tasks-2.1.1.jar"
- to the ant command.
- ##########################################################################
-
-
-
diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt
index b072e8655a8..118dc5787ea 100644
--- a/lucene/contrib/CHANGES.txt
+++ b/lucene/contrib/CHANGES.txt
@@ -75,6 +75,10 @@ New Features
allow an app to control which indexing changes must be visible to
which search requests. (Mike McCandless)
+ * LUCENE-3191: Added SearchGroup.merge and TopGroups.merge, to
+ facilitate doing grouping in a distributed environment (Uwe
+ Schindler, Mike McCandless)
+
API Changes
* LUCENE-3141: add getter method to access fragInfos in FieldFragList.
diff --git a/lucene/contrib/contrib-build.xml b/lucene/contrib/contrib-build.xml
index bbd35082ac2..cce14ff24c1 100644
--- a/lucene/contrib/contrib-build.xml
+++ b/lucene/contrib/contrib-build.xml
@@ -76,6 +76,9 @@
+
diff --git a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java
index b16d46fe464..6b1af4c02d0 100644
--- a/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java
+++ b/lucene/contrib/misc/src/test/org/apache/lucene/index/TestNRTManager.java
@@ -502,12 +502,12 @@ public class TestNRTManager extends LuceneTestCase {
int seenTermCount = 0;
int shift;
int trigger;
- if (totTermCount.get() == 0) {
+ if (totTermCount.get() < 10) {
shift = 0;
trigger = 1;
} else {
- shift = random.nextInt(totTermCount.get()/10);
trigger = totTermCount.get()/10;
+ shift = random.nextInt(trigger);
}
while(System.currentTimeMillis() < stopTime) {
@@ -518,13 +518,13 @@ public class TestNRTManager extends LuceneTestCase {
}
totTermCount.set(seenTermCount);
seenTermCount = 0;
- if (totTermCount.get() == 0) {
+ if (totTermCount.get() < 10) {
shift = 0;
trigger = 1;
} else {
trigger = totTermCount.get()/10;
//System.out.println("trigger " + trigger);
- shift = random.nextInt(totTermCount.get()/10);
+ shift = random.nextInt(trigger);
}
termsEnum.seek(new BytesRef(""));
continue;
diff --git a/lucene/contrib/queries/src/java/org/apache/lucene/search/SlowCollatedStringComparator.java b/lucene/contrib/queries/src/java/org/apache/lucene/search/SlowCollatedStringComparator.java
index 0f14e334dc2..784fe55b413 100644
--- a/lucene/contrib/queries/src/java/org/apache/lucene/search/SlowCollatedStringComparator.java
+++ b/lucene/contrib/queries/src/java/org/apache/lucene/search/SlowCollatedStringComparator.java
@@ -33,7 +33,7 @@ import org.apache.lucene.util.BytesRef;
* This class will be removed in Lucene 5.0
*/
@Deprecated
-public final class SlowCollatedStringComparator extends FieldComparator {
+public final class SlowCollatedStringComparator extends FieldComparator<BytesRef> {
private final String[] values;
private DocTerms currentDocTerms;
@@ -99,8 +99,22 @@ public final class SlowCollatedStringComparator extends FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public BytesRef value(int slot) {
final String s = values[slot];
return s == null ? null : new BytesRef(values[slot]);
}
+
+ @Override
+ public int compareValues(BytesRef first, BytesRef second) {
+ if (first == null) {
+ if (second == null) {
+ return 0;
+ }
+ return -1;
+ } else if (second == null) {
+ return 1;
+ } else {
+ return collator.compare(first.utf8ToString(), second.utf8ToString());
+ }
+ }
}
diff --git a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
index f9564694297..b16546791b2 100644
--- a/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
+++ b/lucene/contrib/spatial/src/java/org/apache/lucene/spatial/tier/DistanceFieldComparatorSource.java
@@ -31,94 +31,91 @@ import org.apache.lucene.search.FieldComparatorSource;
*/
public class DistanceFieldComparatorSource extends FieldComparatorSource {
- private DistanceFilter distanceFilter;
- private DistanceScoreDocLookupComparator dsdlc;
+ private DistanceFilter distanceFilter;
+ private DistanceScoreDocLookupComparator dsdlc;
- public DistanceFieldComparatorSource(Filter distanceFilter) {
+ public DistanceFieldComparatorSource(Filter distanceFilter) {
+ this.distanceFilter = (DistanceFilter) distanceFilter;
+ }
- this.distanceFilter = (DistanceFilter) distanceFilter;
+ public void cleanUp() {
+ distanceFilter = null;
- }
+ if (dsdlc != null) {
+ dsdlc.cleanUp();
+ }
- public void cleanUp() {
- distanceFilter = null;
+ dsdlc = null;
+ }
- if (dsdlc != null)
- dsdlc.cleanUp();
+ @Override
+ public FieldComparator newComparator(String fieldname, int numHits,
+ int sortPos, boolean reversed) throws IOException {
+ dsdlc = new DistanceScoreDocLookupComparator(numHits);
+ return dsdlc;
+ }
- dsdlc = null;
- }
+ private class DistanceScoreDocLookupComparator extends FieldComparator<Double> {
- @Override
- public FieldComparator newComparator(String fieldname, int numHits,
- int sortPos, boolean reversed) throws IOException {
- dsdlc = new DistanceScoreDocLookupComparator(numHits);
- return dsdlc;
- }
-
- private class DistanceScoreDocLookupComparator extends FieldComparator {
-
- private double[] values;
- private double bottom;
- private int offset =0;
+ private double[] values;
+ private double bottom;
+ private int offset =0;
- public DistanceScoreDocLookupComparator(int numHits) {
- values = new double[numHits];
- return;
- }
+ public DistanceScoreDocLookupComparator(int numHits) {
+ values = new double[numHits];
+ return;
+ }
- @Override
- public int compare(int slot1, int slot2) {
- double a = values[slot1];
- double b = values[slot2];
- if (a > b)
- return 1;
- if (a < b)
- return -1;
+ @Override
+ public int compare(int slot1, int slot2) {
+ double a = values[slot1];
+ double b = values[slot2];
+ if (a > b)
+ return 1;
+ if (a < b)
+ return -1;
- return 0;
- }
+ return 0;
+ }
- public void cleanUp() {
- distanceFilter = null;
- }
+ public void cleanUp() {
+ distanceFilter = null;
+ }
- @Override
- public int compareBottom(int doc) {
- double v2 = distanceFilter.getDistance(doc+ offset);
+ @Override
+ public int compareBottom(int doc) {
+ double v2 = distanceFilter.getDistance(doc+ offset);
- if (bottom > v2) {
- return 1;
- } else if (bottom < v2) {
- return -1;
- }
- return 0;
- }
+ if (bottom > v2) {
+ return 1;
+ } else if (bottom < v2) {
+ return -1;
+ }
+ return 0;
+ }
- @Override
- public void copy(int slot, int doc) {
- values[slot] = distanceFilter.getDistance(doc + offset);
- }
+ @Override
+ public void copy(int slot, int doc) {
+ values[slot] = distanceFilter.getDistance(doc + offset);
+ }
- @Override
- public void setBottom(int slot) {
- this.bottom = values[slot];
-
- }
+ @Override
+ public void setBottom(int slot) {
+ this.bottom = values[slot];
+ }
@Override
public FieldComparator setNextReader(AtomicReaderContext context)
- throws IOException {
+ throws IOException {
// each reader in a segmented base
// has an offset based on the maxDocs of previous readers
offset = context.docBase;
return this;
}
- @Override
- public Comparable value(int slot) {
- return values[slot];
- }
- }
-
+ @Override
+ public Double value(int slot) {
+ return values[slot];
+ }
+ }
}
diff --git a/lucene/lib/maven-ant-tasks-2.1.1.jar b/lucene/lib/maven-ant-tasks-2.1.1.jar
new file mode 100644
index 00000000000..e5299ba4aef
--- /dev/null
+++ b/lucene/lib/maven-ant-tasks-2.1.1.jar
@@ -0,0 +1,2 @@
+AnyObjectId[7810a541b8350775d61aea353538560817cce06e] was removed in git history.
+Apache SVN contains full history.
\ No newline at end of file
diff --git a/lucene/lib/maven-ant-tasks-LICENSE-ASL.txt b/lucene/lib/maven-ant-tasks-LICENSE-ASL.txt
new file mode 100644
index 00000000000..d6456956733
--- /dev/null
+++ b/lucene/lib/maven-ant-tasks-LICENSE-ASL.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lucene/lib/maven-ant-tasks-NOTICE.txt b/lucene/lib/maven-ant-tasks-NOTICE.txt
new file mode 100755
index 00000000000..42fef0c2f2c
--- /dev/null
+++ b/lucene/lib/maven-ant-tasks-NOTICE.txt
@@ -0,0 +1,8 @@
+
+Maven Ant Tasks
+Copyright 2002-2010 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+
diff --git a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
index 745117daec0..6a9303c3c80 100644
--- a/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
+++ b/lucene/src/java/org/apache/lucene/index/BufferedDeletesStream.java
@@ -28,10 +28,10 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.index.IndexReader.AtomicReaderContext;
-import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Query;
-import org.apache.lucene.search.Scorer;
-import org.apache.lucene.search.Weight;
+import org.apache.lucene.search.QueryWrapperFilter;
/* Tracks the stream of {@link BufferedDeletes}.
* When DocumentsWriterPerThread flushes, its buffered
@@ -434,18 +434,16 @@ class BufferedDeletesStream {
// Delete by query
private synchronized long applyQueryDeletes(Iterable<QueryAndLimit> queriesIter, SegmentReader reader) throws IOException {
long delCount = 0;
- IndexSearcher searcher = new IndexSearcher(reader);
- assert searcher.getTopReaderContext().isAtomic;
- final AtomicReaderContext readerContext = (AtomicReaderContext) searcher.getTopReaderContext();
- try {
- for (QueryAndLimit ent : queriesIter) {
- Query query = ent.query;
- int limit = ent.limit;
- Weight weight = query.weight(searcher);
- Scorer scorer = weight.scorer(readerContext, Weight.ScorerContext.def());
- if (scorer != null) {
+ final AtomicReaderContext readerContext = (AtomicReaderContext) reader.getTopReaderContext();
+ for (QueryAndLimit ent : queriesIter) {
+ Query query = ent.query;
+ int limit = ent.limit;
+ final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(readerContext);
+ if (docs != null) {
+ final DocIdSetIterator it = docs.iterator();
+ if (it != null) {
while(true) {
- int doc = scorer.nextDoc();
+ int doc = it.nextDoc();
if (doc >= limit)
break;
@@ -459,8 +457,6 @@ class BufferedDeletesStream {
}
}
}
- } finally {
- searcher.close();
}
return delCount;
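The same pattern works outside of IndexWriter; a hedged sketch assuming an
atomic (single-segment) reader, with reader, query and limit as illustrative
names:

    // Resolve docIDs matching a delete-by-query via QueryWrapperFilter,
    // without the IndexSearcher/Weight detour the old code took:
    final AtomicReaderContext ctx = (AtomicReaderContext) reader.getTopReaderContext();
    final DocIdSet docs = new QueryWrapperFilter(query).getDocIdSet(ctx);
    if (docs != null) {
      final DocIdSetIterator it = docs.iterator();
      if (it != null) {
        int doc;
        // nextDoc() returns NO_MORE_DOCS (Integer.MAX_VALUE) when exhausted,
        // so the limit check also ends the loop:
        while ((doc = it.nextDoc()) < limit) {
          // ... record doc as deleted ...
        }
      }
    }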
diff --git a/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java b/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
index af299ca9e26..3c8f532e52c 100644
--- a/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
+++ b/lucene/src/java/org/apache/lucene/index/DocumentsWriterFlushControl.java
@@ -97,9 +97,25 @@ public final class DocumentsWriterFlushControl {
// for this assert we must be tolerant to ram buffer changes!
maxConfiguredRamBuffer = Math.max(maxRamMB, maxConfiguredRamBuffer);
final long ram = flushBytes + activeBytes;
+ final long ramBufferBytes = (long) (maxConfiguredRamBuffer * 1024 * 1024);
// take peakDelta into account - worst case is that all flushing, pending and blocked DWPT had maxMem and the last doc had the peakDelta
- final long expected = (long)(2 * (maxConfiguredRamBuffer * 1024 * 1024)) + ((numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta);
- assert ram <= expected : "ram was " + ram + " expected: " + expected + " flush mem: " + flushBytes + " active: " + activeBytes + " pending: " + numPending + " flushing: " + numFlushingDWPT() + " blocked: " + numBlockedFlushes() + " peakDelta: " + peakDelta ;
+ final long expected = (long)(2 * (ramBufferBytes)) + ((numPending + numFlushingDWPT() + numBlockedFlushes()) * peakDelta);
+ if (peakDelta < (ramBufferBytes >> 1)) {
+ /*
+ * if we are indexing with very low maxRamBuffer like 0.1MB memory can
+ * easily overflow if we check out some DWPT based on docCount and have
+ * several DWPT in flight indexing large documents (compared to the ram
+ * buffer). This means that those DWPT and their threads will not hit
+ * the stall control before asserting the memory which would in turn
+ * fail. To prevent this we only assert if the largest document seen
+ * is smaller than half of the maxRamBufferMB
+ */
+ assert ram <= expected : "ram was " + ram + " expected: " + expected
+ + " flush mem: " + flushBytes + " activeMem: " + activeBytes
+ + " pendingMem: " + numPending + " flushingMem: "
+ + numFlushingDWPT() + " blockedMem: " + numBlockedFlushes()
+ + " peakDeltaMem: " + peakDelta;
+ }
}
return true;
}
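To make the asserted bound concrete: with maxRamBufferMB = 16 (so
ramBufferBytes = 16 * 1024 * 1024 = 16,777,216), two pending, one flushing and
no blocked DWPTs, and a 1 MB peakDelta, the allowed ceiling is
2 * 16,777,216 + 3 * 1,048,576 = 36,700,160 bytes (35 MB); and since
1 MB < 8 MB (half the buffer), the assert is actually evaluated.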
diff --git a/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java b/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
index 82e976098ff..3fc72663d54 100644
--- a/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
+++ b/lucene/src/java/org/apache/lucene/index/SlowMultiReaderWrapper.java
@@ -60,6 +60,11 @@ public final class SlowMultiReaderWrapper extends FilterIndexReader {
readerContext = new AtomicReaderContext(this); // emulate atomic reader!
}
+ @Override
+ public String toString() {
+ return "SlowMultiReaderWrapper(" + in + ")";
+ }
+
@Override
public Fields fields() throws IOException {
return MultiFields.getFields(in);
diff --git a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
index 28b19a52090..025b0bf840f 100644
--- a/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
+++ b/lucene/src/java/org/apache/lucene/index/codecs/preflex/PreFlexFields.java
@@ -92,7 +92,7 @@ public class PreFlexFields extends FieldsProducer {
// make sure that all index files have been read or are kept open
// so that if an index update removes them we'll still have them
- freqStream = dir.openInput(info.name + ".frq", readBufferSize);
+ freqStream = dir.openInput(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.FREQ_EXTENSION), readBufferSize);
boolean anyProx = false;
for (FieldInfo fi : fieldInfos) {
if (fi.isIndexed) {
@@ -105,7 +105,7 @@ public class PreFlexFields extends FieldsProducer {
}
if (anyProx) {
- proxStream = dir.openInput(info.name + ".prx", readBufferSize);
+ proxStream = dir.openInput(IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.PROX_EXTENSION), readBufferSize);
} else {
proxStream = null;
}
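Assuming the usual contract of IndexFileNames.segmentFileName(name, suffix,
extension) -- with an empty suffix it simply appends "." plus the extension --
the new calls resolve to the same file names the removed concatenation
produced:

    // Hypothetical segment "_3"; PreFlexCodec.FREQ_EXTENSION is the "frq" extension:
    String before = info.name + ".frq";                                                       // "_3.frq"
    String after = IndexFileNames.segmentFileName(info.name, "", PreFlexCodec.FREQ_EXTENSION); // "_3.frq"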
diff --git a/lucene/src/java/org/apache/lucene/search/FieldComparator.java b/lucene/src/java/org/apache/lucene/search/FieldComparator.java
index d7565317abe..4bb9406411b 100644
--- a/lucene/src/java/org/apache/lucene/search/FieldComparator.java
+++ b/lucene/src/java/org/apache/lucene/search/FieldComparator.java
@@ -96,7 +96,7 @@ import org.apache.lucene.util.packed.PackedInts;
*
* @lucene.experimental
*/
-public abstract class FieldComparator {
+public abstract class FieldComparator<T> {
/**
* Compare hit at slot1 with hit at slot2.
@@ -176,13 +176,21 @@ public abstract class FieldComparator {
* Return the actual value in the slot.
*
* @param slot the value
- * @return value in this slot upgraded to Comparable
+ * @return value in this slot
*/
- public abstract Comparable<?> value(int slot);
+ public abstract T value(int slot);
-
+ /** Returns -1 if first is less than second. The default
+ * impl assumes the type implements Comparable and
+ * invokes .compareTo; be sure to override this method if
+ * your FieldComparator's type isn't a Comparable or
+ * if your values may sometimes be null */
+ @SuppressWarnings("unchecked")
+ public int compareValues(T first, T second) {
+ return ((Comparable) first).compareTo(second);
+ }
- public static abstract class NumericComparator<T extends CachedArray> extends FieldComparator {
+ public static abstract class NumericComparator<T extends CachedArray, U extends Number> extends FieldComparator<U> {
protected final CachedArrayCreator<T> creator;
protected T cached;
protected final boolean checkMissing;
@@ -203,7 +211,7 @@ public abstract class FieldComparator {
/** Parses field's values as byte (using {@link
* FieldCache#getBytes} and sorts by ascending value */
- public static final class ByteComparator extends NumericComparator<ByteValues> {
+ public static final class ByteComparator extends NumericComparator<ByteValues,Byte> {
private byte[] docValues;
private final byte[] values;
private final byte missingValue;
@@ -252,7 +260,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Byte value(int slot) {
return Byte.valueOf(values[slot]);
}
}
@@ -260,13 +268,12 @@ public abstract class FieldComparator {
/** Parses field's values as double (using {@link
* FieldCache#getDoubles} and sorts by ascending value */
- public static final class DoubleComparator extends NumericComparator<DoubleValues> {
+ public static final class DoubleComparator extends NumericComparator<DoubleValues,Double> {
private double[] docValues;
private final double[] values;
private final double missingValue;
private double bottom;
-
DoubleComparator(int numHits, DoubleValuesCreator creator, Double missingValue ) {
super( creator, missingValue != null );
values = new double[numHits];
@@ -324,13 +331,13 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Double value(int slot) {
return Double.valueOf(values[slot]);
}
}
/** Uses float index values to sort by ascending value */
- public static final class FloatDocValuesComparator extends FieldComparator {
+ public static final class FloatDocValuesComparator extends FieldComparator<Double> {
private final double[] values;
private Source currentReaderValues;
private final String field;
@@ -386,14 +393,14 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<Double> value(int slot) {
+ public Double value(int slot) {
return Double.valueOf(values[slot]);
}
}
/** Parses field's values as float (using {@link
* FieldCache#getFloats} and sorts by ascending value */
- public static final class FloatComparator extends NumericComparator<FloatValues> {
+ public static final class FloatComparator extends NumericComparator<FloatValues,Float> {
private float[] docValues;
private final float[] values;
private final float missingValue;
@@ -460,14 +467,14 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Float value(int slot) {
return Float.valueOf(values[slot]);
}
}
/** Parses field's values as short (using {@link
* FieldCache#getShorts} and sorts by ascending value */
- public static final class ShortComparator extends NumericComparator<ShortValues> {
+ public static final class ShortComparator extends NumericComparator<ShortValues,Short> {
private short[] docValues;
private final short[] values;
private short bottom;
@@ -516,14 +523,14 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Short value(int slot) {
return Short.valueOf(values[slot]);
}
}
/** Parses field's values as int (using {@link
* FieldCache#getInts} and sorts by ascending value */
- public static final class IntComparator extends NumericComparator<IntValues> {
+ public static final class IntComparator extends NumericComparator<IntValues,Integer> {
private int[] docValues;
private final int[] values;
private int bottom; // Value of bottom of queue
@@ -594,13 +601,13 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Integer value(int slot) {
return Integer.valueOf(values[slot]);
}
}
/** Loads int index values and sorts by ascending value. */
- public static final class IntDocValuesComparator extends FieldComparator {
+ public static final class IntDocValuesComparator extends FieldComparator<Long> {
private final long[] values;
private Source currentReaderValues;
private final String field;
@@ -660,14 +667,14 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<Long> value(int slot) {
+ public Long value(int slot) {
return Long.valueOf(values[slot]);
}
}
/** Parses field's values as long (using {@link
* FieldCache#getLongs} and sorts by ascending value */
- public static final class LongComparator extends NumericComparator<LongValues> {
+ public static final class LongComparator extends NumericComparator<LongValues,Long> {
private long[] docValues;
private final long[] values;
private long bottom;
@@ -735,7 +742,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Long value(int slot) {
return Long.valueOf(values[slot]);
}
}
@@ -746,7 +753,7 @@ public abstract class FieldComparator {
* using {@link TopScoreDocCollector} directly (which {@link
* IndexSearcher#search} uses when no {@link Sort} is
* specified). */
- public static final class RelevanceComparator extends FieldComparator {
+ public static final class RelevanceComparator extends FieldComparator<Float> {
private final float[] scores;
private float bottom;
private Scorer scorer;
@@ -791,15 +798,21 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Float value(int slot) {
return Float.valueOf(scores[slot]);
}
+
+ // Override because we sort reverse of natural Float order:
+ @Override
+ public int compareValues(Float first, Float second) {
+ // Reversed intentionally because relevance by default
+ // sorts descending:
+ return second.compareTo(first);
+ }
}
-
-
/** Sorts by ascending docID */
- public static final class DocComparator extends FieldComparator {
+ public static final class DocComparator extends FieldComparator<Integer> {
private final int[] docIDs;
private int docBase;
private int bottom;
@@ -840,7 +853,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public Integer value(int slot) {
return Integer.valueOf(docIDs[slot]);
}
}
@@ -854,7 +867,7 @@ public abstract class FieldComparator {
* to large results, this comparator will be much faster
* than {@link TermValComparator}. For very small
* result sets it may be slower. */
- public static final class TermOrdValComparator extends FieldComparator {
+ public static final class TermOrdValComparator extends FieldComparator<BytesRef> {
/** @lucene.internal */
final int[] ords;
/** @lucene.internal */
@@ -920,7 +933,7 @@ public abstract class FieldComparator {
* the underlying array access when looking up doc->ord
* @lucene.internal
*/
- abstract class PerSegmentComparator extends FieldComparator {
+ abstract class PerSegmentComparator extends FieldComparator<BytesRef> {
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
@@ -938,9 +951,22 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public BytesRef value(int slot) {
return TermOrdValComparator.this.value(slot);
}
+
+ @Override
+ public int compareValues(BytesRef val1, BytesRef val2) {
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+ return val1.compareTo(val2);
+ }
}
// Used per-segment when bit width of doc->ord is 8:
@@ -1244,7 +1270,7 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public BytesRef value(int slot) {
return values[slot];
}
}
@@ -1253,7 +1279,7 @@ public abstract class FieldComparator {
* comparisons are done using BytesRef.compareTo, which is
* slow for medium to large result sets but possibly
* very fast for very small results sets. */
- public static final class TermValComparator extends FieldComparator {
+ public static final class TermValComparator extends FieldComparator<BytesRef> {
private BytesRef[] values;
private DocTerms docTerms;
@@ -1316,9 +1342,22 @@ public abstract class FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public BytesRef value(int slot) {
return values[slot];
}
+
+ @Override
+ public int compareValues(BytesRef val1, BytesRef val2) {
+ if (val1 == null) {
+ if (val2 == null) {
+ return 0;
+ }
+ return -1;
+ } else if (val2 == null) {
+ return 1;
+ }
+ return val1.compareTo(val2);
+ }
}
final protected static int binarySearch(BytesRef br, DocTermsIndex a, BytesRef key) {
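Because the default compareValues implementation simply calls
first.compareTo(second), a comparator whose values may be null must override
it; the null-first pattern used by the BytesRef comparators above is, as a
sketch:

    @Override
    public int compareValues(BytesRef val1, BytesRef val2) {
      if (val1 == null) {
        return val2 == null ? 0 : -1;  // nulls sort first
      } else if (val2 == null) {
        return 1;
      }
      return val1.compareTo(val2);     // both non-null: natural BytesRef order
    }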
diff --git a/lucene/src/java/org/apache/lucene/search/FieldDoc.java b/lucene/src/java/org/apache/lucene/search/FieldDoc.java
index e6a5b1a34b1..c72dd684242 100644
--- a/lucene/src/java/org/apache/lucene/search/FieldDoc.java
+++ b/lucene/src/java/org/apache/lucene/search/FieldDoc.java
@@ -40,24 +40,31 @@ public class FieldDoc extends ScoreDoc {
/** Expert: The values which are used to sort the referenced document.
* The order of these will match the original sort criteria given by a
- * Sort object. Each Object will be either an Integer, Float or String,
- * depending on the type of values in the terms of the original field.
+ * Sort object. Each Object will have been returned from
+ * the value method of the corresponding
+ * FieldComparator used to sort this field.
* @see Sort
* @see IndexSearcher#search(Query,Filter,int,Sort)
*/
- public Comparable[] fields;
+ public Object[] fields;
/** Expert: Creates one of these objects with empty sort information. */
- public FieldDoc (int doc, float score) {
+ public FieldDoc(int doc, float score) {
super (doc, score);
}
/** Expert: Creates one of these objects with the given sort information. */
- public FieldDoc (int doc, float score, Comparable[] fields) {
+ public FieldDoc(int doc, float score, Object[] fields) {
super (doc, score);
this.fields = fields;
}
+ /** Expert: Creates one of these objects with the given sort information. */
+ public FieldDoc(int doc, float score, Object[] fields, int shardIndex) {
+ super (doc, score, shardIndex);
+ this.fields = fields;
+ }
+
// A convenience method for debugging.
@Override
public String toString() {
diff --git a/lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java b/lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java
index 601a662ac98..d32f0a231a4 100644
--- a/lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java
+++ b/lucene/src/java/org/apache/lucene/search/FieldValueHitQueue.java
@@ -200,7 +200,7 @@ public abstract class FieldValueHitQueue extends PriorityQueue<FieldValueHitQueue.Entry> {
- final Comparable<?>[] fields = new Comparable[n];
+ final Object[] fields = new Object[n];
for (int i = 0; i < n; ++i) {
fields[i] = comparators[i].value(entry.slot);
}
diff --git a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
index 8429ec0c9af..ce404f9ff4a 100644
--- a/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
+++ b/lucene/src/java/org/apache/lucene/search/IndexSearcher.java
@@ -289,7 +289,7 @@ public class IndexSearcher {
*/
public TopDocs search(Query query, Filter filter, int n)
throws IOException {
- return search(createWeight(query), filter, n);
+ return search(createNormalizedWeight(query), filter, n);
}
/** Lower-level search API.
@@ -310,7 +310,7 @@ public class IndexSearcher {
*/
public void search(Query query, Filter filter, Collector results)
throws IOException {
- search(leafContexts, createWeight(query), filter, results);
+ search(leafContexts, createNormalizedWeight(query), filter, results);
}
/** Lower-level search API.
@@ -328,7 +328,7 @@ public class IndexSearcher {
*/
public void search(Query query, Collector results)
throws IOException {
- search(leafContexts, createWeight(query), null, results);
+ search(leafContexts, createNormalizedWeight(query), null, results);
}
/** Search implementation with arbitrary sorting. Finds
@@ -344,7 +344,7 @@ public class IndexSearcher {
*/
public TopFieldDocs search(Query query, Filter filter, int n,
Sort sort) throws IOException {
- return search(createWeight(query), filter, n, sort);
+ return search(createNormalizedWeight(query), filter, n, sort);
}
/**
@@ -357,7 +357,7 @@ public class IndexSearcher {
*/
public TopFieldDocs search(Query query, int n,
Sort sort) throws IOException {
- return search(createWeight(query), null, n, sort);
+ return search(createNormalizedWeight(query), null, n, sort);
}
/** Expert: Low-level search implementation. Finds the top n
@@ -443,7 +443,7 @@ public class IndexSearcher {
* Collector)}.
*/
protected TopFieldDocs search(Weight weight, Filter filter, int nDocs,
- Sort sort, boolean fillFields)
+ Sort sort, boolean fillFields)
throws IOException {
if (sort == null) throw new NullPointerException();
@@ -623,7 +623,7 @@ public class IndexSearcher {
* entire index.
*/
public Explanation explain(Query query, int doc) throws IOException {
- return explain(createWeight(query), doc);
+ return explain(createNormalizedWeight(query), doc);
}
/** Expert: low-level implementation method
@@ -665,13 +665,23 @@ public class IndexSearcher {
}
/**
- * creates a weight for query
- * @return new weight
+ * Creates a normalized weight for a top-level {@link Query}.
+ * The query is rewritten by this method, {@link Query#createWeight} is called,
+ * and afterwards the {@link Weight} is normalized. The returned {@code Weight}
+ * can then directly be used to get a {@link Scorer}.
+ * @lucene.internal
*/
- protected Weight createWeight(Query query) throws IOException {
- return query.weight(this);
+ public Weight createNormalizedWeight(Query query) throws IOException {
+ query = rewrite(query);
+ Weight weight = query.createWeight(this);
+ float sum = weight.sumOfSquaredWeights();
+ float norm = getSimilarityProvider().queryNorm(sum);
+ if (Float.isInfinite(norm) || Float.isNaN(norm))
+ norm = 1.0f;
+ weight.normalize(norm);
+ return weight;
}
-
+
/**
* Returns this searcher's top-level {@link ReaderContext}.
* @see IndexReader#getTopReaderContext()
diff --git a/lucene/src/java/org/apache/lucene/search/Query.java b/lucene/src/java/org/apache/lucene/search/Query.java
index 40ec80d44a5..714b62836a4 100644
--- a/lucene/src/java/org/apache/lucene/search/Query.java
+++ b/lucene/src/java/org/apache/lucene/search/Query.java
@@ -91,21 +91,6 @@ public abstract class Query implements Cloneable {
throw new UnsupportedOperationException();
}
- /**
- * Expert: Constructs and initializes a Weight for a top-level query.
- */
- public Weight weight(IndexSearcher searcher) throws IOException {
- Query query = searcher.rewrite(this);
- Weight weight = query.createWeight(searcher);
- float sum = weight.sumOfSquaredWeights();
- float norm = searcher.getSimilarityProvider().queryNorm(sum);
- if (Float.isInfinite(norm) || Float.isNaN(norm))
- norm = 1.0f;
- weight.normalize(norm);
- return weight;
- }
-
-
/** Expert: called to re-write queries into primitive queries. For example,
* a PrefixQuery will be rewritten into a BooleanQuery that consists
* of TermQuerys.
diff --git a/lucene/src/java/org/apache/lucene/search/QueryWrapperFilter.java b/lucene/src/java/org/apache/lucene/search/QueryWrapperFilter.java
index 175e36d2d45..c0cb638fb1f 100644
--- a/lucene/src/java/org/apache/lucene/search/QueryWrapperFilter.java
+++ b/lucene/src/java/org/apache/lucene/search/QueryWrapperFilter.java
@@ -52,7 +52,7 @@ public class QueryWrapperFilter extends Filter {
// get a private context that is used to rewrite, createWeight and score eventually
assert context.reader.getTopReaderContext().isAtomic;
final AtomicReaderContext privateContext = (AtomicReaderContext) context.reader.getTopReaderContext();
- final Weight weight = query.weight(new IndexSearcher(privateContext));
+ final Weight weight = new IndexSearcher(privateContext).createNormalizedWeight(query);
return new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
diff --git a/lucene/src/java/org/apache/lucene/search/ScoreDoc.java b/lucene/src/java/org/apache/lucene/search/ScoreDoc.java
index 47d9fa7d5a7..5bccf2572d5 100644
--- a/lucene/src/java/org/apache/lucene/search/ScoreDoc.java
+++ b/lucene/src/java/org/apache/lucene/search/ScoreDoc.java
@@ -17,21 +17,30 @@ package org.apache.lucene.search;
* limitations under the License.
*/
-/** Expert: Returned by low-level search implementations.
- * @see TopDocs */
+/** Holds one hit in {@link TopDocs}. */
+
public class ScoreDoc {
- /** Expert: The score of this document for the query. */
+
+ /** The score of this document for the query. */
public float score;
- /** Expert: A hit document's number.
- * @see IndexSearcher#doc(int)
- */
+ /** A hit document's number.
+ * @see IndexSearcher#doc(int) */
public int doc;
- /** Expert: Constructs a ScoreDoc. */
+ /** Only set by {@link TopDocs#merge} */
+ public int shardIndex;
+
+ /** Constructs a ScoreDoc. */
public ScoreDoc(int doc, float score) {
+ this(doc, score, -1);
+ }
+
+ /** Constructs a ScoreDoc. */
+ public ScoreDoc(int doc, float score, int shardIndex) {
this.doc = doc;
this.score = score;
+ this.shardIndex = shardIndex;
}
// A convenience method for debugging.
@@ -39,5 +48,4 @@ public class ScoreDoc {
public String toString() {
return "doc=" + doc + " score=" + score;
}
-
}
diff --git a/lucene/src/java/org/apache/lucene/search/SortField.java b/lucene/src/java/org/apache/lucene/search/SortField.java
index 8fdc66c36e8..60e0a1a9660 100644
--- a/lucene/src/java/org/apache/lucene/search/SortField.java
+++ b/lucene/src/java/org/apache/lucene/search/SortField.java
@@ -91,10 +91,10 @@ public class SortField {
public static final int BYTES = 12;
/** Represents sorting by document score (relevance). */
- public static final SortField FIELD_SCORE = new SortField (null, SCORE);
+ public static final SortField FIELD_SCORE = new SortField(null, SCORE);
/** Represents sorting by document number (index order). */
- public static final SortField FIELD_DOC = new SortField (null, DOC);
+ public static final SortField FIELD_DOC = new SortField(null, DOC);
private String field;
private int type; // defaults to determining type dynamically
@@ -111,7 +111,7 @@ public class SortField {
* type is SCORE or DOC.
* @param type Type of values in the terms.
*/
- public SortField (String field, int type) {
+ public SortField(String field, int type) {
initFieldType(field, type);
}
@@ -122,7 +122,7 @@ public class SortField {
* @param type Type of values in the terms.
* @param reverse True if natural order should be reversed.
*/
- public SortField (String field, int type, boolean reverse) {
+ public SortField(String field, int type, boolean reverse) {
initFieldType(field, type);
this.reverse = reverse;
}
@@ -140,7 +140,7 @@ public class SortField {
* @deprecated (4.0) use EntryCreator version
*/
@Deprecated
- public SortField (String field, FieldCache.Parser parser) {
+ public SortField(String field, FieldCache.Parser parser) {
this(field, parser, false);
}
@@ -158,7 +158,7 @@ public class SortField {
* @deprecated (4.0) use EntryCreator version
*/
@Deprecated
- public SortField (String field, FieldCache.Parser parser, boolean reverse) {
+ public SortField(String field, FieldCache.Parser parser, boolean reverse) {
if (field == null) {
throw new IllegalArgumentException("field can only be null when type is SCORE or DOC");
}
@@ -225,7 +225,7 @@ public class SortField {
* @param field Name of field to sort by; cannot be null.
* @param comparator Returns a comparator for sorting hits.
*/
- public SortField (String field, FieldComparatorSource comparator) {
+ public SortField(String field, FieldComparatorSource comparator) {
initFieldType(field, CUSTOM);
this.comparatorSource = comparator;
}
@@ -235,7 +235,7 @@ public class SortField {
* @param comparator Returns a comparator for sorting hits.
* @param reverse True if natural order should be reversed.
*/
- public SortField (String field, FieldComparatorSource comparator, boolean reverse) {
+ public SortField(String field, FieldComparatorSource comparator, boolean reverse) {
initFieldType(field, CUSTOM);
this.reverse = reverse;
this.comparatorSource = comparator;
diff --git a/lucene/src/java/org/apache/lucene/search/TermQuery.java b/lucene/src/java/org/apache/lucene/search/TermQuery.java
index cb729ababc5..290d106c0f9 100644
--- a/lucene/src/java/org/apache/lucene/search/TermQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/TermQuery.java
@@ -89,7 +89,7 @@ public class TermQuery extends Query {
public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
final String field = term.field();
final IndexReader reader = context.reader;
- assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight is not the same as the current reader's top-reader";
+ assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context) + ")";
final TermState state = termStates.get(context.ord);
if (state == null) { // term is not present in that reader
diff --git a/lucene/src/java/org/apache/lucene/search/TopDocs.java b/lucene/src/java/org/apache/lucene/search/TopDocs.java
index ba5f3e45252..7c9c05c9f7f 100644
--- a/lucene/src/java/org/apache/lucene/search/TopDocs.java
+++ b/lucene/src/java/org/apache/lucene/search/TopDocs.java
@@ -17,15 +17,21 @@ package org.apache.lucene.search;
* limitations under the License.
*/
+import java.io.IOException;
+
+import org.apache.lucene.util.PriorityQueue;
+
/** Represents hits returned by {@link
* IndexSearcher#search(Query,Filter,int)} and {@link
* IndexSearcher#search(Query,int)}. */
public class TopDocs {
- /** The total number of hits for the query.
- */
+
+ /** The total number of hits for the query. */
public int totalHits;
+
/** The top hits for the query. */
public ScoreDoc[] scoreDocs;
+
/** Stores the maximum score value encountered, needed for normalizing. */
private float maxScore;
@@ -34,12 +40,12 @@ public class TopDocs {
* scores are not tracked, this returns {@link Float#NaN}.
*/
public float getMaxScore() {
- return maxScore;
+ return maxScore;
}
/** Sets the maximum score value encountered. */
public void setMaxScore(float maxScore) {
- this.maxScore=maxScore;
+ this.maxScore=maxScore;
}
/** Constructs a TopDocs with a default maxScore=Float.NaN. */
@@ -52,4 +58,199 @@ public class TopDocs {
this.scoreDocs = scoreDocs;
this.maxScore = maxScore;
}
+
+ // Refers to one hit:
+ private static class ShardRef {
+ // Which shard (index into shardHits[]):
+ final int shardIndex;
+
+ // Which hit within the shard:
+ int hitIndex;
+
+ public ShardRef(int shardIndex) {
+ this.shardIndex = shardIndex;
+ }
+
+ @Override
+ public String toString() {
+ return "ShardRef(shardIndex=" + shardIndex + " hitIndex=" + hitIndex + ")";
+ }
+ };
+
+ // Specialized MergeSortQueue that just merges by
+ // relevance score, descending:
+ private static class ScoreMergeSortQueue extends PriorityQueue<ShardRef> {
+ final ScoreDoc[][] shardHits;
+
+ public ScoreMergeSortQueue(TopDocs[] shardHits) {
+ super(shardHits.length);
+ this.shardHits = new ScoreDoc[shardHits.length][];
+ for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
+ this.shardHits[shardIDX] = shardHits[shardIDX].scoreDocs;
+ }
+ }
+
+ // Returns true if first is < second
+ @Override
+ public boolean lessThan(ShardRef first, ShardRef second) {
+ assert first != second;
+ final float firstScore = shardHits[first.shardIndex][first.hitIndex].score;
+ final float secondScore = shardHits[second.shardIndex][second.hitIndex].score;
+ if (firstScore < secondScore) {
+ return false;
+ } else if (firstScore > secondScore) {
+ return true;
+ } else {
+ // Tie break: earlier shard wins
+ if (first.shardIndex < second.shardIndex) {
+ return true;
+ } else if (first.shardIndex > second.shardIndex) {
+ return false;
+ } else {
+ // Tie break in same shard: resolve however the
+ // shard had resolved it:
+ assert first.hitIndex != second.hitIndex;
+ return first.hitIndex < second.hitIndex;
+ }
+ }
+ }
+ }
+
+ private static class MergeSortQueue extends PriorityQueue<ShardRef> {
+ // These are really FieldDoc instances:
+ final ScoreDoc[][] shardHits;
+ final FieldComparator[] comparators;
+ final int[] reverseMul;
+
+ public MergeSortQueue(Sort sort, TopDocs[] shardHits) throws IOException {
+ super(shardHits.length);
+ this.shardHits = new ScoreDoc[shardHits.length][];
+ for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
+ final ScoreDoc[] shard = shardHits[shardIDX].scoreDocs;
+ if (shard != null) {
+ this.shardHits[shardIDX] = shard;
+ // Fail gracefully if API is misused:
+ for(int hitIDX=0;hitIDX<shard.length;hitIDX++) {
+ final ScoreDoc sd = shard[hitIDX];
+ if (!(sd instanceof FieldDoc)) {
+ throw new IllegalArgumentException("shard " + shardIDX + " was not sorted by the provided Sort (expected FieldDoc but got ScoreDoc)");
+ }
+ final FieldDoc fd = (FieldDoc) sd;
+ if (fd.fields == null) {
+ throw new IllegalArgumentException("shard " + shardIDX + " did not set sort field values (FieldDoc.fields is null); you must pass fillFields=true to IndexSearcher.search on each shard");
+ }
+ }
+ }
+ }
+
+ final SortField[] sortFields = sort.getSort();
+ comparators = new FieldComparator[sortFields.length];
+ reverseMul = new int[sortFields.length];
+ for(int compIDX=0;compIDX<sortFields.length;compIDX++) {
+ final SortField sortField = sortFields[compIDX];
+ comparators[compIDX] = sortField.getComparator(1, compIDX);
+ reverseMul[compIDX] = sortField.getReverse() ? -1 : 1;
+ }
+ }
+
+ // Returns true if first is < second
+ @Override
+ public boolean lessThan(ShardRef first, ShardRef second) {
+ assert first != second;
+ final FieldDoc firstFD = (FieldDoc) shardHits[first.shardIndex][first.hitIndex];
+ final FieldDoc secondFD = (FieldDoc) shardHits[second.shardIndex][second.hitIndex];
+
+ for(int compIDX=0;compIDX<comparators.length;compIDX++) {
+ final FieldComparator comp = comparators[compIDX];
+ final int cmp = reverseMul[compIDX] * comp.compareValues(firstFD.fields[compIDX], secondFD.fields[compIDX]);
+ if (cmp != 0) {
+ return cmp < 0;
+ }
+ }
+
+ // Tie break: earlier shard wins
+ if (first.shardIndex < second.shardIndex) {
+ return true;
+ } else if (first.shardIndex > second.shardIndex) {
+ //System.out.println(" return tb false");
+ return false;
+ } else {
+ // Tie break in same shard: resolve however the
+ // shard had resolved it:
+ //System.out.println(" return tb " + (first.hitIndex < second.hitIndex));
+ assert first.hitIndex != second.hitIndex;
+ return first.hitIndex < second.hitIndex;
+ }
+ }
+ }
+
+ /** Returns a new TopDocs, containing topN results across
+ * the provided TopDocs, sorting by the specified {@link
+ * Sort}. Each of the TopDocs must have been sorted by
+ * the same Sort, and sort field values must have been
+ * filled (ie, fillFields=true must be
+ * passed to {@link
+ * TopFieldCollector#create}.
+ *
+ * Pass sort=null to merge sort by score descending.
+ *
+ * @lucene.experimental */
+ public static TopDocs merge(Sort sort, int topN, TopDocs[] shardHits) throws IOException {
+
+ final PriorityQueue<ShardRef> queue;
+ if (sort == null) {
+ queue = new ScoreMergeSortQueue(shardHits);
+ } else {
+ queue = new MergeSortQueue(sort, shardHits);
+ }
+
+ int totalHitCount = 0;
+ float maxScore = Float.MIN_VALUE;
+ for(int shardIDX=0;shardIDX<shardHits.length;shardIDX++) {
+ final TopDocs shard = shardHits[shardIDX];
+ if (shard.scoreDocs != null && shard.scoreDocs.length > 0) {
+ totalHitCount += shard.totalHits;
+ queue.add(new ShardRef(shardIDX));
+ maxScore = Math.max(maxScore, shard.getMaxScore());
+ //System.out.println(" maxScore now " + maxScore + " vs " + shard.getMaxScore());
+ }
+ }
+
+ final ScoreDoc[] hits = new ScoreDoc[Math.min(topN, totalHitCount)];
+
+ int hitUpto = 0;
+ while(hitUpto < hits.length) {
+ assert queue.size() > 0;
+ ShardRef ref = queue.pop();
+ final ScoreDoc hit = shardHits[ref.shardIndex].scoreDocs[ref.hitIndex++];
+ if (sort == null) {
+ hits[hitUpto] = new ScoreDoc(hit.doc, hit.score, ref.shardIndex);
+ } else {
+ hits[hitUpto] = new FieldDoc(hit.doc, hit.score, ((FieldDoc) hit).fields, ref.shardIndex);
+ }
+
+ //System.out.println(" hitUpto=" + hitUpto);
+ //System.out.println(" doc=" + hits[hitUpto].doc + " score=" + hits[hitUpto].score);
+
+ hitUpto++;
+
+ if (ref.hitIndex < shardHits[ref.shardIndex].scoreDocs.length) {
+ // Not done with these TopDocs yet:
+ queue.add(ref);
+ }
+ }
+
+ if (sort == null) {
+ return new TopDocs(totalHitCount, hits, maxScore);
+ } else {
+ return new TopFieldDocs(totalHitCount, hits, sort.getSort(), maxScore);
+ }
+ }
}
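To see the new TopDocs.merge API end to end, here is a minimal sketch (not part of the patch) that assumes one IndexSearcher per shard and merges by descending relevance (the sort == null case); each merged hit's shardIndex records the shard it came from:

    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TopDocs;

    public class ShardMergeSketch {
      public static TopDocs searchAllShards(Query query, IndexSearcher[] shards, int topN) throws Exception {
        final TopDocs[] shardHits = new TopDocs[shards.length];
        for (int shardIDX = 0; shardIDX < shards.length; shardIDX++) {
          shardHits[shardIDX] = shards[shardIDX].search(query, topN);
        }
        // sort == null merges by score, descending; to merge field-sorted
        // hits instead, pass the same Sort used on every shard (collected
        // with fillFields=true).
        return TopDocs.merge(null, topN, shardHits);
      }
    }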
diff --git a/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java b/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
index 8a5ba9abf41..9ea258f30c6 100755
--- a/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
+++ b/lucene/src/java/org/apache/lucene/search/function/CustomScoreQuery.java
@@ -187,7 +187,7 @@ public class CustomScoreQuery extends Query {
boolean qStrict;
public CustomWeight(IndexSearcher searcher) throws IOException {
- this.subQueryWeight = subQuery.weight(searcher);
+ this.subQueryWeight = subQuery.createWeight(searcher);
this.valSrcWeights = new Weight[valSrcQueries.length];
for(int i = 0; i < valSrcQueries.length; i++) {
this.valSrcWeights[i] = valSrcQueries[i].createWeight(searcher);
diff --git a/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java b/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
index 9703385f9b9..3bdbc369807 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/RandomIndexWriter.java
@@ -308,16 +308,24 @@ public class RandomIndexWriter implements Closeable {
return getReader(true);
}
+ private boolean doRandomOptimize = true;
+
+ public void setDoRandomOptimize(boolean v) {
+ doRandomOptimize = v;
+ }
+
private void doRandomOptimize() throws IOException {
- final int segCount = w.getSegmentCount();
- if (r.nextBoolean() || segCount == 0) {
- // full optimize
- w.optimize();
- } else {
- // partial optimize
- final int limit = _TestUtil.nextInt(r, 1, segCount);
- w.optimize(limit);
- assert w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
+ if (doRandomOptimize) {
+ final int segCount = w.getSegmentCount();
+ if (r.nextBoolean() || segCount == 0) {
+ // full optimize
+ w.optimize();
+ } else {
+ // partial optimize
+ final int limit = _TestUtil.nextInt(r, 1, segCount);
+ w.optimize(limit);
+ assert w.getSegmentCount() <= limit: "limit=" + limit + " actual=" + w.getSegmentCount();
+ }
}
switchDoDocValues();
}
diff --git a/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/TermInfosWriter.java b/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/TermInfosWriter.java
index 1b7644b69f3..d3e00255efb 100644
--- a/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/TermInfosWriter.java
+++ b/lucene/src/test-framework/org/apache/lucene/index/codecs/preflexrw/TermInfosWriter.java
@@ -22,6 +22,8 @@ import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.index.FieldInfos;
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.index.codecs.preflex.PreFlexCodec;
import org.apache.lucene.index.codecs.preflex.TermInfo;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IndexOutput;
@@ -102,7 +104,9 @@ final class TermInfosWriter implements Closeable {
}
try {
- directory.deleteFile(segment + (isIndex ? ".tii" : ".tis"));
+ directory.deleteFile(IndexFileNames.segmentFileName(segment, "",
+ (isIndex ? PreFlexCodec.TERMS_INDEX_EXTENSION
+ : PreFlexCodec.TERMS_EXTENSION)));
} catch (IOException ignored) {
}
}
@@ -119,7 +123,9 @@ final class TermInfosWriter implements Closeable {
indexInterval = interval;
fieldInfos = fis;
isIndex = isi;
- output = directory.createOutput(segment + (isIndex ? ".tii" : ".tis"));
+ output = directory.createOutput(IndexFileNames.segmentFileName(segment, "",
+ (isIndex ? PreFlexCodec.TERMS_INDEX_EXTENSION
+ : PreFlexCodec.TERMS_EXTENSION)));
boolean success = false;
try {
output.writeInt(FORMAT_CURRENT); // write format
@@ -139,7 +145,9 @@ final class TermInfosWriter implements Closeable {
}
try {
- directory.deleteFile(segment + (isIndex ? ".tii" : ".tis"));
+ directory.deleteFile(IndexFileNames.segmentFileName(segment, "",
+ (isIndex ? PreFlexCodec.TERMS_INDEX_EXTENSION
+ : PreFlexCodec.TERMS_EXTENSION)));
} catch (IOException ignored) {
}
}
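For context on the helper being swapped in above: IndexFileNames.segmentFileName(segment, suffix, extension) assembles the canonical per-segment file name, so the rewrite is behavior-preserving. A minimal sketch (the segment name "_3" is made up):

    import org.apache.lucene.index.IndexFileNames;
    import org.apache.lucene.index.codecs.preflex.PreFlexCodec;

    public class SegmentFileNameSketch {
      public static void main(String[] args) {
        // Prints "_3.tis" and "_3.tii" -- the same names the old
        // string concatenation (segment + ".tis"/".tii") produced.
        System.out.println(IndexFileNames.segmentFileName("_3", "", PreFlexCodec.TERMS_EXTENSION));
        System.out.println(IndexFileNames.segmentFileName("_3", "", PreFlexCodec.TERMS_INDEX_EXTENSION));
      }
    }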
diff --git a/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java b/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
new file mode 100644
index 00000000000..41541264955
--- /dev/null
+++ b/lucene/src/test-framework/org/apache/lucene/search/AssertingIndexSearcher.java
@@ -0,0 +1,90 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.concurrent.ExecutorService;
+import java.io.IOException;
+
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexReader.AtomicReaderContext;
+import org.apache.lucene.index.IndexReader.ReaderContext;
+
+/**
+ * Helper class that adds some extra checks to ensure correct
+ * usage of {@code IndexSearcher} and {@code Weight}.
+ * TODO: add more checks; this is just a start.
+ */
+public class AssertingIndexSearcher extends IndexSearcher {
+ public AssertingIndexSearcher(IndexReader r) {
+ super(r);
+ }
+
+ public AssertingIndexSearcher(ReaderContext context) {
+ super(context);
+ }
+
+ public AssertingIndexSearcher(IndexReader r, ExecutorService ex) {
+ super(r, ex);
+ }
+
+ public AssertingIndexSearcher(ReaderContext context, ExecutorService ex) {
+ super(context, ex);
+ }
+
+ /** Ensures that the returned {@code Weight} is not normalized again, which may produce wrong scores. */
+ @Override
+ public Weight createNormalizedWeight(Query query) throws IOException {
+ final Weight w = super.createNormalizedWeight(query);
+ return new Weight() {
+ @Override
+ public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
+ return w.explain(context, doc);
+ }
+
+ @Override
+ public Query getQuery() {
+ return w.getQuery();
+ }
+
+ @Override
+ public float getValue() {
+ return w.getValue();
+ }
+
+ @Override
+ public void normalize(float norm) {
+ throw new IllegalStateException("Weight already normalized.");
+ }
+
+ @Override
+ public Scorer scorer(AtomicReaderContext context, ScorerContext scorerContext) throws IOException {
+ return w.scorer(context, scorerContext);
+ }
+
+ @Override
+ public float sumOfSquaredWeights() throws IOException {
+ throw new IllegalStateException("Weight already normalized.");
+ }
+
+ @Override
+ public boolean scoresDocsOutOfOrder() {
+ return w.scoresDocsOutOfOrder();
+ }
+ };
+ }
+}
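To make the guard concrete, a minimal sketch (not in the patch) of the misuse AssertingIndexSearcher traps: obtaining a normalized Weight and then normalizing it a second time, which a plain IndexSearcher would silently accept, skewing scores:

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.search.AssertingIndexSearcher;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Weight;

    public class AssertingSearcherSketch {
      static void demonstrate(IndexReader reader, Query query) throws Exception {
        IndexSearcher s = new AssertingIndexSearcher(reader);
        Weight w = s.createNormalizedWeight(query);
        w.normalize(1.0f); // throws IllegalStateException: "Weight already normalized."
      }
    }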
diff --git a/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java b/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java
index 9cdec64ad23..699af49f51c 100644
--- a/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java
+++ b/lucene/src/test-framework/org/apache/lucene/search/QueryUtils.java
@@ -198,7 +198,7 @@ public class QueryUtils {
public static void checkSkipTo(final Query q, final IndexSearcher s) throws IOException {
//System.out.println("Checking "+q);
final AtomicReaderContext[] readerContextArray = ReaderUtil.leaves(s.getTopReaderContext());
- if (q.weight(s).scoresDocsOutOfOrder()) return; // in this case order of skipTo() might differ from that of next().
+ if (s.createNormalizedWeight(q).scoresDocsOutOfOrder()) return; // in this case order of skipTo() might differ from that of next().
final int skip_op = 0;
final int next_op = 1;
@@ -241,7 +241,7 @@ public class QueryUtils {
lastDoc[0] = doc;
try {
if (scorer == null) {
- Weight w = q.weight(s);
+ Weight w = s.createNormalizedWeight(q);
scorer = w.scorer(readerContextArray[leafPtr], ScorerContext.def());
}
@@ -286,7 +286,7 @@ public class QueryUtils {
if (lastReader[0] != null) {
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
- Weight w = q.weight(indexSearcher);
+ Weight w = indexSearcher.createNormalizedWeight(q);
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -312,7 +312,7 @@ public class QueryUtils {
// previous reader, hits NO_MORE_DOCS
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader, false);
- Weight w = q.weight(indexSearcher);
+ Weight w = indexSearcher.createNormalizedWeight(q);
Scorer scorer = w.scorer((AtomicReaderContext)previousReader.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -343,7 +343,7 @@ public class QueryUtils {
try {
long startMS = System.currentTimeMillis();
for (int i=lastDoc[0]+1; i<=doc; i++) {
- Weight w = q.weight(s);
+ Weight w = s.createNormalizedWeight(q);
Scorer scorer = w.scorer(context[leafPtr], ScorerContext.def());
Assert.assertTrue("query collected "+doc+" but skipTo("+i+") says no more docs!",scorer.advance(i) != DocIdSetIterator.NO_MORE_DOCS);
Assert.assertEquals("query collected "+doc+" but skipTo("+i+") got to "+scorer.docID(),doc,scorer.docID());
@@ -370,7 +370,7 @@ public class QueryUtils {
if (lastReader[0] != null) {
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
- Weight w = q.weight(indexSearcher);
+ Weight w = indexSearcher.createNormalizedWeight(q);
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
@@ -394,7 +394,7 @@ public class QueryUtils {
// previous reader, hits NO_MORE_DOCS
final IndexReader previousReader = lastReader[0];
IndexSearcher indexSearcher = LuceneTestCase.newSearcher(previousReader);
- Weight w = q.weight(indexSearcher);
+ Weight w = indexSearcher.createNormalizedWeight(q);
Scorer scorer = w.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def());
if (scorer != null) {
boolean more = scorer.advance(lastDoc[0] + 1) != DocIdSetIterator.NO_MORE_DOCS;
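All of the hunks above apply the same mechanical cut-over away from the deprecated Query.weight(Searcher). As a standalone reminder of the new idiom (a sketch, not patch code):

    import java.io.IOException;
    import org.apache.lucene.index.IndexReader.AtomicReaderContext;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Scorer;
    import org.apache.lucene.search.Weight;
    import org.apache.lucene.search.Weight.ScorerContext;

    public class NormalizedWeightIdiom {
      // Before (deprecated by LUCENE-3208): Weight w = query.weight(searcher);
      // After: the searcher creates and normalizes the Weight in one call.
      static Scorer scorerFor(IndexSearcher searcher, Query query, AtomicReaderContext leaf) throws IOException {
        final Weight w = searcher.createNormalizedWeight(query);
        return w.scorer(leaf, ScorerContext.def());
      }
    }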
diff --git a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
index 1df2902507b..e4243c332c2 100644
--- a/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
+++ b/lucene/src/test-framework/org/apache/lucene/util/LuceneTestCase.java
@@ -55,6 +55,7 @@ import org.apache.lucene.index.codecs.standard.StandardCodec;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.FieldCache.CacheEntry;
+import org.apache.lucene.search.AssertingIndexSearcher;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
@@ -1231,13 +1232,11 @@ public abstract class LuceneTestCase extends Assert {
* with one that returns null for getSequentialSubReaders.
*/
public static IndexSearcher newSearcher(IndexReader r, boolean maybeWrap) throws IOException {
-
if (random.nextBoolean()) {
if (maybeWrap && rarely()) {
- return new IndexSearcher(new SlowMultiReaderWrapper(r));
- } else {
- return new IndexSearcher(r);
+ r = new SlowMultiReaderWrapper(r);
}
+ return random.nextBoolean() ? new AssertingIndexSearcher(r) : new AssertingIndexSearcher(r.getTopReaderContext());
} else {
int threads = 0;
final ExecutorService ex = (random.nextBoolean()) ? null
@@ -1246,20 +1245,31 @@ public abstract class LuceneTestCase extends Assert {
if (ex != null && VERBOSE) {
System.out.println("NOTE: newSearcher using ExecutorService with " + threads + " threads");
}
- return new IndexSearcher(r.getTopReaderContext(), ex) {
- @Override
- public void close() throws IOException {
- super.close();
- if (ex != null) {
- ex.shutdown();
- try {
- ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
+ return random.nextBoolean() ?
+ new AssertingIndexSearcher(r, ex) {
+ @Override
+ public void close() throws IOException {
+ super.close();
+ shutdownExecutorService(ex);
}
- }
- };
+ } : new AssertingIndexSearcher(r.getTopReaderContext(), ex) {
+ @Override
+ public void close() throws IOException {
+ super.close();
+ shutdownExecutorService(ex);
+ }
+ };
+ }
+ }
+
+ static void shutdownExecutorService(ExecutorService ex) {
+ if (ex != null) {
+ ex.shutdown();
+ try {
+ ex.awaitTermination(1000, TimeUnit.MILLISECONDS);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
}
}
diff --git a/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java b/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
index 1d2317b74c3..a42941e51cd 100644
--- a/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
+++ b/lucene/src/test-framework/org/apache/lucene/util/_TestUtil.java
@@ -27,10 +27,10 @@ import java.io.OutputStream;
import java.io.PrintStream;
import java.lang.reflect.Method;
import java.util.Enumeration;
-import java.util.List;
-import java.util.Random;
-import java.util.Map;
import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
@@ -46,6 +46,9 @@ import org.apache.lucene.index.MergeScheduler;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.index.codecs.Codec;
import org.apache.lucene.index.codecs.CodecProvider;
+import org.apache.lucene.search.FieldDoc;
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.junit.Assert;
@@ -468,4 +471,24 @@ public class _TestUtil {
newName.append(suffix);
return new File(directory, newName.toString());
}
+
+ public static void assertEquals(TopDocs expected, TopDocs actual) {
+ Assert.assertEquals("wrong total hits", expected.totalHits, actual.totalHits);
+ Assert.assertEquals("wrong maxScore", expected.getMaxScore(), actual.getMaxScore(), 0.0);
+ Assert.assertEquals("wrong hit count", expected.scoreDocs.length, actual.scoreDocs.length);
+ for(int hitIDX=0;hitIDX<expected.scoreDocs.length;hitIDX++) {
+ final ScoreDoc expectedSD = expected.scoreDocs[hitIDX];
+ final ScoreDoc actualSD = actual.scoreDocs[hitIDX];
+ Assert.assertEquals("wrong hit docID", expectedSD.doc, actualSD.doc);
+ Assert.assertEquals("wrong hit score", expectedSD.score, actualSD.score, 0.0);
+ if (expectedSD instanceof FieldDoc) {
+ Assert.assertTrue(actualSD instanceof FieldDoc);
+ Assert.assertArrayEquals("wrong sort field values", ((FieldDoc) expectedSD).fields, ((FieldDoc) actualSD).fields);
+ } else {
+ Assert.assertFalse(actualSD instanceof FieldDoc);
+ }
+ }
+ }
}
diff --git a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
--- a/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
+++ b/lucene/src/test/org/apache/lucene/search/JustCompileSearch.java
- static final class JustCompileFieldComparator extends FieldComparator {
+ static final class JustCompileFieldComparator extends FieldComparator<Object> {
@Override
public int compare(int slot1, int slot2) {
@@ -132,10 +132,10 @@ final class JustCompileSearch {
}
@Override
- public Comparable<?> value(int slot) {
+ public Object value(int slot) {
throw new UnsupportedOperationException(UNSUPPORTED_MSG);
}
-
+
}
static final class JustCompileFieldComparatorSource extends FieldComparatorSource {
diff --git a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
index 272384b0e01..e8a6b69a948 100644
--- a/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
+++ b/lucene/src/test/org/apache/lucene/search/TestDisjunctionMaxQuery.java
@@ -173,7 +173,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
QueryUtils.check(random, dq, s);
assertTrue(s.getTopReaderContext().isAtomic);
- final Weight dw = dq.weight(s);
+ final Weight dw = s.createNormalizedWeight(dq);
final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
final boolean skipOk = ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS;
if (skipOk) {
@@ -188,7 +188,7 @@ public class TestDisjunctionMaxQuery extends LuceneTestCase {
dq.add(tq("dek", "DOES_NOT_EXIST"));
assertTrue(s.getTopReaderContext().isAtomic);
QueryUtils.check(random, dq, s);
- final Weight dw = dq.weight(s);
+ final Weight dw = s.createNormalizedWeight(dq);
final Scorer ds = dw.scorer((AtomicReaderContext)s.getTopReaderContext(), ScorerContext.def());
assertTrue("firsttime skipTo found no match",
ds.advance(3) != DocIdSetIterator.NO_MORE_DOCS);
diff --git a/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java b/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java
index 896220025e8..23e1afb145e 100644
--- a/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java
+++ b/lucene/src/test/org/apache/lucene/search/TestElevationComparator.java
@@ -139,7 +139,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
@Override
public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
- return new FieldComparator() {
+ return new FieldComparator<Integer>() {
FieldCache.DocTermsIndex idIndex;
private final int[] values = new int[numHits];
@@ -184,7 +184,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
}
@Override
- public Comparable<?> value(int slot) {
+ public Integer value(int slot) {
return Integer.valueOf(values[slot]);
}
};
diff --git a/lucene/src/test/org/apache/lucene/search/TestSort.java b/lucene/src/test/org/apache/lucene/search/TestSort.java
index 4c84601b682..52059ffef47 100644
--- a/lucene/src/test/org/apache/lucene/search/TestSort.java
+++ b/lucene/src/test/org/apache/lucene/search/TestSort.java
@@ -511,7 +511,7 @@ public class TestSort extends LuceneTestCase {
assertMatches (empty, queryX, sort, "");
}
- static class MyFieldComparator extends FieldComparator {
+ static class MyFieldComparator extends FieldComparator<Integer> {
int[] docValues;
int[] slotValues;
int bottomValue;
@@ -527,6 +527,7 @@ public class TestSort extends LuceneTestCase {
@Override
public int compare(int slot1, int slot2) {
+ // values are small enough that overflow won't happen
return slotValues[slot1] - slotValues[slot2];
}
@@ -553,7 +554,7 @@ public class TestSort extends LuceneTestCase {
}
@Override
- public Comparable<?> value(int slot) {
+ public Integer value(int slot) {
return Integer.valueOf(slotValues[slot]);
}
}
diff --git a/lucene/src/test/org/apache/lucene/search/TestTermScorer.java b/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
index d4f919856b4..fbdbb0c0b03 100644
--- a/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
+++ b/lucene/src/test/org/apache/lucene/search/TestTermScorer.java
@@ -73,7 +73,7 @@ public class TestTermScorer extends LuceneTestCase {
Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm);
- Weight weight = termQuery.weight(indexSearcher);
+ Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
Scorer ts = weight.scorer((AtomicReaderContext)indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
// we have 2 documents with the term all in them, one document for all the
@@ -134,7 +134,7 @@ public class TestTermScorer extends LuceneTestCase {
Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm);
- Weight weight = termQuery.weight(indexSearcher);
+ Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
assertTrue("next did not return a doc",
@@ -152,7 +152,7 @@ public class TestTermScorer extends LuceneTestCase {
Term allTerm = new Term(FIELD, "all");
TermQuery termQuery = new TermQuery(allTerm);
- Weight weight = termQuery.weight(indexSearcher);
+ Weight weight = indexSearcher.createNormalizedWeight(termQuery);
assertTrue(indexSearcher.getTopReaderContext().isAtomic);
Scorer ts = weight.scorer((AtomicReaderContext) indexSearcher.getTopReaderContext(), ScorerContext.def().scoreDocsInOrder(true).topScorer(true));
diff --git a/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java b/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java
new file mode 100644
index 00000000000..7c459f7fd16
--- /dev/null
+++ b/lucene/src/test/org/apache/lucene/search/TestTopDocsMerge.java
@@ -0,0 +1,244 @@
+package org.apache.lucene.search;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.NumericField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.ReaderUtil;
+import org.apache.lucene.util._TestUtil;
+
+public class TestTopDocsMerge extends LuceneTestCase {
+
+ private static class ShardSearcher extends IndexSearcher {
+ private final IndexReader.AtomicReaderContext[] ctx;
+
+ public ShardSearcher(IndexReader.AtomicReaderContext ctx, IndexReader.ReaderContext parent) {
+ super(parent);
+ this.ctx = new IndexReader.AtomicReaderContext[] {ctx};
+ }
+
+ public void search(Weight weight, Collector collector) throws IOException {
+ search(ctx, weight, null, collector);
+ }
+
+ public TopDocs search(Weight weight, int topN) throws IOException {
+ return search(ctx, weight, null, topN);
+ }
+
+ @Override
+ public String toString() {
+ return "ShardSearcher(" + ctx[0] + ")";
+ }
+ }
+
+ public void testSort() throws Exception {
+
+ IndexReader reader = null;
+ Directory dir = null;
+
+ final int numDocs = atLeast(1000);
+ //final int numDocs = atLeast(50);
+
+ final String[] tokens = new String[] {"a", "b", "c", "d", "e"};
+
+ if (VERBOSE) {
+ System.out.println("TEST: make index");
+ }
+
+ {
+ dir = newDirectory();
+ final RandomIndexWriter w = new RandomIndexWriter(random, dir);
+ // w.setDoRandomOptimize(false);
+
+ // w.w.getConfig().setMaxBufferedDocs(atLeast(100));
+
+ final String[] content = new String[atLeast(20)];
+
+ for(int contentIDX=0;contentIDX<content.length;contentIDX++) {
diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
--- a/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
+++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/AbstractFirstPassGroupingCollector.java
@@ public abstract class AbstractFirstPassGroupingCollector<GROUP_VALUE_TYPE> exten
SearchGroup<GROUP_VALUE_TYPE> searchGroup = new SearchGroup<GROUP_VALUE_TYPE>();
searchGroup.groupValue = group.groupValue;
if (fillFields) {
- searchGroup.sortValues = new Comparable[sortFieldCount];
+ searchGroup.sortValues = new Object[sortFieldCount];
+ for(int sortFieldIDX=0;sortFieldIDX<sortFieldCount;sortFieldIDX++) {
+ searchGroup.sortValues[sortFieldIDX] = comparators[sortFieldIDX].value(group.comparatorSlot);
+ }
+ }
diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
--- a/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
+++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/GroupDocs.java
@@ public class GroupDocs<GROUP_VALUE_TYPE> {
/** Matches the groupSort passed to {@link
* AbstractFirstPassGroupingCollector}. */
- public final Comparable[] groupSortValues;
+ public final Object[] groupSortValues;
public GroupDocs(float maxScore,
int totalHits,
ScoreDoc[] scoreDocs,
GROUP_VALUE_TYPE groupValue,
- Comparable[] groupSortValues) {
+ Object[] groupSortValues) {
this.maxScore = maxScore;
this.totalHits = totalHits;
this.scoreDocs = scoreDocs;
diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
index 11820da35f6..f2ecb42acc7 100644
--- a/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
+++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/SearchGroup.java
@@ -17,6 +17,14 @@ package org.apache.lucene.search.grouping;
* limitations under the License.
*/
+import java.io.IOException;
+import java.util.*;
+
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.util.BytesRef;
+
/**
* Represents a group that is found during the first pass search.
*
@@ -27,6 +35,287 @@ public class SearchGroup<GROUP_VALUE_TYPE> {
/** The value that defines this group */
public GROUP_VALUE_TYPE groupValue;
- /** The sort values used during sorting. Can be <code>null</code>. */
- public Comparable[] sortValues;
+ /** The sort values used during sorting. These are the
+ * groupSort field values of the highest rank document
+ * (by the groupSort) within the group. Can be
+ * <code>null</code> if <code>fillFields=false</code> had
+ * been passed to {@link AbstractFirstPassGroupingCollector#getTopGroups} */
+ public Object[] sortValues;
+
+ @Override
+ public String toString() {
+ return("SearchGroup(groupValue=" + groupValue + " sortValues=" + Arrays.toString(sortValues) + ")");
+ }
+
+ private static class ShardIter<T> {
+ public final Iterator<SearchGroup<T>> iter;
+ public final int shardIndex;
+
+ public ShardIter(Collection<SearchGroup<T>> shard, int shardIndex) {
+ this.shardIndex = shardIndex;
+ iter = shard.iterator();
+ assert iter.hasNext();
+ }
+
+ public SearchGroup<T> next() {
+ assert iter.hasNext();
+ final SearchGroup<T> group = iter.next();
+ if (group.sortValues == null) {
+ throw new IllegalArgumentException("group.sortValues is null; you must pass fillFields=true to the first pass collector");
+ }
+ return group;
+ }
+
+ @Override
+ public String toString() {
+ return "ShardIter(shard=" + shardIndex + ")";
+ }
+ }
+
+ // Holds all shards currently on the same group
+ private static class MergedGroup<T> {
+
+ // groupValue may be null!
+ public final T groupValue;
+
+ public Object[] topValues;
+ public final List<ShardIter<T>> shards = new ArrayList<ShardIter<T>>();
+ public int minShardIndex;
+ public boolean processed;
+ public boolean inQueue;
+
+ public MergedGroup(T groupValue) {
+ this.groupValue = groupValue;
+ }
+
+ // Only for assert
+ private boolean neverEquals(Object _other) {
+ if (_other instanceof MergedGroup) {
+ MergedGroup other = (MergedGroup) _other;
+ if (groupValue == null) {
+ assert other.groupValue != null;
+ } else {
+ assert !groupValue.equals(other.groupValue);
+ }
+ }
+ return true;
+ }
+
+ @Override
+ public boolean equals(Object _other) {
+ // We never have another MergedGroup instance with
+ // same groupValue
+ assert neverEquals(_other);
+
+ if (_other instanceof MergedGroup) {
+ MergedGroup other = (MergedGroup) _other;
+ if (groupValue == null) {
+ return other == null;
+ } else {
+ return groupValue.equals(other);
+ }
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ if (groupValue == null) {
+ return 0;
+ } else {
+ return groupValue.hashCode();
+ }
+ }
+ }
+
+ private static class GroupComparator<T> implements Comparator<MergedGroup<T>> {
+
+ public final FieldComparator[] comparators;
+ public final int[] reversed;
+
+ public GroupComparator(Sort groupSort) throws IOException {
+ final SortField[] sortFields = groupSort.getSort();
+ comparators = new FieldComparator[sortFields.length];
+ reversed = new int[sortFields.length];
+ for (int compIDX = 0; compIDX < sortFields.length; compIDX++) {
+ final SortField sortField = sortFields[compIDX];
+ comparators[compIDX] = sortField.getComparator(1, compIDX);
+ reversed[compIDX] = sortField.getReverse() ? -1 : 1;
+ }
+ }
+
+ @SuppressWarnings("unchecked")
+ public int compare(MergedGroup<T> group, MergedGroup<T> other) {
+ if (group == other) {
+ return 0;
+ }
+ //System.out.println("compare group=" + group + " other=" + other);
+ final Object[] groupValues = group.topValues;
+ final Object[] otherValues = other.topValues;
+ //System.out.println(" groupValues=" + groupValues + " otherValues=" + otherValues);
+ for (int compIDX = 0;compIDX < comparators.length; compIDX++) {
+ final int c = reversed[compIDX] * comparators[compIDX].compareValues(groupValues[compIDX],
+ otherValues[compIDX]);
+ if (c != 0) {
+ return c;
+ }
+ }
+
+ // Tie break by min shard index:
+ assert group.minShardIndex != other.minShardIndex;
+ return group.minShardIndex - other.minShardIndex;
+ }
+ }
+
+ private static class GroupMerger<T> {
+
+ private final GroupComparator<T> groupComp;
+ private final SortedSet<MergedGroup<T>> queue;
+ private final Map<T, MergedGroup<T>> groupsSeen;
+
+ public GroupMerger(Sort groupSort) throws IOException {
+ groupComp = new GroupComparator(groupSort);
+ queue = new TreeSet<MergedGroup<T>>(groupComp);
+ groupsSeen = new HashMap<T, MergedGroup<T>>();
+ }
+
+ @SuppressWarnings("unchecked")
+ private void updateNextGroup(int topN, ShardIter<T> shard) {
+ while(shard.iter.hasNext()) {
+ final SearchGroup<T> group = shard.next();
+ MergedGroup<T> mergedGroup = groupsSeen.get(group.groupValue);
+ final boolean isNew = mergedGroup == null;
+ //System.out.println(" next group=" + (group.groupValue == null ? "null" : ((BytesRef) group.groupValue).utf8ToString()) + " sort=" + Arrays.toString(group.sortValues));
+
+ if (isNew) {
+ // Start a new group:
+ //System.out.println(" new");
+ mergedGroup = new MergedGroup<T>(group.groupValue);
+ mergedGroup.minShardIndex = shard.shardIndex;
+ assert group.sortValues != null;
+ mergedGroup.topValues = group.sortValues;
+ groupsSeen.put(group.groupValue, mergedGroup);
+ mergedGroup.inQueue = true;
+ queue.add(mergedGroup);
+ } else if (mergedGroup.processed) {
+ // This shard produced a group that we already
+ // processed; move on to next group...
+ continue;
+ } else {
+ //System.out.println(" old");
+ boolean competes = false;
+ for(int compIDX=0;compIDX<groupComp.comparators.length;compIDX++) {
+ final int cmp = groupComp.reversed[compIDX] * groupComp.comparators[compIDX].compareValues(group.sortValues[compIDX], mergedGroup.topValues[compIDX]);
+ if (cmp < 0) {
+ // Definitely competes:
+ competes = true;
+ break;
+ } else if (cmp > 0) {
+ // Definitely does not compete
+ break;
+ } else if (compIDX == groupComp.comparators.length-1) {
+ if (shard.shardIndex < mergedGroup.minShardIndex) {
+ competes = true;
+ }
+ }
+ }
+
+ //System.out.println(" competes=" + competes);
+
+ if (competes) {
+ // Group's sort changed -- remove & re-insert
+ if (mergedGroup.inQueue) {
+ queue.remove(mergedGroup);
+ }
+ mergedGroup.topValues = group.sortValues;
+ mergedGroup.minShardIndex = shard.shardIndex;
+ queue.add(mergedGroup);
+ mergedGroup.inQueue = true;
+ }
+ }
+
+ mergedGroup.shards.add(shard);
+ break;
+ }
+
+ // Prune un-competitive groups:
+ while(queue.size() > topN) {
+ // TODO java 1.6: .pollLast
+ final MergedGroup<T> group = queue.last();
+ //System.out.println("PRUNE: " + group);
+ queue.remove(group);
+ group.inQueue = false;
+ }
+ }
+
+ public Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> shards, int offset, int topN) {
+
+ final int maxQueueSize = offset + topN;
+
+ //System.out.println("merge");
+ // Init queue:
+ for(int shardIDX=0;shardIDX<shards.size();shardIDX++) {
+ final Collection<SearchGroup<T>> shard = shards.get(shardIDX);
+ if (!shard.isEmpty()) {
+ //System.out.println(" insert shard=" + shardIDX);
+ updateNextGroup(maxQueueSize, new ShardIter(shard, shardIDX));
+ }
+ }
+
+ // Pull merged topN groups:
+ final List<SearchGroup<T>> newTopGroups = new ArrayList<SearchGroup<T>>();
+
+ int count = 0;
+
+ while(queue.size() != 0) {
+ // TODO Java 1.6: pollFirst()
+ final MergedGroup<T> group = queue.first();
+ queue.remove(group);
+ group.processed = true;
+ //System.out.println(" pop: shards=" + group.shards + " group=" + (group.groupValue == null ? "null" : (((BytesRef) group.groupValue).utf8ToString())) + " sortValues=" + Arrays.toString(group.topValues));
+ if (count++ >= offset) {
+ final SearchGroup<T> newGroup = new SearchGroup<T>();
+ newGroup.groupValue = group.groupValue;
+ newGroup.sortValues = group.topValues;
+ newTopGroups.add(newGroup);
+ if (newTopGroups.size() == topN) {
+ break;
+ }
+ //} else {
+ // System.out.println(" skip < offset");
+ }
+
+ // Advance all iters in this group:
+ for(ShardIter<T> shardIter : group.shards) {
+ updateNextGroup(maxQueueSize, shardIter);
+ }
+ }
+
+ if (newTopGroups.size() == 0) {
+ return null;
+ } else {
+ return newTopGroups;
+ }
+ }
+ }
+
+ /** Merges multiple collections of top groups, for example
+ * obtained from separate index shards. The provided
+ * groupSort must match how the groups were sorted, and
+ * the provided SearchGroups must have been computed
+ * with fillFields=true passed to {@link
+ * AbstractFirstPassGroupingCollector#getTopGroups}.
+ *
+ * NOTE: this returns null if topGroups is empty.
+ */
+ public static <T> Collection<SearchGroup<T>> merge(List<Collection<SearchGroup<T>>> topGroups, int offset, int topN, Sort groupSort)
+ throws IOException {
+ if (topGroups.size() == 0) {
+ return null;
+ } else {
+ return new GroupMerger<T>(groupSort).merge(topGroups, offset, topN);
+ }
+ }
}
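Together with TopGroups.merge below, the intended distributed flow is: run the first-pass collector on every shard with fillFields=true, merge the per-shard SearchGroups, run every shard's second pass against the same merged set, then merge the per-shard TopGroups. A compressed sketch of just the two merge calls (collector plumbing elided; not patch code):

    import java.io.IOException;
    import java.util.Collection;
    import java.util.List;
    import org.apache.lucene.search.Sort;
    import org.apache.lucene.search.grouping.SearchGroup;
    import org.apache.lucene.search.grouping.TopGroups;
    import org.apache.lucene.util.BytesRef;

    public class TwoPassShardMergeSketch {
      // Pass 1: each shard's getTopGroups(0, true) result (fillFields=true is required).
      public static Collection<SearchGroup<BytesRef>> mergeFirstPass(
          List<Collection<SearchGroup<BytesRef>>> perShard,
          Sort groupSort, int groupOffset, int topNGroups) throws IOException {
        return SearchGroup.merge(perShard, groupOffset, topNGroups, groupSort);
      }

      // Pass 2: each shard's TopGroups, all computed for the SAME merged group set.
      public static TopGroups<BytesRef> mergeSecondPass(
          TopGroups<BytesRef>[] perShard,
          Sort groupSort, Sort docSort, int docOffset, int docTopN) throws IOException {
        return TopGroups.merge(perShard, groupSort, docSort, docOffset, docTopN);
      }
    }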
diff --git a/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java b/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
index a46aa410c20..c5c376e2474 100644
--- a/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
+++ b/modules/grouping/src/java/org/apache/lucene/search/grouping/TopGroups.java
@@ -1,7 +1,5 @@
package org.apache.lucene.search.grouping;
-import org.apache.lucene.search.SortField;
-
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
@@ -19,6 +17,13 @@ import org.apache.lucene.search.SortField;
* limitations under the License.
*/
+import java.io.IOException;
+
+import org.apache.lucene.search.ScoreDoc;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.lucene.search.TopDocs;
+
/** Represents result returned by a grouping search.
*
* @lucene.experimental */
@@ -58,4 +63,103 @@ public class TopGroups<GROUP_VALUE_TYPE> {
this.groups = oldTopGroups.groups;
this.totalGroupCount = totalGroupCount;
}
+
+ /** Merges an array of TopGroups, for example obtained
+ * from the second-pass collector across multiple
+ * shards. Each TopGroups must have been sorted by the
+ * same groupSort and docSort, and the top groups passed
+ * to all second-pass collectors must be the same.
+ *
+ * NOTE: this cannot merge totalGroupCount; ie the
+ * returned TopGroups will have null totalGroupCount.
+ *
+ * NOTE: the topDocs in each GroupDocs is actually
+ * an instance of TopDocsAndShards
+ */
+ public static <T> TopGroups<T> merge(TopGroups<T>[] shardGroups, Sort groupSort, Sort docSort, int docOffset, int docTopN)
+ throws IOException {
+
+ //System.out.println("TopGroups.merge");
+
+ if (shardGroups.length == 0) {
+ return null;
+ }
+
+ int totalHitCount = 0;
+ int totalGroupedHitCount = 0;
+
+ final int numGroups = shardGroups[0].groups.length;
+ for(TopGroups<T> shard : shardGroups) {
+ if (numGroups != shard.groups.length) {
+ throw new IllegalArgumentException("number of groups differs across shards; you must pass same top groups to all shards' second-pass collector");
+ }
+ totalHitCount += shard.totalHitCount;
+ totalGroupedHitCount += shard.totalGroupedHitCount;
+ }
+
+ @SuppressWarnings("unchecked")
+ final GroupDocs<T>[] mergedGroupDocs = new GroupDocs[numGroups];
+
+ final TopDocs[] shardTopDocs = new TopDocs[shardGroups.length];
+
+ for(int groupIDX=0;groupIDX<numGroups;groupIDX++) {
+ final T groupValue = shardGroups[0].groups[groupIDX].groupValue;
+ float maxScore = Float.MIN_VALUE;
+ int totalHits = 0;
+ for(int shardIDX=0;shardIDX<shardGroups.length;shardIDX++) {
+ final TopGroups<T> shard = shardGroups[shardIDX];
+ final GroupDocs<T> shardGroupDocs = shard.groups[groupIDX];
+ if (groupValue == null) {
+ if (shardGroupDocs.groupValue != null) {
+ throw new IllegalArgumentException("group values differ across shards; you must pass same top groups to all shards' second-pass collector");
+ }
+ } else if (!groupValue.equals(shardGroupDocs.groupValue)) {
+ throw new IllegalArgumentException("group values differ across shards; you must pass same top groups to all shards' second-pass collector");
+ }
+
+ shardTopDocs[shardIDX] = new TopDocs(shardGroupDocs.totalHits, shardGroupDocs.scoreDocs, shardGroupDocs.maxScore);
+ maxScore = Math.max(maxScore, shardGroupDocs.maxScore);
+ totalHits += shardGroupDocs.totalHits;
+ }
+
+ final TopDocs mergedTopDocs = TopDocs.merge(docSort, docOffset + docTopN, shardTopDocs);
+
+ // Slice:
+ final ScoreDoc[] mergedScoreDocs;
+ if (docOffset == 0) {
+ mergedScoreDocs = mergedTopDocs.scoreDocs;
+ } else if (docOffset >= mergedTopDocs.scoreDocs.length) {
+ mergedScoreDocs = new ScoreDoc[0];
+ } else {
+ mergedScoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length - docOffset];
+ System.arraycopy(mergedTopDocs.scoreDocs, docOffset, mergedScoreDocs, 0, mergedTopDocs.scoreDocs.length - docOffset);
+ }
+ mergedGroupDocs[groupIDX] = new GroupDocs<T>(maxScore, totalHits, mergedScoreDocs, groupValue, shardGroups[0].groups[groupIDX].groupSortValues);
+ }
+
+ return new TopGroups<T>(groupSort.getSort(), docSort == null ? null : docSort.getSort(), totalHitCount, totalGroupedHitCount, mergedGroupDocs);
+ }
}
diff --git a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
--- a/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
+++ b/modules/grouping/src/test/org/apache/lucene/search/grouping/TestGrouping.java
@@
private TopGroups<BytesRef> slowGrouping(GroupDoc[] groupDocs,
String searchTerm,
@@ -418,6 +417,38 @@ public class TestGrouping extends LuceneTestCase {
return r;
}
+ private static class ShardState {
+
+ public final ShardSearcher[] subSearchers;
+ public final int[] docStarts;
+
+ public ShardState(IndexSearcher s) {
+ IndexReader[] subReaders = s.getIndexReader().getSequentialSubReaders();
+ if (subReaders == null) {
+ subReaders = new IndexReader[] {s.getIndexReader()};
+ }
+ subSearchers = new ShardSearcher[subReaders.length];
+ final IndexReader.ReaderContext ctx = s.getTopReaderContext();
+ if (ctx instanceof IndexReader.AtomicReaderContext) {
+ assert subSearchers.length == 1;
+ subSearchers[0] = new ShardSearcher((IndexReader.AtomicReaderContext) ctx, ctx);
+ } else {
+ final IndexReader.CompositeReaderContext compCTX = (IndexReader.CompositeReaderContext) ctx;
+ for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
+ subSearchers[searcherIDX] = new ShardSearcher(compCTX.leaves[searcherIDX], compCTX);
+ }
+ }
+
+ docStarts = new int[subSearchers.length];
+ int docBase = 0;
+ for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
+ docStarts[subIDX] = docBase;
+ docBase += subReaders[subIDX].maxDoc();
+ }
+ }
+ }
@@
final Collection<SearchGroup<BytesRef>> topGroups = c1.getTopGroups(groupOffset, fillFields);
final TopGroups<BytesRef> groupsResult;
+ if (VERBOSE) {
+ System.out.println("TEST: topGroups:");
+ if (topGroups == null) {
+ System.out.println(" null");
+ } else {
+ for(SearchGroup<BytesRef> groupx : topGroups) {
+ System.out.println(" " + groupToString(groupx.groupValue) + " sort=" + Arrays.toString(groupx.sortValues));
+ }
+ }
+ }
+
+ final TopGroups<BytesRef> topGroupsShards = searchShards(s, shards.subSearchers, q, groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores);
if (topGroups != null) {
@@ -734,7 +781,13 @@ public class TestGrouping extends LuceneTestCase {
}
}
}
- assertEquals(docIDToID, expectedGroups, groupsResult, true, getScores);
+ assertEquals(docIDToID, expectedGroups, groupsResult, true, true, true, getScores);
+
+ // Confirm merged shards match:
+ assertEquals(docIDToID, expectedGroups, topGroupsShards, true, false, fillFields, getScores);
+ if (topGroupsShards != null) {
+ verifyShards(shards.docStarts, topGroupsShards);
+ }
final boolean needsScores = getScores || getMaxScores || docSort == null;
final BlockGroupingCollector c3 = new BlockGroupingCollector(groupSort, groupOffset+topNGroups, needsScores, lastDocInBlock);
@@ -758,6 +811,8 @@ public class TestGrouping extends LuceneTestCase {
groupsResult2 = tempTopGroups2;
}
+ final TopGroups<BytesRef> topGroupsBlockShards = searchShards(s2, shards2.subSearchers, q, groupSort, docSort, groupOffset, topNGroups, docOffset, docsPerGroup, getScores, getMaxScores);
+
if (expectedGroups != null) {
// Fixup scores for reader2
for (GroupDocs groupDocsHits : expectedGroups.groups) {
@@ -799,8 +854,11 @@ public class TestGrouping extends LuceneTestCase {
}
}
- assertEquals(docIDToID2, expectedGroups, groupsResult2, false, getScores);
+ assertEquals(docIDToID2, expectedGroups, groupsResult2, false, true, true, getScores);
+ assertEquals(docIDToID2, expectedGroups, topGroupsBlockShards, false, false, fillFields, getScores);
}
+ s.close();
+ s2.close();
} finally {
FieldCache.DEFAULT.purge(r);
if (r2 != null) {
@@ -816,7 +874,93 @@ public class TestGrouping extends LuceneTestCase {
}
}
- private void assertEquals(int[] docIDtoID, TopGroups expected, TopGroups actual, boolean verifyGroupValues, boolean testScores) {
+ private void verifyShards(int[] docStarts, TopGroups<BytesRef> topGroups) {
+ for(GroupDocs<BytesRef> group : topGroups.groups) {
+ for(int hitIDX=0;hitIDX<group.scoreDocs.length;hitIDX++) {
+ final ScoreDoc sd = group.scoreDocs[hitIDX];
+ assertEquals("doc=" + sd.doc + " wrong shard", ReaderUtil.subIndex(sd.doc, docStarts), sd.shardIndex);
+ }
+ }
+ }
+
+ private void assertEquals(Collection<SearchGroup<BytesRef>> groups1, Collection<SearchGroup<BytesRef>> groups2, boolean doSortValues) {
+ assertEquals(groups1.size(), groups2.size());
+ final Iterator<SearchGroup<BytesRef>> iter1 = groups1.iterator();
+ final Iterator<SearchGroup<BytesRef>> iter2 = groups2.iterator();
+
+ while(iter1.hasNext()) {
+ assertTrue(iter2.hasNext());
+
+ SearchGroup<BytesRef> group1 = iter1.next();
+ SearchGroup<BytesRef> group2 = iter2.next();
+
+ assertEquals(group1.groupValue, group2.groupValue);
+ if (doSortValues) {
+ assertEquals(group1.sortValues, group2.sortValues);
+ }
+ }
+ assertFalse(iter2.hasNext());
+ }
+
+ private TopGroups<BytesRef> searchShards(IndexSearcher topSearcher, ShardSearcher[] subSearchers, Query query, Sort groupSort, Sort docSort, int groupOffset, int topNGroups, int docOffset,
+ int topNDocs, boolean getScores, boolean getMaxScores) throws Exception {
+
+ // TODO: swap in caching, all groups collector here
+ // too...
+ if (VERBOSE) {
+ System.out.println("TEST: " + subSearchers.length + " shards: " + Arrays.toString(subSearchers));
+ }
+ // Run 1st pass collector to get top groups per shard
+ final Weight w = topSearcher.createNormalizedWeight(query);
+ final List<Collection<SearchGroup<BytesRef>>> shardGroups = new ArrayList<Collection<SearchGroup<BytesRef>>>();
+ for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
+ final TermFirstPassGroupingCollector c = new TermFirstPassGroupingCollector("group", groupSort, groupOffset+topNGroups);
+ subSearchers[shardIDX].search(w, c);
+ final Collection<SearchGroup<BytesRef>> topGroups = c.getTopGroups(0, true);
+ if (topGroups != null) {
+ if (VERBOSE) {
+ System.out.println(" shard " + shardIDX + " s=" + subSearchers[shardIDX] + " " + topGroups.size() + " groups:");
+ for(SearchGroup<BytesRef> group : topGroups) {
+ System.out.println(" " + groupToString(group.groupValue) + " sort=" + Arrays.toString(group.sortValues));
+ }
+ }
+ shardGroups.add(topGroups);
+ }
+ }
+
+ final Collection<SearchGroup<BytesRef>> mergedTopGroups = SearchGroup.merge(shardGroups, groupOffset, topNGroups, groupSort);
+ if (VERBOSE) {
+ System.out.println(" merged:");
+ if (mergedTopGroups == null) {
+ System.out.println(" null");
+ } else {
+ for(SearchGroup<BytesRef> group : mergedTopGroups) {
+ System.out.println(" " + groupToString(group.groupValue) + " sort=" + Arrays.toString(group.sortValues));
+ }
+ }
+ }
+
+ if (mergedTopGroups != null) {
+
+ // Now 2nd pass:
+ @SuppressWarnings("unchecked")
+ final TopGroups<BytesRef>[] shardTopGroups = new TopGroups[subSearchers.length];
+ for(int shardIDX=0;shardIDX<subSearchers.length;shardIDX++) {
+ final TermSecondPassGroupingCollector c = new TermSecondPassGroupingCollector("group", mergedTopGroups, groupSort, docSort, docOffset + topNDocs, getScores, getMaxScores, true);
+ subSearchers[shardIDX].search(w, c);
+ shardTopGroups[shardIDX] = c.getTopGroups(0);
+ }
+
+ final TopGroups<BytesRef> mergedGroups = TopGroups.merge(shardTopGroups, groupSort, docSort, docOffset, topNDocs);
+ if (VERBOSE) {
+ System.out.println(" " + mergedGroups.groups.length + " merged groups:");
+ for(GroupDocs<BytesRef> group : mergedGroups.groups) {
+ System.out.println(" " + groupToString(group.groupValue) + " numDocs=" + group.scoreDocs.length);
+ }
+ }
+ return mergedGroups;
+ } else {
+ return null;
+ }
+ }
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinContentStreamDataSource.java
@@
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.5
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinFileDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinFileDataSource.java
index 7635c1acdaa..986404ca8cf 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinFileDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinFileDataSource.java
@@ -37,7 +37,6 @@ import java.util.Properties;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.5
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinURLDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinURLDataSource.java
index e3faac06c8e..fb8a47e7fd0 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinURLDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/BinURLDataSource.java
@@ -32,7 +32,6 @@ import java.util.Properties;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.5
*/
public class BinURLDataSource extends DataSource<InputStream>{
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java
index 18dc9c6f356..a2ef3b97dd7 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/CachedSqlEntityProcessor.java
@@ -32,7 +32,6 @@ import java.util.Map;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class CachedSqlEntityProcessor extends SqlEntityProcessor {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java
index 6840f65f85b..45aff148a11 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ClobTransformer.java
@@ -33,7 +33,6 @@ import java.util.Map;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.4
*/
public class ClobTransformer extends Transformer {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java
index fbbb7ce8694..a2de2876ee5 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContentStreamDataSource.java
@@ -31,7 +31,6 @@ import java.util.Properties;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.4
*/
public class ContentStreamDataSource extends DataSource<Reader> {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Context.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Context.java
index 4e517b6a0bb..d629d8f6e4d 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Context.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Context.java
@@ -35,7 +35,6 @@ import java.util.Map;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public abstract class Context {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java
index c120945e9e1..87b1ae2be45 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ContextImpl.java
@@ -30,7 +30,6 @@ import java.util.concurrent.ConcurrentHashMap;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class ContextImpl extends Context {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataConfig.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataConfig.java
index 8fb9aeb32c5..ab958860acb 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataConfig.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataConfig.java
@@ -40,7 +40,6 @@ import java.util.*;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class DataConfig {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java
index 3b039b5c60e..5da8b133a06 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImportHandler.java
@@ -63,7 +63,6 @@ import org.xml.sax.InputSource;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class DataImportHandler extends RequestHandlerBase implements
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java
index 5fee9a65387..85ad0930c5d 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataImporter.java
@@ -39,6 +39,7 @@ import org.apache.commons.io.IOUtils;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
+import java.io.File;
import java.io.StringReader;
import java.text.SimpleDateFormat;
import java.util.*;
@@ -51,7 +52,6 @@ import java.util.concurrent.ConcurrentHashMap;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class DataImporter {
@@ -85,6 +85,8 @@ public class DataImporter {
private final Map<String, Object> coreScopeSession;
+ private boolean isDeltaImportSupported = false;
+
/**
* Only for testing purposes
*/
@@ -113,7 +115,9 @@ public class DataImporter {
initEntity(e, fields, false);
verifyWithSchema(fields);
identifyPk(e);
- }
+ if (e.allAttributes.containsKey(SqlEntityProcessor.DELTA_QUERY))
+ isDeltaImportSupported = true;
+ }
}
private void verifyWithSchema(Map<String, DataConfig.Field> fields) {
@@ -350,6 +354,7 @@ public class DataImporter {
try {
docBuilder = new DocBuilder(this, writer, requestParams);
+ checkWritablePersistFile(writer);
docBuilder.execute();
if (!requestParams.debug)
cumulativeStatistics.add(docBuilder.importStatistics);
@@ -364,6 +369,15 @@ public class DataImporter {
}
+ private void checkWritablePersistFile(SolrWriter writer) {
+ File persistFile = writer.getPersistFile();
+ boolean isWritable = persistFile.exists() ? persistFile.canWrite() : persistFile.getParentFile().canWrite();
+ if (isDeltaImportSupported && !isWritable) {
+ throw new DataImportHandlerException(SEVERE, persistFile.getAbsolutePath() +
+ " is not writable. Delta imports are supported by data config but will not work.");
+ }
+ }
+
public void doDeltaImport(SolrWriter writer, RequestParams requestParams) {
LOG.info("Starting Delta Import");
setStatus(Status.RUNNING_DELTA_DUMP);
@@ -371,6 +385,7 @@ public class DataImporter {
try {
setIndexStartTime(new Date());
docBuilder = new DocBuilder(this, writer, requestParams);
+ checkWritablePersistFile(writer);
docBuilder.execute();
if (!requestParams.debug)
cumulativeStatistics.add(docBuilder.importStatistics);
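The new guard reduces to one rule; a minimal standalone sketch of it (the names here are illustrative, not the handler's API):

    import java.io.File;

    public class PersistFileRuleSketch {
      // If the data config declares any deltaQuery, dataimport.properties must be
      // writable (or creatable: parent directory writable) before a run starts,
      // so a delta import cannot run and then fail to persist its state.
      static void checkWritable(File persistFile, boolean deltaImportSupported) {
        final boolean writable = persistFile.exists()
            ? persistFile.canWrite()
            : persistFile.getParentFile().canWrite();
        if (deltaImportSupported && !writable) {
          throw new IllegalStateException(persistFile.getAbsolutePath() + " is not writable");
        }
      }
    }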
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataSource.java
index d63ecfb3042..e76e4ee0091 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DataSource.java
@@ -36,7 +36,6 @@ import java.util.Properties;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public abstract class DataSource<T> {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java
index 7100515d2fb..f093f973f1b 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DateFormatTransformer.java
@@ -37,7 +37,6 @@ import org.slf4j.LoggerFactory;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class DateFormatTransformer extends Transformer {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java
index f177e9b7c06..4b2ebafafba 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DebugLogger.java
@@ -39,7 +39,6 @@ import java.util.Stack;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
class DebugLogger {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java
index 82265d143fd..a1091493e58 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DocBuilder.java
@@ -37,7 +37,6 @@ import java.util.concurrent.*;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class DocBuilder {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java
index 9c7d96370c1..f285be31273 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java
@@ -36,7 +36,6 @@ import java.util.Map;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public abstract class EntityProcessor {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
index 631a0d448d3..9aaa5374841 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorBase.java
@@ -29,7 +29,6 @@ import java.util.*;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.3
*/
public class EntityProcessorBase extends EntityProcessor {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
index c022355da9d..c85dec109a8 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessorWrapper.java
@@ -32,7 +32,6 @@ import java.util.Map;
/**
* A Wrapper over {@link EntityProcessor} instance which performs transforms and handles multi-row outputs correctly.
*
- *
* @since solr 1.4
*/
public class EntityProcessorWrapper extends EntityProcessor {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java
index 9732ca70ed0..3393ad4ace2 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/Evaluator.java
@@ -30,7 +30,6 @@ package org.apache.solr.handler.dataimport;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public abstract class Evaluator {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
index 95b81c76780..076734e2d95 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EvaluatorBag.java
@@ -41,7 +41,6 @@ import java.util.regex.Pattern;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class EvaluatorBag {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EventListener.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EventListener.java
index fd9e918d231..b3cbee20ae8 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EventListener.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EventListener.java
@@ -21,7 +21,6 @@ package org.apache.solr.handler.dataimport;
*
* This API is experimental and subject to change
*
- *
* @since solr 1.4
*/
public interface EventListener {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java
index 6f8f7b4c99a..0fa0ddbbf94 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldReaderDataSource.java
@@ -42,7 +42,6 @@ import java.util.Properties;
*
* Supports String, BLOB, CLOB data types and there is an extra field (in the entity) 'encoding' for BLOB types
*
- *
* @since 1.4
*/
public class FieldReaderDataSource extends DataSource<Reader> {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java
index c8f5b1ac894..252e8f08cc6 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FieldStreamDataSource.java
@@ -43,7 +43,6 @@ import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVE
* This may be used with any {@link EntityProcessor} which uses a {@link DataSource}<{@link InputStream}> eg: {@link TikaEntityProcessor}
*
*
- *
* @since 3.1
*/
public class FieldStreamDataSource extends DataSource<InputStream> {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileDataSource.java
index bbc5ed94b93..ca37c7368bc 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileDataSource.java
@@ -41,7 +41,6 @@ import static org.apache.solr.handler.dataimport.DataImportHandlerException.SEVE
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class FileDataSource extends DataSource {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java
index 7cea6f711d0..520943f4e04 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/FileListEntityProcessor.java
@@ -50,7 +50,6 @@ import java.util.regex.Pattern;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
* @see Pattern
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HTMLStripTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HTMLStripTransformer.java
index d02df3f7d92..526976c4cc2 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HTMLStripTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HTMLStripTransformer.java
@@ -30,7 +30,6 @@ import java.util.Map;
* A {@link Transformer} implementation which strip off HTML tags using {@link HTMLStripCharFilter} This is useful
* in case you don't need this HTML anyway.
*
- *
* @see HTMLStripCharFilter
* @since solr 1.4
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HttpDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HttpDataSource.java
index fb30439fd00..a65831056db 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HttpDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/HttpDataSource.java
@@ -30,7 +30,6 @@ package org.apache.solr.handler.dataimport;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
* @deprecated use {@link org.apache.solr.handler.dataimport.URLDataSource} instead
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
index e7c13ebd730..df4b33f06a7 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/JdbcDataSource.java
@@ -33,7 +33,6 @@ import java.util.concurrent.Callable;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class JdbcDataSource extends
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
index 1921204f509..e526e83c800 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
@@ -52,7 +52,6 @@ import org.apache.commons.io.IOUtils;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.4
* @see Pattern
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LogTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LogTransformer.java
index 2e0052f6c43..704a03cba4d 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LogTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LogTransformer.java
@@ -29,7 +29,6 @@ import java.util.Map;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.4
*/
public class LogTransformer extends Transformer {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java
index 9e1c3ee21b6..32048eaa1f1 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/MockDataSource.java
@@ -28,7 +28,6 @@ import java.util.Properties;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class MockDataSource extends
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java
index 35b3aa45902..754e39b198d 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/NumberFormatTransformer.java
@@ -42,7 +42,6 @@ import java.util.regex.Pattern;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class NumberFormatTransformer extends Transformer {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java
index 2ae8a72e3d6..b88c85fc1fa 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/PlainTextEntityProcessor.java
@@ -33,7 +33,6 @@ import java.util.Map;
* An implementation of {@link EntityProcessor} which reads data from a url/file and give out a row which contains one String
* value. The name of the field is 'plainText'.
*
- *
* @since solr 1.4
*/
public class PlainTextEntityProcessor extends EntityProcessorBase {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java
index a9bb9055c88..d680c9d0ffb 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/RegexTransformer.java
@@ -36,7 +36,6 @@ import java.util.regex.Pattern;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
* @see Pattern
*/
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ScriptTransformer.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ScriptTransformer.java
index 39cb4331153..a4ea3afad3a 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ScriptTransformer.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/ScriptTransformer.java
@@ -37,7 +37,6 @@ import java.util.Map;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class ScriptTransformer extends Transformer {
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
index ec3dd1e6255..e7bbb6c000f 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
@@ -34,7 +34,6 @@ import java.util.Properties;
*
* This API is experimental and may change in the future.
*
- *
* @since solr 1.3
*/
public class SolrWriter {
@@ -100,13 +99,10 @@ public class SolrWriter {
try {
props.putAll(p);
- String filePath = configDir;
- if (configDir != null && !configDir.endsWith(File.separator))
- filePath += File.separator;
- filePath += persistFilename;
- propOutput = new FileOutputStream(filePath);
+ File persistFile = getPersistFile();
+ propOutput = new FileOutputStream(persistFile);
props.store(propOutput, null);
- log.info("Wrote last indexed time to " + persistFilename);
+ log.info("Wrote last indexed time to " + persistFile.getAbsolutePath());
} catch (FileNotFoundException e) {
throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
"Unable to persist Index Start Time", e);
@@ -123,6 +119,14 @@ public class SolrWriter {
}
}
+ File getPersistFile() {
+ String filePath = configDir;
+ if (configDir != null && !configDir.endsWith(File.separator))
+ filePath += File.separator;
+ filePath += persistFilename;
+ return new File(filePath);
+ }
+
void finish() {
try {
processor.finish();
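
Note on the SolrWriter change above: extracting the path computation into the
package-private getPersistFile() helper centralizes the separator handling and
lets callers log the absolute path of the persist file. A minimal stand-alone
sketch of the extracted logic (hypothetical class, not the Solr source;
configDir and persistFilename are SolrWriter's existing fields):

    import java.io.File;

    public class PersistFileSketch {
        // Same separator handling as SolrWriter.getPersistFile().
        static File persistFile(String configDir, String persistFilename) {
            String filePath = configDir;
            // Append a separator only when configDir does not already end with one.
            if (configDir != null && !configDir.endsWith(File.separator))
                filePath += File.separator;
            filePath += persistFilename;
            return new File(filePath);
        }

        public static void main(String[] args) {
            File f = persistFile("/var/solr/conf", "dataimport.properties");
            System.out.println(f.getPath()); // /var/solr/conf/dataimport.properties
        }
    }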
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta.java
index 60f1ae5ecd0..4cddebaa2e2 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSqlEntityProcessorDelta.java
@@ -20,6 +20,8 @@ import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import java.io.File;
+import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -92,7 +94,37 @@ public class TestSqlEntityProcessorDelta extends AbstractDataImportHandlerTestCa
public void testCompositePk_FullImport() throws Exception {
add1document();
}
-
+
+ @Test
+ @SuppressWarnings("unchecked")
+ public void testNonWritablePersistFile() throws Exception {
+ // See SOLR-2551
+ String configDir = h.getCore().getResourceLoader().getConfigDir();
+ String filePath = configDir;
+ if (configDir != null && !configDir.endsWith(File.separator))
+ filePath += File.separator;
+ filePath += "dataimport.properties";
+ File f = new File(filePath);
+ // execute the test only if we are able to set file to read only mode
+ if ((f.exists() || f.createNewFile()) && f.setReadOnly()) {
+ try {
+ List parentRow = new ArrayList();
+ parentRow.add(createMap("id", "1"));
+ MockDataSource.setIterator(FULLIMPORT_QUERY, parentRow.iterator());
+
+ List childRow = new ArrayList();
+ childRow.add(createMap("desc", "hello"));
+ MockDataSource.setIterator("select * from y where y.A='1'", childRow
+ .iterator());
+
+ runFullImport(dataConfig_delta);
+ assertQ(req("id:1"), "//*[@numFound='0']");
+ } finally {
+ f.setWritable(true);
+ }
+ }
+ }
+
// WORKS
@Test
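
Note on testNonWritablePersistFile above: the test guards itself with
f.setReadOnly(), because the read-only bit is not enforced on every platform
(for example when running as root on POSIX systems); when it is not honored,
the failure path cannot be reproduced and the test silently skips. A
stand-alone sketch of that guard pattern (hypothetical code, not the test
itself):

    import java.io.File;
    import java.io.IOException;

    public class ReadOnlyGuardSketch {
        public static void main(String[] args) throws IOException {
            File f = File.createTempFile("dataimport", ".properties");
            try {
                // Only exercise the failure path when the platform honors the bit.
                if (f.setReadOnly() && !f.canWrite()) {
                    System.out.println("read-only enforced; run the must-fail scenario");
                } else {
                    System.out.println("read-only not enforced; skipping");
                }
            } finally {
                f.setWritable(true); // restore so the file can be deleted
                f.delete();
            }
        }
    }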
diff --git a/solr/contrib/uima/src/test-files/solr-uima/conf/solrconfig.xml b/solr/contrib/uima/src/test-files/solr-uima/conf/solrconfig.xml
index 192728d4c65..bdcf94937cc 100644
--- a/solr/contrib/uima/src/test-files/solr-uima/conf/solrconfig.xml
+++ b/solr/contrib/uima/src/test-files/solr-uima/conf/solrconfig.xml
@@ -24,9 +24,7 @@
https://issues.apache.org/jira/browse/SOLR-1167
-->
-
- <luceneMatchVersion>LUCENE_40</luceneMatchVersion>
-
+ <luceneMatchVersion>${tests.luceneMatchVersion:LUCENE_CURRENT}</luceneMatchVersion>
+ id
false
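
Note on the solrconfig.xml change above: ${tests.luceneMatchVersion:LUCENE_CURRENT}
uses Solr's ${property:default} substitution syntax, so the test config picks
up the tests.luceneMatchVersion system property when the test harness sets it
and falls back to LUCENE_CURRENT otherwise. The resolution behaves analogously
to the following (analogy only, not Solr's implementation):

    public class PropertySubstitutionSketch {
        public static void main(String[] args) {
            String v = System.getProperty("tests.luceneMatchVersion", "LUCENE_CURRENT");
            System.out.println(v);
        }
    }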
diff --git a/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java b/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
index 008bc91ca17..1ce4993bdea 100644
--- a/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
+++ b/solr/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@@ -476,8 +476,8 @@ class ElevationComparatorSource extends FieldComparatorSource {
}
@Override
- public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
- return new FieldComparator() {
+ public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
+ return new FieldComparator<Integer>() {
FieldCache.DocTermsIndex idIndex;
private final int[] values = new int[numHits];
@@ -517,7 +517,7 @@ class ElevationComparatorSource extends FieldComparatorSource {
}
@Override
- public Comparable value(int slot) {
+ public Integer value(int slot) {
return values[slot];
}
};
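
Note on the FieldComparator changes in this and the following hunks: the
comparator API is generified over its value type, so implementations declare
FieldComparator<Integer> (or <BytesRef>, <Double>) and override value(int)
with the concrete type instead of a bare Comparable. A minimal sketch of the
pattern (illustrative classes, not the Lucene API):

    abstract class ComparatorSketch<T> {
        abstract T value(int slot);
    }

    class IntComparatorSketch extends ComparatorSketch<Integer> {
        private final int[] values = new int[10];
        @Override
        Integer value(int slot) {
            return values[slot]; // auto-boxed; callers get the exact type
        }
    }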
diff --git a/solr/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java b/solr/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java
index 89d9b975d06..406df22af5c 100644
--- a/solr/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java
+++ b/solr/src/java/org/apache/solr/response/transform/ValueSourceAugmenter.java
@@ -59,12 +59,17 @@ public class ValueSourceAugmenter extends DocTransformer
@Override
public void setContext( TransformContext context ) {
- IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
- readerContexts = reader.getTopReaderContext().leaves();
- docValuesArr = new DocValues[readerContexts.length];
+ try {
+ IndexReader reader = qparser.getReq().getSearcher().getIndexReader();
+ readerContexts = reader.getTopReaderContext().leaves();
+ docValuesArr = new DocValues[readerContexts.length];
- searcher = qparser.getReq().getSearcher();
- this.fcontext = ValueSource.newContext(searcher);
+ searcher = qparser.getReq().getSearcher();
+ fcontext = ValueSource.newContext(searcher);
+ this.valueSource.createWeight(fcontext, searcher);
+ } catch (IOException e) {
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
+ }
}
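
Note on the ValueSourceAugmenter change above: createWeight() throws a checked
IOException, but the overridden setContext() cannot declare it, so the patch
wraps it in an unchecked SolrException. The shape of that pattern (sketch with
illustrative names; RuntimeException stands in for SolrException):

    import java.io.IOException;

    class CheckedWrapSketch {
        void setContext() {
            try {
                createWeightLike();
            } catch (IOException e) {
                throw new RuntimeException("SERVER_ERROR equivalent", e);
            }
        }
        void createWeightLike() throws IOException { /* e.g. valueSource.createWeight(...) */ }
    }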
diff --git a/solr/src/java/org/apache/solr/schema/RandomSortField.java b/solr/src/java/org/apache/solr/schema/RandomSortField.java
index 80353fd567c..872c67c50ef 100644
--- a/solr/src/java/org/apache/solr/schema/RandomSortField.java
+++ b/solr/src/java/org/apache/solr/schema/RandomSortField.java
@@ -102,8 +102,8 @@ public class RandomSortField extends FieldType {
private static FieldComparatorSource randomComparatorSource = new FieldComparatorSource() {
@Override
- public FieldComparator newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
- return new FieldComparator() {
+ public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
+ return new FieldComparator<Integer>() {
int seed;
private final int[] values = new int[numHits];
int bottomVal;
@@ -135,7 +135,7 @@ public class RandomSortField extends FieldType {
}
@Override
- public Comparable value(int slot) {
+ public Integer value(int slot) {
return values[slot];
}
};
diff --git a/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java b/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
index a9d0cb0c0d7..ca5ff34ef58 100644
--- a/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
+++ b/solr/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
@@ -56,7 +56,7 @@ public class MissingStringLastComparatorSource extends FieldComparatorSource {
// Copied from Lucene's TermOrdValComparator and modified since the Lucene version couldn't
// be extended.
-class TermOrdValComparator_SML extends FieldComparator {
+class TermOrdValComparator_SML extends FieldComparator<BytesRef> {
private static final int NULL_ORD = Integer.MAX_VALUE;
private final int[] ords;
@@ -98,7 +98,7 @@ class TermOrdValComparator_SML extends FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public BytesRef value(int slot) {
throw new UnsupportedOperationException();
}
@@ -111,7 +111,7 @@ class TermOrdValComparator_SML extends FieldComparator {
// ords) per-segment comparator. NOTE: this is messy;
// we do this only because hotspot can't reliably inline
// the underlying array access when looking up doc->ord
- private static abstract class PerSegmentComparator extends FieldComparator {
+ private static abstract class PerSegmentComparator extends FieldComparator<BytesRef> {
protected TermOrdValComparator_SML parent;
protected final int[] ords;
protected final BytesRef[] values;
@@ -199,7 +199,7 @@ class TermOrdValComparator_SML extends FieldComparator {
}
@Override
- public Comparable<?> value(int slot) {
+ public BytesRef value(int slot) {
return values==null ? parent.NULL_VAL : values[slot];
}
}
diff --git a/solr/src/java/org/apache/solr/search/function/BoostedQuery.java b/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
index c065aef4318..ef26ad6daa4 100755
--- a/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
+++ b/solr/src/java/org/apache/solr/search/function/BoostedQuery.java
@@ -67,7 +67,7 @@ public class BoostedQuery extends Query {
public BoostedWeight(IndexSearcher searcher) throws IOException {
this.searcher = searcher;
- this.qWeight = q.weight(searcher);
+ this.qWeight = q.createWeight(searcher);
this.fcontext = boostVal.newContext(searcher);
boostVal.createWeight(fcontext,searcher);
}
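
Note on the BoostedQuery change above: the patch distinguishes two entry
points. Wrapping weights such as BoostedWeight take the raw, un-normalized
weight via Query.createWeight(IndexSearcher), since the enclosing search
drives normalization through the wrapper; top-level consumers instead ask the
searcher for a normalized weight. Sketch of the two call shapes (assuming the
post-patch Lucene APIs):

    import java.io.IOException;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.Weight;

    class WeightMigrationSketch {
        // Wrapper weights: raw weight; normalization happens later, from outside.
        static Weight forWrapper(IndexSearcher searcher, Query q) throws IOException {
            return q.createWeight(searcher);
        }
        // Top-level use: weight with the normalization pass already applied.
        static Weight forTopLevel(IndexSearcher searcher, Query q) throws IOException {
            return searcher.createNormalizedWeight(q);
        }
    }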
diff --git a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java
index 01cf3700c73..f0c15c6f929 100755
--- a/solr/src/java/org/apache/solr/search/function/QueryValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/QueryValueSource.java
@@ -68,7 +68,7 @@ public class QueryValueSource extends ValueSource {
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
- Weight w = q.weight(searcher);
+ Weight w = searcher.createNormalizedWeight(q);
context.put(this, w);
}
}
@@ -98,7 +98,7 @@ class QueryDocValues extends FloatDocValues {
this.q = vs.q;
this.fcontext = fcontext;
- Weight w = fcontext==null ? null : (Weight)fcontext.get(q);
+ Weight w = fcontext==null ? null : (Weight)fcontext.get(vs);
if (w == null) {
IndexSearcher weightSearcher;
if(fcontext == null) {
@@ -109,7 +109,8 @@ class QueryDocValues extends FloatDocValues {
weightSearcher = new IndexSearcher(ReaderUtil.getTopLevelContext(readerContext));
}
}
- w = q.weight(weightSearcher);
+ vs.createWeight(fcontext, weightSearcher);
+ w = (Weight)fcontext.get(vs);
}
weight = w;
}
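
Note on the QueryDocValues change above: createWeight() stores the Weight
under the ValueSource itself (context.put(this, w)), so the old lookup keyed
by the wrapped Query always missed and a fresh weight was built; the fix keys
the lookup consistently. A runnable sketch of the keying bug (stand-in
objects, not the Solr classes):

    import java.util.HashMap;
    import java.util.Map;

    public class ContextKeyingSketch {
        public static void main(String[] args) {
            Map<Object, Object> context = new HashMap<Object, Object>();
            Object valueSource = new Object(); // stands in for QueryValueSource
            Object query = new Object();       // the Query it wraps
            context.put(valueSource, "weight");           // createWeight(): put(this, w)
            System.out.println(context.get(query));       // old lookup -> null (bug)
            System.out.println(context.get(valueSource)); // fixed lookup -> weight
        }
    }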
diff --git a/solr/src/java/org/apache/solr/search/function/ValueSource.java b/solr/src/java/org/apache/solr/search/function/ValueSource.java
index 52189c05bf1..367e8a7ec7b 100644
--- a/solr/src/java/org/apache/solr/search/function/ValueSource.java
+++ b/solr/src/java/org/apache/solr/search/function/ValueSource.java
@@ -141,7 +141,7 @@ public abstract class ValueSource implements Serializable {
* off of the {@link org.apache.solr.search.function.DocValues} for a ValueSource
* instead of the normal Lucene FieldComparator that works off of a FieldCache.
*/
- class ValueSourceComparator extends FieldComparator {
+ class ValueSourceComparator extends FieldComparator<Double> {
private final double[] values;
private DocValues docVals;
private double bottom;
@@ -195,7 +195,7 @@ public abstract class ValueSource implements Serializable {
}
@Override
- public Comparable value(int slot) {
+ public Double value(int slot) {
return values[slot];
}
}