mirror of https://github.com/apache/lucene.git
Merge remote-tracking branch 'origin/branch_6x' into branch_6x
Conflicts: solr/CHANGES.txt
commit 501d73b4b8
@@ -5,8 +5,15 @@ http://s.apache.org/luceneversions

======================= Lucene 6.2.0 =======================

API Changes

* ScoringWrapperSpans was removed since it had no purpose or effect as of Lucene 5.5.

New Features

* LUCENE-7381: Add point based DoubleRangeField and RangeFieldQuery for
  indexing and querying on Ranges up to 4 dimensions (Nick Knize)

* LUCENE-6968: LSH Filter (Tommaso Teofili, Andy Hind, Cao Manh Dat)

* LUCENE-7302: IndexWriter methods that change the index now return a

@@ -44,6 +51,12 @@ Bug Fixes
  wrong default AttributeFactory for new Tokenizers.
  (Terry Smith, Uwe Schindler)

* LUCENE-7389: Fix FieldType.setDimensions(...) validation for the dimensionNumBytes
  parameter. (Martijn van Groningen)

* LUCENE-7391: Fix performance regression in MemoryIndex's fields() introduced
  in Lucene 6. (Steve Mason via David Smiley)

Improvements

* LUCENE-7323: Compound file writing now verifies the incoming

@@ -97,6 +110,11 @@ Improvements

* LUCENE-7385: Improve/fix assert messages in SpanScorer. (David Smiley)

* LUCENE-7390: Improve performance of indexing points by allowing the
  codec to use transient heap in proportion to IndexWriter's RAM
  buffer, instead of a fixed 16.0 MB. A custom codec can still
  override the buffer size itself. (Mike McCandless)

Optimizations

* LUCENE-7330, LUCENE-7339: Speed up conjunction queries. (Adrien Grand)

@@ -108,6 +126,9 @@ Optimizations
* LUCENE-7371: Point values are now better compressed using run-length
  encoding. (Adrien Grand)

* LUCENE-7311: Cached term queries do not seek the terms dictionary anymore.
  (Adrien Grand)

Other

* LUCENE-4787: Fixed some highlighting javadocs. (Michael Dodsworth via Adrien

@@ -123,6 +144,9 @@ Other
* LUCENE-7372: Factor out an org.apache.lucene.search.FilterWeight class.
  (Christine Poerschke, Adrien Grand, David Smiley)

* LUCENE-7384: Removed ScoringWrapperSpans. And tweaked SpanWeight.buildSimWeight() to
  reuse the existing Similarity instead of creating a new one. (David Smiley)

======================= Lucene 6.1.0 =======================

New Features

@@ -68,7 +68,7 @@ class SimpleTextPointsWriter extends PointsWriter {
}

@Override
public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException {
public void writeField(FieldInfo fieldInfo, PointsReader values, double maxMBSortInHeap) throws IOException {

boolean singleValuePerDoc = values.size(fieldInfo.name) == values.getDocCount(fieldInfo.name);

@@ -79,7 +79,7 @@ class SimpleTextPointsWriter extends PointsWriter {
fieldInfo.getPointDimensionCount(),
fieldInfo.getPointNumBytes(),
BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE,
BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP,
maxMBSortInHeap,
values.size(fieldInfo.name),
singleValuePerDoc) {

@@ -22,6 +22,7 @@ import java.io.IOException;

import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.util.bkd.BKDWriter;

/** Abstract API to write points
 *

@@ -34,8 +35,9 @@ public abstract class PointsWriter implements Closeable {
protected PointsWriter() {
}

/** Write all values contained in the provided reader */
public abstract void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException;
/** Write all values contained in the provided reader. {@code maxMBSortInHeap} is the maximum
 * transient heap that can be used to sort values, before spilling to disk for offline sorting */
public abstract void writeField(FieldInfo fieldInfo, PointsReader values, double maxMBSortInHeap) throws IOException;

/** Default naive merge implementation for one field: it just re-indexes all the values
 * from the incoming segment. The default codec overrides this for 1D fields and uses

@@ -145,7 +147,10 @@ public abstract class PointsWriter implements Closeable {
public int getDocCount(String fieldName) {
return finalDocCount;
}
});
},
// TODO: also let merging of > 1D fields tap into IW's indexing buffer size, somehow (1D fields do an optimized merge sort
// and don't need heap)
BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP);
}

/** Default merge implementation to merge incoming points readers by visiting all their points and

@@ -39,7 +39,9 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.bkd.BKDReader;
import org.apache.lucene.util.bkd.BKDWriter;

/** Writes dimensional values */
/** Writes dimensional values
 *
 * @lucene.experimental */
public class Lucene60PointsWriter extends PointsWriter implements Closeable {

/** Output used to write the BKD tree data file */

@@ -50,15 +52,13 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {

final SegmentWriteState writeState;
final int maxPointsInLeafNode;
final double maxMBSortInHeap;
private boolean finished;

/** Full constructor */
public Lucene60PointsWriter(SegmentWriteState writeState, int maxPointsInLeafNode, double maxMBSortInHeap) throws IOException {
public Lucene60PointsWriter(SegmentWriteState writeState, int maxPointsInLeafNode) throws IOException {
assert writeState.fieldInfos.hasPointValues();
this.writeState = writeState;
this.maxPointsInLeafNode = maxPointsInLeafNode;
this.maxMBSortInHeap = maxMBSortInHeap;
String dataFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name,
writeState.segmentSuffix,
Lucene60PointsFormat.DATA_EXTENSION);

@@ -80,11 +80,11 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {

/** Uses the defaults values for {@code maxPointsInLeafNode} (1024) and {@code maxMBSortInHeap} (16.0) */
public Lucene60PointsWriter(SegmentWriteState writeState) throws IOException {
this(writeState, BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP);
this(writeState, BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE);
}

@Override
public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException {
public void writeField(FieldInfo fieldInfo, PointsReader values, double maxMBSortInHeap) throws IOException {

boolean singleValuePerDoc = values.size(fieldInfo.name) == values.getDocCount(fieldInfo.name);

@@ -173,7 +173,8 @@ public class Lucene60PointsWriter extends PointsWriter implements Closeable {
fieldInfo.getPointDimensionCount(),
fieldInfo.getPointNumBytes(),
maxPointsInLeafNode,
maxMBSortInHeap,
// NOTE: not used, since BKDWriter.merge does a merge sort:
BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP,
totMaxSize,
singleValuePerDoc)) {
List<BKDReader> bkdReaders = new ArrayList<>();

@@ -374,7 +374,7 @@ public class FieldType implements IndexableFieldType {
if (dimensionNumBytes < 0) {
throw new IllegalArgumentException("dimensionNumBytes must be >= 0; got " + dimensionNumBytes);
}
if (dimensionCount > PointValues.MAX_NUM_BYTES) {
if (dimensionNumBytes > PointValues.MAX_NUM_BYTES) {
throw new IllegalArgumentException("dimensionNumBytes must be <= " + PointValues.MAX_NUM_BYTES + "; got " + dimensionNumBytes);
}
if (dimensionCount == 0) {

@@ -153,7 +153,7 @@ class DocumentsWriterPerThread {
final Allocator byteBlockAllocator;
final IntBlockPool.Allocator intBlockAllocator;
private final AtomicLong pendingNumDocs;
private final LiveIndexWriterConfig indexWriterConfig;
final LiveIndexWriterConfig indexWriterConfig;
private final boolean enableTestPoints;
private final IndexWriter indexWriter;

@@ -762,7 +762,7 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
 * {@link #getConfig()}.
 *
 * <p>
 * <b>NOTE:</b> after ths writer is created, the given configuration instance
 * <b>NOTE:</b> after this writer is created, the given configuration instance
 * cannot be passed to another writer.
 *
 * @param d

@@ -168,9 +168,14 @@ public class LiveIndexWriterConfig {

/**
 * Determines the amount of RAM that may be used for buffering added documents
 * and deletions before they are flushed to the Directory. Generally for
 * faster indexing performance it's best to flush by RAM usage instead of
 * document count and use as large a RAM buffer as you can.
 * and deletions before beginning to flush them to the Directory. For
 * faster indexing performance it's best to use as large a RAM buffer as you can.
 * <p>
 * Note that this setting is not a hard limit on memory usage during indexing, as
 * transient and non-trivial memory well beyond this buffer size may be used,
 * for example due to segment merges or writing points to new segments.
 * For application stability the available memory in the JVM
 * should be significantly larger than the RAM buffer used for indexing.
 * <p>
 * When this is set, the writer will flush whenever buffered documents and
 * deletions use this much RAM. Pass in

@@ -178,14 +183,6 @@ public class LiveIndexWriterConfig {
 * due to RAM usage. Note that if flushing by document count is also enabled,
 * then the flush will be triggered by whichever comes first.
 * <p>
 * The maximum RAM limit is inherently determined by the JVMs available
 * memory. Yet, an {@link IndexWriter} session can consume a significantly
 * larger amount of memory than the given RAM limit since this limit is just
 * an indicator when to flush memory resident documents to the Directory.
 * Flushes are likely happen concurrently while other threads adding documents
 * to the writer. For application stability the available memory in the JVM
 * should be significantly larger than the RAM buffer used for indexing.
 * <p>
 * <b>NOTE</b>: the account of RAM usage for pending deletions is only
 * approximate. Specifically, if you delete by Query, Lucene currently has no
 * way to measure the RAM usage of individual Queries so the accounting will

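The reworked javadoc above describes flush-by-RAM behavior; a minimal configuration sketch follows. The 256 MB value is illustrative only and is not taken from this commit.

```java
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriterConfig;

// Sketch: flush by RAM usage only, and leave ample JVM headroom beyond the
// buffer, as the updated javadoc above advises.
IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
iwc.setRAMBufferSizeMB(256.0);
iwc.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH); // disable the doc-count trigger
```
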
@@ -24,6 +24,7 @@ import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.ByteBlockPool;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
import org.apache.lucene.util.bkd.BKDWriter;

/** Buffers up pending byte[][] value(s) per doc, then flushes when segment flushes. */
class PointValuesWriter {

@@ -35,6 +36,7 @@ class PointValuesWriter {
private int numDocs;
private int lastDocID = -1;
private final byte[] packedValue;
private final LiveIndexWriterConfig indexWriterConfig;

public PointValuesWriter(DocumentsWriterPerThread docWriter, FieldInfo fieldInfo) {
this.fieldInfo = fieldInfo;

@@ -43,6 +45,7 @@ class PointValuesWriter {
docIDs = new int[16];
iwBytesUsed.addAndGet(16 * Integer.BYTES);
packedValue = new byte[fieldInfo.getPointDimensionCount() * fieldInfo.getPointNumBytes()];
indexWriterConfig = docWriter.indexWriterConfig;
}

// TODO: if exactly the same value is added to exactly the same doc, should we dedup?

@@ -124,6 +127,7 @@ class PointValuesWriter {
public int getDocCount(String fieldName) {
return numDocs;
}
});
},
Math.max(indexWriterConfig.getRAMBufferSizeMB()/8.0, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP));
}
}

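For context on the LUCENE-7390 change shown above: the transient sort heap handed to the points codec is now derived from the writer's RAM buffer instead of a fixed 16.0 MB. A minimal sketch of that sizing rule, using a hypothetical helper name (`transientHeapMB`); the expression mirrors the `Math.max(...)` call in the PointValuesWriter hunk.

```java
// Sketch only. ramBufferMB comes from LiveIndexWriterConfig.getRAMBufferSizeMB();
// the 16.0 MB floor is BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP in this version.
static double transientHeapMB(double ramBufferMB) {
  double defaultMaxMBSortInHeap = 16.0; // BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP
  return Math.max(ramBufferMB / 8.0, defaultMaxMBSortInHeap);
}
```

So a 512 MB RAM buffer lets the codec use up to 64 MB of transient heap for sorting points, while small buffers keep the old 16 MB default.
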
@@ -38,9 +38,6 @@ import java.io.IOException;
 * <li> {@link #compareBottom} Compare a new hit (docID)
 * against the "weakest" (bottom) entry in the queue.
 *
 * <li> {@link #compareBottom} Compare a new hit (docID)
 * against the "weakest" (bottom) entry in the queue.
 *
 * <li> {@link #compareTop} Compare a new hit (docID)
 * against the top value previously set by a call to
 * {@link FieldComparator#setTopValue}.

@@ -95,8 +92,8 @@ public interface LeafFieldComparator {
 *
 * @param doc that was hit
 * @return any {@code N < 0} if the doc's value is sorted after
 * the bottom entry (not competitive), any {@code N > 0} if the
 * doc's value is sorted before the bottom entry and {@code 0} if
 * the top entry (not competitive), any {@code N > 0} if the
 * doc's value is sorted before the top entry and {@code 0} if
 * they are equal.
 */
int compareTop(int doc) throws IOException;

@@ -29,6 +29,7 @@ import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;

@@ -51,8 +52,10 @@ public class TermQuery extends Query {
public TermWeight(IndexSearcher searcher, boolean needsScores, TermContext termStates)
throws IOException {
super(TermQuery.this);
if (needsScores && termStates == null) {
throw new IllegalStateException("termStates are required when scores are needed");
}
this.needsScores = needsScores;
assert termStates != null : "TermContext must not be null";
this.termStates = termStates;
this.similarity = searcher.getSimilarity(needsScores);

@@ -62,12 +65,10 @@ public class TermQuery extends Query {
collectionStats = searcher.collectionStatistics(term.field());
termStats = searcher.termStatistics(term, termStates);
} else {
// do not bother computing actual stats, scores are not needed
// we do not need the actual stats, use fake stats with docFreq=maxDoc and ttf=-1
final int maxDoc = searcher.getIndexReader().maxDoc();
final int docFreq = termStates.docFreq();
final long totalTermFreq = termStates.totalTermFreq();
collectionStats = new CollectionStatistics(term.field(), maxDoc, -1, -1, -1);
termStats = new TermStatistics(term.bytes(), docFreq, totalTermFreq);
termStats = new TermStatistics(term.bytes(), maxDoc, -1);
}

this.stats = similarity.computeWeight(collectionStats, termStats);

@@ -95,7 +96,7 @@ public class TermQuery extends Query {

@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
assert termStates == null || termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);;
final TermsEnum termsEnum = getTermsEnum(context);
if (termsEnum == null) {
return null;

@@ -110,17 +111,30 @@ public class TermQuery extends Query {
 * the term does not exist in the given context
 */
private TermsEnum getTermsEnum(LeafReaderContext context) throws IOException {
final TermState state = termStates.get(context.ord);
if (state == null) { // term is not present in that reader
assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
return null;
if (termStates != null) {
// TermQuery either used as a Query or the term states have been provided at construction time
assert termStates.topReaderContext == ReaderUtil.getTopLevelContext(context) : "The top-reader used to create Weight (" + termStates.topReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
final TermState state = termStates.get(context.ord);
if (state == null) { // term is not present in that reader
assert termNotInReader(context.reader(), term) : "no termstate found but term exists in reader term=" + term;
return null;
}
final TermsEnum termsEnum = context.reader().terms(term.field()).iterator();
termsEnum.seekExact(term.bytes(), state);
return termsEnum;
} else {
// TermQuery used as a filter, so the term states have not been built up front
Terms terms = context.reader().terms(term.field());
if (terms == null) {
return null;
}
final TermsEnum termsEnum = terms.iterator();
if (termsEnum.seekExact(term.bytes())) {
return termsEnum;
} else {
return null;
}
}
// System.out.println("LD=" + reader.getLiveDocs() + " set?=" +
// (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null"));
final TermsEnum termsEnum = context.reader().terms(term.field())
.iterator();
termsEnum.seekExact(term.bytes(), state);
return termsEnum;
}

private boolean termNotInReader(LeafReader reader, Term term) throws IOException {

@@ -178,9 +192,15 @@ public class TermQuery extends Query {
final TermContext termState;
if (perReaderTermState == null
|| perReaderTermState.topReaderContext != context) {
// make TermQuery single-pass if we don't have a PRTS or if the context
// differs!
termState = TermContext.build(context, term);
if (needsScores) {
// make TermQuery single-pass if we don't have a PRTS or if the context
// differs!
termState = TermContext.build(context, term);
} else {
// do not compute the term state, this will help save seeks in the terms
// dict on segments that have a cache entry for this query
termState = null;
}
} else {
// PRTS was pre-build for this IS
termState = this.perReaderTermState;

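The TermQuery changes above implement LUCENE-7311: when scores are not needed, the weight no longer resolves term states (and so does not seek the terms dictionary) up front. A hedged usage sketch; the `reader` variable is an assumption (any open IndexReader), and using a BooleanQuery FILTER clause is just one way to run the query without scores.

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Used purely as a filter (no scores), term lookup now happens lazily per segment.
Query filterOnly = new BooleanQuery.Builder()
    .add(new TermQuery(new Term("foo", "bar")), Occur.FILTER)
    .build();

// A caller that has already resolved the term states can pass them in via the
// new TermQuery(Term, TermContext) constructor exercised by TestTermQuery below.
TermContext states = TermContext.build(reader.getContext(), new Term("foo", "bar"));
Query prebuilt = new TermQuery(new Term("foo", "bar"), states);
```
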
@@ -1,95 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.search.spans;

import java.io.IOException;

import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.TwoPhaseIterator;

/**
 * A Spans that wraps another Spans with a different SimScorer
 */
public class ScoringWrapperSpans extends Spans {

private final Spans in;

/**
 * Creates a new ScoringWrapperSpans
 * @param spans the scorer to wrap
 * @param simScorer the SimScorer to use for scoring
 */
public ScoringWrapperSpans(Spans spans, Similarity.SimScorer simScorer) {
this.in = spans;
}

@Override
public int nextStartPosition() throws IOException {
return in.nextStartPosition();
}

@Override
public int startPosition() {
return in.startPosition();
}

@Override
public int endPosition() {
return in.endPosition();
}

@Override
public int width() {
return in.width();
}

@Override
public void collect(SpanCollector collector) throws IOException {
in.collect(collector);
}

@Override
public int docID() {
return in.docID();
}

@Override
public int nextDoc() throws IOException {
return in.nextDoc();
}

@Override
public int advance(int target) throws IOException {
return in.advance(target);
}

@Override
public long cost() {
return in.cost();
}

@Override
public TwoPhaseIterator asTwoPhaseIterator() {
return in.asTwoPhaseIterator();
}

@Override
public float positionsCost() {
return in.positionsCost();
}
}

@@ -126,7 +126,7 @@ public final class SpanNotQuery extends SpanQuery {

Spans excludeSpans = excludeWeight.getSpans(context, requiredPostings);
if (excludeSpans == null) {
return new ScoringWrapperSpans(includeSpans, getSimScorer(context));
return includeSpans;
}

TwoPhaseIterator excludeTwoPhase = excludeSpans.asTwoPhaseIterator();

@@ -161,7 +161,7 @@ public final class SpanOrQuery extends SpanQuery {
if (subSpans.size() == 0) {
return null;
} else if (subSpans.size() == 1) {
return new ScoringWrapperSpans(subSpans.get(0), getSimScorer(context));
return subSpans.get(0);
}

DisiPriorityQueue byDocQueue = new DisiPriorityQueue(subSpans.size());

@@ -99,7 +99,7 @@ public abstract class SpanWeight extends Weight {
i++;
}
CollectionStatistics collectionStats = searcher.collectionStatistics(query.getField());
return searcher.getSimilarity(true).computeWeight(collectionStats, termStats);
return similarity.computeWeight(collectionStats, termStats);
}

/**

@@ -204,7 +204,7 @@ public class BKDWriter implements Closeable {
// all recursive halves (i.e. 16 + 8 + 4 + 2) so the memory usage is 2X
// what that level would consume, so we multiply by 0.5 to convert from
// bytes to points here. Each dimension has its own sorted partition, so
// we must divide by numDims as wel.
// we must divide by numDims as well.

maxPointsSortInHeap = (int) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytesPerDoc * numDims));

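As a concrete reading of the formula in this hunk (values illustrative, not taken from the commit): with maxMBSortInHeap = 16.0, numDims = 2 and bytesPerDoc = 16, maxPointsSortInHeap = 0.5 * 16 * 1024 * 1024 / (16 * 2) = 262,144 points held in heap before spilling to offline sort.
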
@@ -41,9 +41,8 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
if (random().nextBoolean()) {
// randomize parameters
int maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
if (VERBOSE) {
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode);
}

// sneaky impersonation!

@@ -53,7 +52,7 @@ public class TestLucene60PointsFormat extends BasePointsFormatTestCase {
return new PointsFormat() {
@Override
public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode);
}

@Override

@@ -362,9 +362,8 @@ public class TestPointValues extends LuceneTestCase {
IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
IndexWriter w = new IndexWriter(dir, iwc);
Document doc = new Document();
doc.add(new BinaryPoint("dim", new byte[PointValues.MAX_NUM_BYTES+1]));
expectThrows(IllegalArgumentException.class, () -> {
w.addDocument(doc);
doc.add(new BinaryPoint("dim", new byte[PointValues.MAX_NUM_BYTES+1]));
});

Document doc2 = new Document();

@@ -1156,9 +1156,8 @@ public class TestPointQueries extends LuceneTestCase {
private static Codec getCodec() {
if (Codec.getDefault().getName().equals("Lucene62")) {
int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048);
double maxMBSortInHeap = 5.0 + (3*random().nextDouble());
if (VERBOSE) {
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode);
}

return new FilterCodec("Lucene62", Codec.getDefault()) {

@@ -1167,7 +1166,7 @@ public class TestPointQueries extends LuceneTestCase {
return new PointsFormat() {
@Override
public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode);
}

@Override

@@ -0,0 +1,154 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.search;

import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.index.TermState;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;

public class TestTermQuery extends LuceneTestCase {

public void testEquals() throws IOException {
QueryUtils.checkEqual(
new TermQuery(new Term("foo", "bar")),
new TermQuery(new Term("foo", "bar")));
QueryUtils.checkUnequal(
new TermQuery(new Term("foo", "bar")),
new TermQuery(new Term("foo", "baz")));
QueryUtils.checkEqual(
new TermQuery(new Term("foo", "bar")),
new TermQuery(new Term("foo", "bar"), TermContext.build(new MultiReader().getContext(), new Term("foo", "bar"))));
}

public void testCreateWeightDoesNotSeekIfScoresAreNotNeeded() throws IOException {
Directory dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
// segment that contains the term
Document doc = new Document();
doc.add(new StringField("foo", "bar", Store.NO));
w.addDocument(doc);
w.getReader().close();
// segment that does not contain the term
doc = new Document();
doc.add(new StringField("foo", "baz", Store.NO));
w.addDocument(doc);
w.getReader().close();
// segment that does not contain the field
w.addDocument(new Document());

DirectoryReader reader = w.getReader();
FilterDirectoryReader noSeekReader = new NoSeekDirectoryReader(reader);
IndexSearcher noSeekSearcher = new IndexSearcher(noSeekReader);
Query query = new TermQuery(new Term("foo", "bar"));
AssertionError e = expectThrows(AssertionError.class,
() -> noSeekSearcher.createNormalizedWeight(query, true));
assertEquals("no seek", e.getMessage());

noSeekSearcher.createNormalizedWeight(query, false); // no exception
IndexSearcher searcher = new IndexSearcher(reader);
// use a collector rather than searcher.count() which would just read the
// doc freq instead of creating a scorer
TotalHitCountCollector collector = new TotalHitCountCollector();
searcher.search(query, collector);
assertEquals(1, collector.getTotalHits());
TermQuery queryWithContext = new TermQuery(new Term("foo", "bar"),
TermContext.build(reader.getContext(), new Term("foo", "bar")));
collector = new TotalHitCountCollector();
searcher.search(queryWithContext, collector);
assertEquals(1, collector.getTotalHits());

IOUtils.close(reader, w, dir);
}

private static class NoSeekDirectoryReader extends FilterDirectoryReader {

public NoSeekDirectoryReader(DirectoryReader in) throws IOException {
super(in, new SubReaderWrapper() {
@Override
public LeafReader wrap(LeafReader reader) {
return new NoSeekLeafReader(reader);
}
});
}

@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return new NoSeekDirectoryReader(in);
}

}

private static class NoSeekLeafReader extends FilterLeafReader {

public NoSeekLeafReader(LeafReader in) {
super(in);
}

@Override
public Fields fields() throws IOException {
return new FilterFields(super.fields()) {
@Override
public Terms terms(String field) throws IOException {
return new FilterTerms(super.terms(field)) {
@Override
public TermsEnum iterator() throws IOException {
return new FilterTermsEnum(super.iterator()) {
@Override
public SeekStatus seekCeil(BytesRef text) throws IOException {
throw new AssertionError("no seek");
}
@Override
public void seekExact(BytesRef term, TermState state) throws IOException {
throw new AssertionError("no seek");
}
@Override
public boolean seekExact(BytesRef text) throws IOException {
throw new AssertionError("no seek");
}
@Override
public void seekExact(long ord) throws IOException {
throw new AssertionError("no seek");
}
};
}
};
}
};
}

};

}

@@ -21,11 +21,9 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.stream.Collectors;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;

@@ -1045,6 +1043,7 @@ public class MemoryIndex {
private final class MemoryIndexReader extends LeafReader {

private final PointValues pointValues;
private Fields memoryFields = new MemoryFields(fields);

private MemoryIndexReader() {
super(); // avoid as much superclass baggage as possible

@@ -1198,13 +1197,7 @@ public class MemoryIndex {

@Override
public Fields fields() {
Map<String, Info> filteredFields = fields.entrySet().stream()
.filter(entry -> entry.getValue().numTokens > 0)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
(u,v) -> { throw new IllegalStateException(String.format(Locale.ROOT, "Duplicate key %s", u));},
TreeMap::new
));
return new MemoryFields(filteredFields );
return memoryFields;
}

private class MemoryFields extends Fields {

@@ -1217,13 +1210,16 @@ public class MemoryIndex {

@Override
public Iterator<String> iterator() {
return fields.keySet().iterator();
return fields.entrySet().stream()
.filter(e -> e.getValue().numTokens > 0)
.map(Map.Entry::getKey)
.iterator();
}

@Override
public Terms terms(final String field) {
final Info info = fields.get(field);
if (info == null) {
if (info == null || info.numTokens <= 0) {
return null;
}

@@ -1278,7 +1274,11 @@ public class MemoryIndex {

@Override
public int size() {
return fields.size();
int size = 0;
for (String fieldName : this) {
size++;
}
return size;
}
}

@@ -45,6 +45,7 @@ import org.apache.lucene.document.TextField;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInvertState;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;

@@ -129,6 +130,19 @@ public class TestMemoryIndex extends LuceneTestCase {
assertEquals("be", terms.term().utf8ToString());
TestUtil.checkReader(reader);
}

public void testFieldsOnlyReturnsIndexedFields() throws IOException {
Document doc = new Document();

doc.add(new NumericDocValuesField("numeric", 29L));
doc.add(new TextField("text", "some text", Field.Store.NO));

MemoryIndex mi = MemoryIndex.fromDocument(doc, analyzer);
IndexSearcher searcher = mi.createSearcher();
IndexReader reader = searcher.getIndexReader();

assertEquals(reader.getTermVectors(0).size(), 1);
}

public void testReaderConsistency() throws IOException {
Analyzer analyzer = new MockPayloadAnalyzer();

@@ -33,6 +33,7 @@ import java.io.InputStream;
 */
public class CoreParser implements QueryBuilder {

protected String defaultField;
protected Analyzer analyzer;
protected QueryParser parser;
protected QueryBuilderFactory queryFactory;

@@ -59,6 +60,7 @@ public class CoreParser implements QueryBuilder {
}

protected CoreParser(String defaultField, Analyzer analyzer, QueryParser parser) {
this.defaultField = defaultField;
this.analyzer = analyzer;
this.parser = parser;

@@ -25,6 +25,7 @@ import java.io.PrintStream;
import java.nio.file.NoSuchFileException;
import java.util.Locale;
import java.util.Map;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.DirectoryReader;

@@ -39,10 +40,10 @@ import org.apache.lucene.store.IndexInput;
 *
 * @lucene.experimental */

abstract class Node implements Closeable {
public abstract class Node implements Closeable {

static boolean VERBOSE_FILES = true;
static boolean VERBOSE_CONNECTIONS = false;
public static boolean VERBOSE_FILES = true;
public static boolean VERBOSE_CONNECTIONS = false;

// Keys we store into IndexWriter's commit user data:

@@ -87,6 +88,16 @@ abstract class Node implements Closeable {
this.printStream = printStream;
}

/** Returns the {@link ReferenceManager} to use for acquiring and releasing searchers */
public ReferenceManager<IndexSearcher> getSearcherManager() {
return mgr;
}

/** Returns the {@link Directory} this node is writing to */
public Directory getDirectory() {
return dir;
}

@Override
public String toString() {
return getClass().getSimpleName() + "(id=" + id + ")";

@@ -119,7 +130,7 @@ abstract class Node implements Closeable {
}
}

protected void message(String message) {
public void message(String message) {
if (printStream != null) {
long now = System.nanoTime();
printStream.println(String.format(Locale.ROOT,

@@ -128,6 +128,11 @@ public abstract class PrimaryNode extends Node {
}
}

/** Returns the current primary generation, which is incremented each time a new primary is started for this index */
public long getPrimaryGen() {
return primaryGen;
}

// TODO: in the future, we should separate "flush" (returns an incRef'd SegmentInfos) from "refresh" (open new NRT reader from
// IndexWriter) so that the latter can be done concurrently while copying files out to replicas, minimizing the refresh time from the
// replicas. But fixing this is tricky because e.g. IndexWriter may complete a big merge just after returning the incRef'd SegmentInfos

@@ -0,0 +1,262 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.document;

import org.apache.lucene.document.RangeFieldQuery.QueryType;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;

/**
 * An indexed Double Range field.
 * <p>
 * This field indexes dimensional ranges defined as min/max pairs. It supports
 * up to a maximum of 4 dimensions (indexed as 8 numeric values). With 1 dimension representing a single double range,
 * 2 dimensions representing a bounding box, 3 dimensions a bounding cube, and 4 dimensions a tesseract.
 * <p>
 * Multiple values for the same field in one document is supported, and open ended ranges can be defined using
 * {@code Double.NEGATIVE_INFINITY} and {@code Double.POSITIVE_INFINITY}.
 *
 * <p>
 * This field defines the following static factory methods for common search operations over double ranges:
 * <ul>
 * <li>{@link #newIntersectsQuery newIntersectsQuery()} matches ranges that intersect the defined search range.
 * <li>{@link #newWithinQuery newWithinQuery()} matches ranges that are within the defined search range.
 * <li>{@link #newContainsQuery newContainsQuery()} matches ranges that contain the defined search range.
 * </ul>
 */
public class DoubleRangeField extends Field {
/** stores double values so number of bytes is 8 */
public static final int BYTES = Double.BYTES;

/**
 * Create a new DoubleRangeField type, from min/max parallel arrays
 *
 * @param name field name. must not be null.
 * @param min range min values; each entry is the min value for the dimension
 * @param max range max values; each entry is the max value for the dimension
 */
public DoubleRangeField(String name, final double[] min, final double[] max) {
super(name, getType(min.length));
setRangeValues(min, max);
}

/** set the field type */
private static FieldType getType(int dimensions) {
if (dimensions > 4) {
throw new IllegalArgumentException("DoubleRangeField does not support greater than 4 dimensions");
}

FieldType ft = new FieldType();
// dimensions is set as 2*dimension size (min/max per dimension)
ft.setDimensions(dimensions*2, BYTES);
ft.freeze();
return ft;
}

/**
 * Changes the values of the field.
 * @param min array of min values. (accepts {@code Double.NEGATIVE_INFINITY})
 * @param max array of max values. (accepts {@code Double.POSITIVE_INFINITY})
 * @throws IllegalArgumentException if {@code min} or {@code max} is invalid
 */
public void setRangeValues(double[] min, double[] max) {
checkArgs(min, max);
if (min.length*2 != type.pointDimensionCount() || max.length*2 != type.pointDimensionCount()) {
throw new IllegalArgumentException("field (name=" + name + ") uses " + type.pointDimensionCount()/2
+ " dimensions; cannot change to (incoming) " + min.length + " dimensions");
}

final byte[] bytes;
if (fieldsData == null) {
bytes = new byte[BYTES*2*min.length];
fieldsData = new BytesRef(bytes);
} else {
bytes = ((BytesRef)fieldsData).bytes;
}
verifyAndEncode(min, max, bytes);
}

/** validate the arguments */
private static void checkArgs(final double[] min, final double[] max) {
if (min == null || max == null || min.length == 0 || max.length == 0) {
throw new IllegalArgumentException("min/max range values cannot be null or empty");
}
if (min.length != max.length) {
throw new IllegalArgumentException("min/max ranges must agree");
}
if (min.length > 4) {
throw new IllegalArgumentException("DoubleRangeField does not support greater than 4 dimensions");
}
}

/**
 * Encodes the min, max ranges into a byte array
 */
private static byte[] encode(double[] min, double[] max) {
checkArgs(min, max);
byte[] b = new byte[BYTES*2*min.length];
verifyAndEncode(min, max, b);
return b;
}

/**
 * encode the ranges into a sortable byte array ({@code Double.NaN} not allowed)
 * <p>
 * example for 4 dimensions (8 bytes per dimension value):
 * minD1 ... minD4 | maxD1 ... maxD4
 */
static void verifyAndEncode(double[] min, double[] max, byte[] bytes) {
for (int d=0,i=0,j=min.length*BYTES; d<min.length; ++d, i+=BYTES, j+=BYTES) {
if (Double.isNaN(min[d])) {
throw new IllegalArgumentException("invalid min value (" + Double.NaN + ")" + " in DoubleRangeField");
}
if (Double.isNaN(max[d])) {
throw new IllegalArgumentException("invalid max value (" + Double.NaN + ")" + " in DoubleRangeField");
}
if (min[d] > max[d]) {
throw new IllegalArgumentException("min value (" + min[d] + ") is greater than max value (" + max[d] + ")");
}
encode(min[d], bytes, i);
encode(max[d], bytes, j);
}
}

/** encode the given value into the byte array at the defined offset */
private static void encode(double val, byte[] bytes, int offset) {
NumericUtils.longToSortableBytes(NumericUtils.doubleToSortableLong(val), bytes, offset);
}

/**
 * Get the min value for the given dimension
 * @param dimension the dimension, always positive
 * @return the decoded min value
 */
public double getMin(int dimension) {
if (dimension < 0 || dimension >= type.pointDimensionCount()/2) {
throw new IllegalArgumentException("dimension request (" + dimension +
") out of bounds for field (name=" + name + " dimensions=" + type.pointDimensionCount()/2 + "). ");
}
return decodeMin(((BytesRef)fieldsData).bytes, dimension);
}

/**
 * Get the max value for the given dimension
 * @param dimension the dimension, always positive
 * @return the decoded max value
 */
public double getMax(int dimension) {
if (dimension < 0 || dimension >= type.pointDimensionCount()/2) {
throw new IllegalArgumentException("dimension request (" + dimension +
") out of bounds for field (name=" + name + " dimensions=" + type.pointDimensionCount()/2 + "). ");
}
return decodeMax(((BytesRef)fieldsData).bytes, dimension);
}

/** decodes the min value (for the defined dimension) from the encoded input byte array */
static double decodeMin(byte[] b, int dimension) {
int offset = dimension*BYTES;
return NumericUtils.sortableLongToDouble(NumericUtils.sortableBytesToLong(b, offset));
}

/** decodes the max value (for the defined dimension) from the encoded input byte array */
static double decodeMax(byte[] b, int dimension) {
int offset = b.length/2 + dimension*BYTES;
return NumericUtils.sortableLongToDouble(NumericUtils.sortableBytesToLong(b, offset));
}

/**
 * Create a query for matching indexed ranges that intersect the defined range.
 * @param field field name. must not be null.
 * @param min array of min values. (accepts {@code Double.NEGATIVE_INFINITY})
 * @param max array of max values. (accepts {@code Double.POSITIVE_INFINITY})
 * @return query for matching intersecting ranges (overlap, within, or contains)
 * @throws IllegalArgumentException if {@code field} is null, {@code min} or {@code max} is invalid
 */
public static Query newIntersectsQuery(String field, final double[] min, final double[] max) {
return new RangeFieldQuery(field, encode(min, max), min.length, QueryType.INTERSECTS) {
@Override
protected String toString(byte[] ranges, int dimension) {
return DoubleRangeField.toString(ranges, dimension);
}
};
}

/**
 * Create a query for matching indexed ranges that contain the defined range.
 * @param field field name. must not be null.
 * @param min array of min values. (accepts {@code Double.MIN_VALUE})
 * @param max array of max values. (accepts {@code Double.MAX_VALUE})
 * @return query for matching ranges that contain the defined range
 * @throws IllegalArgumentException if {@code field} is null, {@code min} or {@code max} is invalid
 */
public static Query newContainsQuery(String field, final double[] min, final double[] max) {
return new RangeFieldQuery(field, encode(min, max), min.length, QueryType.CONTAINS) {
@Override
protected String toString(byte[] ranges, int dimension) {
return DoubleRangeField.toString(ranges, dimension);
}
};
}

/**
 * Create a query for matching indexed ranges that are within the defined range.
 * @param field field name. must not be null.
 * @param min array of min values. (accepts {@code Double.MIN_VALUE})
 * @param max array of max values. (accepts {@code Double.MAX_VALUE})
 * @return query for matching ranges within the defined range
 * @throws IllegalArgumentException if {@code field} is null, {@code min} or {@code max} is invalid
 */
public static Query newWithinQuery(String field, final double[] min, final double[] max) {
checkArgs(min, max);
return new RangeFieldQuery(field, encode(min, max), min.length, QueryType.WITHIN) {
@Override
protected String toString(byte[] ranges, int dimension) {
return DoubleRangeField.toString(ranges, dimension);
}
};
}

@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(getClass().getSimpleName());
sb.append(" <");
sb.append(name);
sb.append(':');
byte[] b = ((BytesRef)fieldsData).bytes;
toString(b, 0);
for (int d=1; d<type.pointDimensionCount(); ++d) {
sb.append(' ');
toString(b, d);
}
sb.append('>');

return sb.toString();
}

/**
 * Returns the String representation for the range at the given dimension
 * @param ranges the encoded ranges, never null
 * @param dimension the dimension of interest
 * @return The string representation for the range at the provided dimension
 */
private static String toString(byte[] ranges, int dimension) {
return "[" + Double.toString(decodeMin(ranges, dimension)) + " : "
+ Double.toString(decodeMax(ranges, dimension)) + "]";
}
}

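A short usage sketch for the new field and its query factories defined above. The `writer` parameter and the field/box values are illustrative and not part of this commit.

```java
import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleRangeField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.Query;

// Sketch: index one 2-D double range (a min/max bounding box) and build an
// intersects query against the same field.
static Query indexAndQueryBox(IndexWriter writer) throws IOException {
  Document doc = new Document();
  doc.add(new DoubleRangeField("box", new double[] {0.0, 0.0}, new double[] {10.0, 10.0}));
  writer.addDocument(doc);
  // matches any indexed range that overlaps [5,15] x [5,15]
  return DoubleRangeField.newIntersectsQuery("box", new double[] {5.0, 5.0}, new double[] {15.0, 15.0});
}
```
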
@ -0,0 +1,315 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.lucene.document;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Objects;
|
||||
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.PointValues;
|
||||
import org.apache.lucene.index.PointValues.Relation;
|
||||
import org.apache.lucene.index.PointValues.IntersectVisitor;
|
||||
import org.apache.lucene.search.ConstantScoreScorer;
|
||||
import org.apache.lucene.search.ConstantScoreWeight;
|
||||
import org.apache.lucene.search.DocIdSet;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.util.DocIdSetBuilder;
|
||||
import org.apache.lucene.util.StringHelper;
|
||||
|
||||
/**
|
||||
* Query class for searching {@code RangeField} types by a defined {@link Relation}.
|
||||
*/
|
||||
abstract class RangeFieldQuery extends Query {
|
||||
/** field name */
|
||||
final String field;
|
||||
/** query relation
|
||||
* intersects: {@code CELL_CROSSES_QUERY},
|
||||
* contains: {@code CELL_CONTAINS_QUERY},
|
||||
* within: {@code CELL_WITHIN_QUERY} */
|
||||
final QueryType queryType;
|
||||
/** number of dimensions - max 4 */
|
||||
final int numDims;
|
||||
/** ranges encoded as a sortable byte array */
|
||||
final byte[] ranges;
|
||||
/** number of bytes per dimension */
|
||||
final int bytesPerDim;
|
||||
|
||||
/** Used by {@code RangeFieldQuery} to check how each internal or leaf node relates to the query. */
|
||||
enum QueryType {
|
||||
/** Use this for intersects queries. */
|
||||
INTERSECTS,
|
||||
/** Use this for within queries. */
|
||||
WITHIN,
|
||||
/** Use this for contains */
|
||||
CONTAINS
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a query for searching indexed ranges that match the provided relation.
|
||||
* @param field field name. must not be null.
|
||||
* @param ranges encoded range values; this is done by the {@code RangeField} implementation
|
||||
* @param queryType the query relation
|
||||
*/
|
||||
RangeFieldQuery(String field, final byte[] ranges, final int numDims, final QueryType queryType) {
|
||||
checkArgs(field, ranges, numDims);
|
||||
if (queryType == null) {
|
||||
throw new IllegalArgumentException("Query type cannot be null");
|
||||
}
|
||||
this.field = field;
|
||||
this.queryType = queryType;
|
||||
this.numDims = numDims;
|
||||
this.ranges = ranges;
|
||||
this.bytesPerDim = ranges.length / (2*numDims);
|
||||
}
|
||||
|
||||
/** check input arguments */
|
||||
private static void checkArgs(String field, final byte[] ranges, final int numDims) {
|
||||
if (field == null) {
|
||||
throw new IllegalArgumentException("field must not be null");
|
||||
}
|
||||
if (numDims > 4) {
|
||||
throw new IllegalArgumentException("dimension size cannot be greater than 4");
|
||||
}
|
||||
if (ranges == null || ranges.length == 0) {
|
||||
throw new IllegalArgumentException("encoded ranges cannot be null or empty");
|
||||
}
|
||||
}
|
||||
|
||||
/** Check indexed field info against the provided query data. */
|
||||
private void checkFieldInfo(FieldInfo fieldInfo) {
|
||||
if (fieldInfo.getPointDimensionCount()/2 != numDims) {
|
||||
throw new IllegalArgumentException("field=\"" + field + "\" was indexed with numDims="
|
||||
+ fieldInfo.getPointDimensionCount()/2 + " but this query has numDims=" + numDims);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
|
||||
return new ConstantScoreWeight(this) {
|
||||
final RangeFieldComparator comparator = new RangeFieldComparator();
|
||||
private DocIdSet buildMatchingDocIdSet(LeafReader reader, PointValues values) throws IOException {
|
||||
DocIdSetBuilder result = new DocIdSetBuilder(reader.maxDoc(), values, field);
|
||||
values.intersect(field,
|
||||
new IntersectVisitor() {
|
||||
DocIdSetBuilder.BulkAdder adder;
|
||||
@Override
|
||||
public void grow(int count) {
|
||||
adder = result.grow(count);
|
||||
}
|
||||
@Override
|
||||
public void visit(int docID) throws IOException {
|
||||
adder.add(docID);
|
||||
}
|
||||
@Override
|
||||
public void visit(int docID, byte[] leaf) throws IOException {
|
||||
// add the document iff:
|
||||
if (Arrays.equals(ranges, leaf)
|
||||
// target is within cell and queryType is INTERSECTS or CONTAINS:
|
||||
|| (comparator.isWithin(leaf) && queryType != QueryType.WITHIN)
|
||||
// target contains cell and queryType is INTERSECTS or WITHIN:
|
||||
|| (comparator.contains(leaf) && queryType != QueryType.CONTAINS)
|
||||
// target is not disjoint (crosses) and queryType is INTERSECTS
|
||||
|| (comparator.isDisjoint(leaf) == false && queryType == QueryType.INTERSECTS)) {
|
||||
adder.add(docID);
|
||||
}
|
||||
}
|
||||
@Override
|
||||
public Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
|
||||
byte[] node = getInternalRange(minPackedValue, maxPackedValue);
|
||||
// compute range relation for BKD traversal
|
||||
if (comparator.isDisjoint(node)) {
|
||||
return Relation.CELL_OUTSIDE_QUERY;
|
||||
} else if (comparator.isWithin(node)) {
|
||||
// target within cell; continue traversing:
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
} else if (comparator.contains(node)) {
|
||||
// target contains cell; add iff queryType is not a CONTAINS query:
|
||||
return (queryType == QueryType.CONTAINS) ? Relation.CELL_OUTSIDE_QUERY : Relation.CELL_INSIDE_QUERY;
|
||||
}
|
||||
// target intersects cell; continue traversing:
|
||||
return Relation.CELL_CROSSES_QUERY;
|
||||
}
|
||||
});
|
||||
return result.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Scorer scorer(LeafReaderContext context) throws IOException {
|
||||
LeafReader reader = context.reader();
|
||||
PointValues values = reader.getPointValues();
|
||||
if (values == null) {
|
||||
// no docs in this segment indexed any ranges
|
||||
return null;
|
||||
}
|
||||
FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
|
||||
if (fieldInfo == null) {
|
||||
// no docs in this segment indexed this field
return null;
|
||||
}
|
||||
checkFieldInfo(fieldInfo);
|
||||
boolean allDocsMatch = true;
|
||||
if (values.getDocCount(field) == reader.maxDoc()) {
|
||||
// if query crosses, docs need to be further scrutinized
|
||||
byte[] range = getInternalRange(values.getMinPackedValue(field), values.getMaxPackedValue(field));
|
||||
// if the internal node is not equal and not contained by the query, all docs do not match
|
||||
if ((!Arrays.equals(ranges, range)
|
||||
&& (comparator.contains(range) && queryType != QueryType.CONTAINS)) == false) {
|
||||
allDocsMatch = false;
|
||||
}
|
||||
} else {
|
||||
allDocsMatch = false;
|
||||
}
|
||||
|
||||
DocIdSetIterator iterator = allDocsMatch == true ?
|
||||
DocIdSetIterator.all(reader.maxDoc()) : buildMatchingDocIdSet(reader, values).iterator();
|
||||
return new ConstantScoreScorer(this, score(), iterator);
|
||||
}
|
||||
|
||||
/** get an encoded byte representation of the internal node; this is
|
||||
* the lower half of the min array and the upper half of the max array */
|
||||
private byte[] getInternalRange(byte[] min, byte[] max) {
|
||||
byte[] range = new byte[min.length];
|
||||
final int dimSize = numDims * bytesPerDim;
|
||||
System.arraycopy(min, 0, range, 0, dimSize);
|
||||
System.arraycopy(max, dimSize, range, dimSize, dimSize);
|
||||
return range;
|
||||
}
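// Worked example of the packed layout assumed above (added for clarity, not part of the
// patch itself): for a 2-dimensional double range, numDims=2 and bytesPerDim=8, so the
// query's 'ranges' array and each indexed value are 2*2*8 = 32 bytes laid out as
//   [min_0 (8 bytes)][min_1 (8 bytes)][max_0 (8 bytes)][max_1 (8 bytes)]
// i.e. dimension d's min starts at offset d*bytesPerDim and its max at
// numDims*bytesPerDim + d*bytesPerDim, which is the offset arithmetic the
// RangeFieldComparator methods below rely on.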
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* RangeFieldComparator class provides the core comparison logic for accepting or rejecting indexed
|
||||
* {@code RangeField} types based on the defined query range and relation.
|
||||
*/
|
||||
class RangeFieldComparator {
|
||||
/** check if the query is outside the candidate range */
|
||||
private boolean isDisjoint(final byte[] range) {
|
||||
for (int d=0; d<numDims; ++d) {
|
||||
if (compareMinMax(range, d) > 0 || compareMaxMin(range, d) < 0) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/** check if query is within candidate range */
|
||||
private boolean isWithin(final byte[] range) {
|
||||
for (int d=0; d<numDims; ++d) {
|
||||
if (compareMinMin(range, d) < 0 || compareMaxMax(range, d) > 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/** check if query contains candidate range */
|
||||
private boolean contains(final byte[] range) {
|
||||
for (int d=0; d<numDims; ++d) {
|
||||
if (compareMinMin(range, d) > 0 || compareMaxMax(range, d) < 0) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/** compare the encoded min value (for the defined query dimension) with the encoded min value in the byte array */
|
||||
private int compareMinMin(byte[] b, int dimension) {
|
||||
// convert dimension to offset:
|
||||
dimension *= bytesPerDim;
|
||||
return StringHelper.compare(bytesPerDim, ranges, dimension, b, dimension);
|
||||
}
|
||||
|
||||
/** compare the encoded min value (for the defined query dimension) with the encoded max value in the byte array */
|
||||
private int compareMinMax(byte[] b, int dimension) {
|
||||
// convert dimension to offset:
|
||||
dimension *= bytesPerDim;
|
||||
return StringHelper.compare(bytesPerDim, ranges, dimension, b, numDims * bytesPerDim + dimension);
|
||||
}
|
||||
|
||||
/** compare the encoded max value (for the defined query dimension) with the encoded min value in the byte array */
|
||||
private int compareMaxMin(byte[] b, int dimension) {
|
||||
// convert dimension to offset:
|
||||
dimension *= bytesPerDim;
|
||||
return StringHelper.compare(bytesPerDim, ranges, numDims * bytesPerDim + dimension, b, dimension);
|
||||
}
|
||||
|
||||
/** compare the encoded max value (for the defined query dimension) with the encoded max value in the byte array */
|
||||
private int compareMaxMax(byte[] b, int dimension) {
|
||||
// convert dimension to max offset:
|
||||
dimension = numDims * bytesPerDim + dimension * bytesPerDim;
|
||||
return StringHelper.compare(bytesPerDim, ranges, dimension, b, dimension);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int hash = classHash();
|
||||
hash = 31 * hash + field.hashCode();
|
||||
hash = 31 * hash + numDims;
|
||||
hash = 31 * hash + queryType.hashCode();
|
||||
hash = 31 * hash + Arrays.hashCode(ranges);
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean equals(Object o) {
|
||||
return sameClassAs(o) &&
|
||||
equalsTo(getClass().cast(o));
|
||||
}
|
||||
|
||||
protected boolean equalsTo(RangeFieldQuery other) {
|
||||
return Objects.equals(field, other.field) &&
|
||||
numDims == other.numDims &&
|
||||
Arrays.equals(ranges, other.ranges) &&
|
||||
other.queryType == queryType;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString(String field) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
if (this.field.equals(field) == false) {
|
||||
sb.append(this.field);
|
||||
sb.append(':');
|
||||
}
|
||||
sb.append("<ranges:");
|
||||
sb.append(toString(ranges, 0));
|
||||
for (int d=1; d<numDims; ++d) {
|
||||
sb.append(' ');
|
||||
sb.append(toString(ranges, d));
|
||||
}
|
||||
sb.append('>');
|
||||
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a string of a single value in a human-readable format for debugging.
|
||||
* This is used by {@link #toString()}.
|
||||
*
|
||||
* @param dimension dimension of the particular value
|
||||
* @param ranges encoded ranges, never null
|
||||
* @return human readable value for debugging
|
||||
*/
|
||||
protected abstract String toString(byte[] ranges, int dimension);
|
||||
}
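A minimal usage sketch (added for illustration; the field name and values are invented, but the DoubleRangeField constructor and the newIntersectsQuery/newWithinQuery/newContainsQuery factories are the ones introduced by this change):

import org.apache.lucene.document.Document;
import org.apache.lucene.document.DoubleRangeField;
import org.apache.lucene.search.Query;

public class DoubleRangeFieldUsageSketch {
  public static void main(String[] args) {
    // index a 2-dimensional range; min and max arrays must have equal length (at most 4 dims)
    Document doc = new Document();
    doc.add(new DoubleRangeField("rangeField", new double[] {-10.0, -10.0}, new double[] {10.0, 10.0}));

    // the three supported relations between the query range and the indexed ranges;
    // all three of these queries would match the document above
    Query intersects = DoubleRangeField.newIntersectsQuery("rangeField", new double[] {5.0, 5.0}, new double[] {20.0, 20.0});
    Query within     = DoubleRangeField.newWithinQuery("rangeField", new double[] {-20.0, -20.0}, new double[] {20.0, 20.0});
    Query contains   = DoubleRangeField.newContainsQuery("rangeField", new double[] {-1.0, -1.0}, new double[] {1.0, 1.0});

    System.out.println(intersects + " " + within + " " + contains);
    // indexing 'doc' and executing these queries then follows the usual
    // IndexWriter / IndexSearcher flow, as in the tests added below.
  }
}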
|
|
@@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.lucene.document;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Random testing for RangeField type.
|
||||
**/
|
||||
public class TestDoubleRangeField extends LuceneTestCase {
|
||||
private static final String FIELD_NAME = "rangeField";
|
||||
|
||||
/** test illegal NaN range values */
|
||||
public void testIllegalNaNValues() {
|
||||
Document doc = new Document();
|
||||
IllegalArgumentException expected;
|
||||
|
||||
expected = expectThrows(IllegalArgumentException.class, () ->
|
||||
doc.add(new DoubleRangeField(FIELD_NAME, new double[] {Double.NaN}, new double[] {5})));
|
||||
assertTrue(expected.getMessage().contains("invalid min value"));
|
||||
|
||||
expected = expectThrows(IllegalArgumentException.class, () ->
|
||||
doc.add(new DoubleRangeField(FIELD_NAME, new double[] {5}, new double[] {Double.NaN})));
|
||||
assertTrue(expected.getMessage().contains("invalid max value"));
|
||||
}
|
||||
|
||||
/** min/max array sizes must agree */
|
||||
public void testUnevenArrays() {
|
||||
Document doc = new Document();
|
||||
IllegalArgumentException expected;
|
||||
expected = expectThrows(IllegalArgumentException.class, () ->
|
||||
doc.add(new DoubleRangeField(FIELD_NAME, new double[] {5, 6}, new double[] {5})));
|
||||
assertTrue(expected.getMessage().contains("min/max ranges must agree"));
|
||||
}
|
||||
|
||||
/** dimensions greater than 4 not supported */
|
||||
public void testOversizeDimensions() {
|
||||
Document doc = new Document();
|
||||
IllegalArgumentException expected;
|
||||
expected = expectThrows(IllegalArgumentException.class, () ->
|
||||
doc.add(new DoubleRangeField(FIELD_NAME, new double[] {1, 2, 3, 4, 5}, new double[] {5})));
|
||||
assertTrue(expected.getMessage().contains("does not support greater than 4 dimensions"));
|
||||
}
|
||||
|
||||
/** min cannot be greater than max */
|
||||
public void testMinGreaterThanMax() {
|
||||
Document doc = new Document();
|
||||
IllegalArgumentException expected;
|
||||
expected = expectThrows(IllegalArgumentException.class, () ->
|
||||
doc.add(new DoubleRangeField(FIELD_NAME, new double[] {3, 4}, new double[] {1, 2})));
|
||||
assertTrue(expected.getMessage().contains("is greater than max value"));
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,427 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.lucene.search;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.IndexWriterConfig;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.MultiDocValues;
|
||||
import org.apache.lucene.index.MultiFields;
|
||||
import org.apache.lucene.index.NumericDocValues;
|
||||
import org.apache.lucene.index.SerialMergeScheduler;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.FixedBitSet;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
|
||||
/**
|
||||
* Abstract class to do basic tests for a RangeField query.
|
||||
*/
|
||||
public abstract class BaseRangeFieldQueryTestCase extends LuceneTestCase {
|
||||
protected abstract Field newRangeField(double[] min, double[] max);
|
||||
|
||||
protected abstract Query newIntersectsQuery(double[] min, double[] max);
|
||||
|
||||
protected abstract Query newContainsQuery(double[] min, double[] max);
|
||||
|
||||
protected abstract Query newWithinQuery(double[] min, double[] max);
|
||||
|
||||
protected int dimension() {
|
||||
return random().nextInt(4) + 1;
|
||||
}
|
||||
|
||||
public void testRandomTiny() throws Exception {
|
||||
// Make sure single-leaf-node case is OK:
|
||||
doTestRandom(10, false);
|
||||
}
|
||||
|
||||
public void testRandomMedium() throws Exception {
|
||||
doTestRandom(10000, false);
|
||||
}
|
||||
|
||||
@Nightly
|
||||
public void testRandomBig() throws Exception {
|
||||
doTestRandom(200000, false);
|
||||
}
|
||||
|
||||
public void testMultiValued() throws Exception {
|
||||
doTestRandom(10000, true);
|
||||
}
|
||||
|
||||
private void doTestRandom(int count, boolean multiValued) throws Exception {
|
||||
int numDocs = atLeast(count);
|
||||
int dimensions = dimension();
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: numDocs=" + numDocs);
|
||||
}
|
||||
|
||||
Box[][] boxes = new Box[numDocs][];
|
||||
|
||||
boolean haveRealDoc = true;
|
||||
|
||||
nextdoc: for (int id=0; id<numDocs; ++id) {
|
||||
int x = random().nextInt(20);
|
||||
if (boxes[id] == null) {
|
||||
boxes[id] = new Box[] {nextBox(dimensions)};
|
||||
}
|
||||
if (x == 17) {
|
||||
// some docs don't have a box:
|
||||
boxes[id][0].min[0] = Double.NaN;
|
||||
if (VERBOSE) {
|
||||
System.out.println(" id=" + id + " is missing");
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (multiValued == true && random().nextBoolean()) {
|
||||
// randomly add multi valued documents (up to 2 fields)
|
||||
int n = random().nextInt(2) + 1;
|
||||
boxes[id] = new Box[n];
|
||||
for (int i=0; i<n; ++i) {
|
||||
boxes[id][i] = nextBox(dimensions);
|
||||
}
|
||||
}
|
||||
|
||||
if (id > 0 && x < 9 && haveRealDoc) {
|
||||
int oldID;
|
||||
int i=0;
|
||||
// don't step on missing boxes:
|
||||
while (true) {
|
||||
oldID = random().nextInt(id);
|
||||
if (Double.isNaN(boxes[oldID][0].min[0]) == false) {
|
||||
break;
|
||||
} else if (++i > id) {
|
||||
continue nextdoc;
|
||||
}
|
||||
}
|
||||
|
||||
if (x == dimensions*2) {
|
||||
// Fully identical box (use first box in case current is multivalued but old is not)
|
||||
for (int d=0; d<dimensions; ++d) {
|
||||
boxes[id][0].min[d] = boxes[oldID][0].min[d];
|
||||
boxes[id][0].max[d] = boxes[oldID][0].max[d];
|
||||
}
|
||||
if (VERBOSE) {
|
||||
System.out.println(" id=" + id + " box=" + boxes[id] + " (same box as doc=" + oldID + ")");
|
||||
}
|
||||
} else {
|
||||
for (int m = 0, even = dimensions % 2; m < dimensions * 2; ++m) {
|
||||
if (x == m) {
|
||||
int d = (int)Math.floor(m/2);
|
||||
// current could be multivalue but old may not be, so use first box
|
||||
if (even == 0) {
|
||||
boxes[id][0].setVal(d, boxes[oldID][0].min[d]);
|
||||
if (VERBOSE) {
|
||||
System.out.println(" id=" + id + " box=" + boxes[id] + " (same min[" + d + "] as doc=" + oldID + ")");
|
||||
}
|
||||
} else {
|
||||
boxes[id][0].setVal(d, boxes[oldID][0].max[d]);
|
||||
if (VERBOSE) {
|
||||
System.out.println(" id=" + id + " box=" + boxes[id] + " (same max[" + d + "] as doc=" + oldID + ")");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
verify(boxes);
|
||||
}
|
||||
|
||||
private void verify(Box[][] boxes) throws Exception {
|
||||
IndexWriterConfig iwc = newIndexWriterConfig();
|
||||
// Else seeds may not reproduce:
|
||||
iwc.setMergeScheduler(new SerialMergeScheduler());
|
||||
// Else we can get O(N^2) merging
|
||||
int mbd = iwc.getMaxBufferedDocs();
|
||||
if (mbd != -1 && mbd < boxes.length/100) {
|
||||
iwc.setMaxBufferedDocs(boxes.length/100);
|
||||
}
|
||||
Directory dir;
|
||||
if (boxes.length > 50000) {
|
||||
dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
|
||||
} else {
|
||||
dir = newDirectory();
|
||||
}
|
||||
|
||||
Set<Integer> deleted = new HashSet<>();
|
||||
IndexWriter w = new IndexWriter(dir, iwc);
|
||||
for (int id=0; id < boxes.length; ++id) {
|
||||
Document doc = new Document();
|
||||
doc.add(newStringField("id", ""+id, Field.Store.NO));
|
||||
doc.add(new NumericDocValuesField("id", id));
|
||||
if (Double.isNaN(boxes[id][0].min[0]) == false) {
|
||||
for (int n=0; n<boxes[id].length; ++n) {
|
||||
doc.add(newRangeField(boxes[id][n].min, boxes[id][n].max));
|
||||
}
|
||||
}
|
||||
w.addDocument(doc);
|
||||
if (id > 0 && random().nextInt(100) == 1) {
|
||||
int idToDelete = random().nextInt(id);
|
||||
w.deleteDocuments(new Term("id", ""+idToDelete));
|
||||
deleted.add(idToDelete);
|
||||
if (VERBOSE) {
|
||||
System.out.println(" delete id=" + idToDelete);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (random().nextBoolean()) {
|
||||
w.forceMerge(1);
|
||||
}
|
||||
final IndexReader r = DirectoryReader.open(w);
|
||||
w.close();
|
||||
IndexSearcher s = newSearcher(r);
|
||||
|
||||
int dimensions = boxes[0][0].min.length;
|
||||
int iters = atLeast(25);
|
||||
NumericDocValues docIDToID = MultiDocValues.getNumericValues(r, "id");
|
||||
Bits liveDocs = MultiFields.getLiveDocs(s.getIndexReader());
|
||||
int maxDoc = s.getIndexReader().maxDoc();
|
||||
|
||||
for (int iter=0; iter<iters; ++iter) {
|
||||
if (VERBOSE) {
|
||||
System.out.println("\nTEST: iter=" + iter + " s=" + s);
|
||||
}
|
||||
|
||||
// occasionally test open ended bounding boxes
|
||||
Box queryBox = nextBox(dimensions);
|
||||
int rv = random().nextInt(3);
|
||||
Query query;
|
||||
Box.QueryType queryType;
|
||||
if (rv == 0) {
|
||||
queryType = Box.QueryType.INTERSECTS;
|
||||
query = newIntersectsQuery(queryBox.min, queryBox.max);
|
||||
} else if (rv == 1) {
|
||||
queryType = Box.QueryType.CONTAINS;
|
||||
query = newContainsQuery(queryBox.min, queryBox.max);
|
||||
} else {
|
||||
queryType = Box.QueryType.WITHIN;
|
||||
query = newWithinQuery(queryBox.min, queryBox.max);
|
||||
}
|
||||
|
||||
if (VERBOSE) {
|
||||
System.out.println(" query=" + query);
|
||||
}
|
||||
|
||||
final FixedBitSet hits = new FixedBitSet(maxDoc);
|
||||
s.search(query, new SimpleCollector() {
|
||||
private int docBase;
|
||||
|
||||
@Override
|
||||
public void collect(int doc) {
|
||||
hits.set(docBase + doc);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doSetNextReader(LeafReaderContext context) throws IOException {
|
||||
docBase = context.docBase;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean needsScores() { return false; }
|
||||
});
|
||||
|
||||
for (int docID=0; docID<maxDoc; ++docID) {
|
||||
int id = (int) docIDToID.get(docID);
|
||||
boolean expected;
|
||||
if (liveDocs != null && liveDocs.get(docID) == false) {
|
||||
// document is deleted
|
||||
expected = false;
|
||||
} else if (Double.isNaN(boxes[id][0].min[0])) {
|
||||
expected = false;
|
||||
} else {
|
||||
expected = expectedResult(queryBox, boxes[id], queryType);
|
||||
}
|
||||
|
||||
if (hits.get(docID) != expected) {
|
||||
StringBuilder b = new StringBuilder();
|
||||
b.append("FAIL (iter " + iter + "): ");
|
||||
if (expected == true) {
|
||||
b.append("id=" + id + (boxes[id].length > 1 ? " (MultiValue) " : " ") + "should match but did not\n");
|
||||
} else {
|
||||
b.append("id=" + id + " should not match but did\n");
|
||||
}
|
||||
b.append(" queryBox=" + queryBox + "\n");
|
||||
b.append(" box" + ((boxes[id].length > 1) ? "es=" : "=" ) + boxes[id][0]);
|
||||
for (int n=1; n<boxes[id].length; ++n) {
|
||||
b.append(", ");
|
||||
b.append(boxes[id][n]);
|
||||
}
|
||||
b.append("\n queryType=" + queryType + "\n");
|
||||
b.append(" deleted?=" + (liveDocs != null && liveDocs.get(docID) == false));
|
||||
fail("wrong hit (first of possibly more):\n\n" + b);
|
||||
}
|
||||
}
|
||||
}
|
||||
IOUtils.close(r, dir);
|
||||
}
|
||||
|
||||
protected boolean expectedResult(Box queryBox, Box[] box, Box.QueryType queryType) {
|
||||
for (int i=0; i<box.length; ++i) {
|
||||
if (expectedBBoxQueryResult(queryBox, box[i], queryType) == true) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
protected boolean expectedBBoxQueryResult(Box queryBox, Box box, Box.QueryType queryType) {
|
||||
if (box.equals(queryBox)) {
|
||||
return true;
|
||||
}
|
||||
Box.QueryType relation = box.relate(queryBox);
|
||||
if (queryType == Box.QueryType.INTERSECTS) {
|
||||
return relation != null;
|
||||
}
|
||||
return relation == queryType;
|
||||
}
|
||||
|
||||
protected double nextDoubleInternal() {
|
||||
if (rarely()) {
|
||||
return random().nextBoolean() ? Double.POSITIVE_INFINITY : Double.NEGATIVE_INFINITY;
|
||||
}
|
||||
double max = 100 / 2;
|
||||
return (max + max) * random().nextDouble() - max;
|
||||
}
|
||||
|
||||
protected Box nextBox(int dimensions) {
|
||||
double[] min = new double[dimensions];
|
||||
double[] max = new double[dimensions];
|
||||
|
||||
for (int d=0; d<dimensions; ++d) {
|
||||
min[d] = nextDoubleInternal();
|
||||
max[d] = nextDoubleInternal();
|
||||
}
|
||||
|
||||
return new Box(min, max);
|
||||
}
|
||||
|
||||
protected static class Box {
|
||||
double[] min;
|
||||
double[] max;
|
||||
|
||||
enum QueryType { INTERSECTS, WITHIN, CONTAINS }
|
||||
|
||||
Box(double[] min, double[] max) {
|
||||
assert min != null && max != null && min.length > 0 && max.length > 0
|
||||
: "test box: min/max cannot be null or empty";
|
||||
assert min.length == max.length : "test box: min/max length do not agree";
|
||||
this.min = new double[min.length];
|
||||
this.max = new double[max.length];
|
||||
for (int d=0; d<min.length; ++d) {
|
||||
this.min[d] = Math.min(min[d], max[d]);
|
||||
this.max[d] = Math.max(min[d], max[d]);
|
||||
}
|
||||
}
|
||||
|
||||
protected void setVal(int dimension, double val) {
|
||||
if (val <= min[dimension]) {
|
||||
min[dimension] = val;
|
||||
} else {
|
||||
max[dimension] = val;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
return o != null
|
||||
&& getClass() == o.getClass()
|
||||
&& equalTo(getClass().cast(o));
|
||||
}
|
||||
|
||||
private boolean equalTo(Box o) {
|
||||
return Arrays.equals(min, o.min)
|
||||
&& Arrays.equals(max, o.max);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = Arrays.hashCode(min);
|
||||
result = 31 * result + Arrays.hashCode(max);
|
||||
return result;
|
||||
}
|
||||
|
||||
QueryType relate(Box other) {
|
||||
// check disjoint
|
||||
for (int d=0; d<this.min.length; ++d) {
|
||||
if (this.min[d] > other.max[d] || this.max[d] < other.min[d]) {
|
||||
// disjoint:
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// check within
|
||||
boolean within = true;
|
||||
for (int d=0; d<this.min.length; ++d) {
|
||||
if ((this.min[d] >= other.min[d] && this.max[d] <= other.max[d]) == false) {
|
||||
// not within:
|
||||
within = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (within == true) {
|
||||
return QueryType.WITHIN;
|
||||
}
|
||||
|
||||
// check contains
|
||||
boolean contains = true;
|
||||
for (int d=0; d<this.min.length; ++d) {
|
||||
if ((this.min[d] <= other.min[d] && this.max[d] >= other.max[d]) == false) {
|
||||
// not contains:
|
||||
contains = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (contains == true) {
|
||||
return QueryType.CONTAINS;
|
||||
}
|
||||
return QueryType.INTERSECTS;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder b = new StringBuilder();
|
||||
b.append("Box(");
|
||||
b.append(min[0]);
|
||||
b.append(" TO ");
|
||||
b.append(max[0]);
|
||||
for (int d=1; d<min.length; ++d) {
|
||||
b.append(", ");
|
||||
b.append(min[d]);
|
||||
b.append(" TO ");
|
||||
b.append(max[d]);
|
||||
}
|
||||
b.append(")");
|
||||
|
||||
return b.toString();
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,106 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.lucene.search;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.DoubleRangeField;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.index.RandomIndexWriter;
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
||||
/**
|
||||
* Random testing for RangeFieldQueries. Testing rigor inspired by {@code BaseGeoPointTestCase}
|
||||
*/
|
||||
public class TestDoubleRangeFieldQueries extends BaseRangeFieldQueryTestCase {
|
||||
private static final String FIELD_NAME = "rangeField";
|
||||
|
||||
protected DoubleRangeField newRangeField(double[] min, double[] max) {
|
||||
return new DoubleRangeField(FIELD_NAME, min, max);
|
||||
}
|
||||
|
||||
protected Query newIntersectsQuery(double[] min, double[] max) {
|
||||
return DoubleRangeField.newIntersectsQuery(FIELD_NAME, min, max);
|
||||
}
|
||||
|
||||
protected Query newContainsQuery(double[] min, double[] max) {
|
||||
return DoubleRangeField.newContainsQuery(FIELD_NAME, min, max);
|
||||
}
|
||||
|
||||
protected Query newWithinQuery(double[] min, double[] max) {
|
||||
return DoubleRangeField.newWithinQuery(FIELD_NAME, min, max);
|
||||
}
|
||||
|
||||
/** Basic test */
|
||||
public void testBasics() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
|
||||
|
||||
// intersects (within)
|
||||
Document document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {-10.0, -10.0}, new double[] {9.1, 10.1}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// intersects (crosses)
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {10.0, -10.0}, new double[] {20.0, 10.0}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// intersects (contains)
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {-20.0, -20.0}, new double[] {30.0, 30.1}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// intersects (crosses)
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {-11.1, -11.2}, new double[] {1.23, 11.5}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// intersects (crosses)
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {12.33, 1.2}, new double[] {15.1, 29.9}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// disjoint
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {-122.33, 1.2}, new double[] {-115.1, 29.9}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// intersects (crosses)
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {Double.NEGATIVE_INFINITY, 1.2}, new double[] {-11.0, 29.9}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// equal (within, contains, intersects)
|
||||
document = new Document();
|
||||
document.add(new DoubleRangeField(FIELD_NAME, new double[] {-11, -15}, new double[] {15, 20}));
|
||||
writer.addDocument(document);
|
||||
|
||||
// search
|
||||
IndexReader reader = writer.getReader();
|
||||
IndexSearcher searcher = newSearcher(reader);
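// expected counts: of the eight indexed ranges, all but the disjoint one intersect this
// query box; only the first one and the equal one are within it; only the containing one
// and the equal one contain it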
|
||||
assertEquals(7, searcher.count(DoubleRangeField.newIntersectsQuery(FIELD_NAME,
|
||||
new double[] {-11.0, -15.0}, new double[] {15.0, 20.0})));
|
||||
assertEquals(2, searcher.count(DoubleRangeField.newWithinQuery(FIELD_NAME,
|
||||
new double[] {-11.0, -15.0}, new double[] {15.0, 20.0})));
|
||||
assertEquals(2, searcher.count(DoubleRangeField.newContainsQuery(FIELD_NAME,
|
||||
new double[] {-11.0, -15.0}, new double[] {15.0, 20.0})));
|
||||
|
||||
reader.close();
|
||||
writer.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
@@ -87,9 +87,8 @@ public class TestGeo3DPoint extends LuceneTestCase {
|
|||
private static Codec getCodec() {
|
||||
if (Codec.getDefault().getName().equals("Lucene62")) {
|
||||
int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048);
|
||||
double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
|
||||
if (VERBOSE) {
|
||||
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
|
||||
System.out.println("TEST: using Lucene60PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode);
|
||||
}
|
||||
|
||||
return new FilterCodec("Lucene62", Codec.getDefault()) {
|
||||
|
@@ -98,7 +97,7 @@ public class TestGeo3DPoint extends LuceneTestCase {
|
|||
return new PointsFormat() {
|
||||
@Override
|
||||
public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
|
||||
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
|
||||
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -254,11 +254,11 @@ public final class AssertingPointsFormat extends PointsFormat {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException {
|
||||
public void writeField(FieldInfo fieldInfo, PointsReader values, double maxMBSortInHeap) throws IOException {
|
||||
if (fieldInfo.getPointDimensionCount() == 0) {
|
||||
throw new IllegalArgumentException("writing field=\"" + fieldInfo.name + "\" but pointDimensionalCount is 0");
|
||||
}
|
||||
in.writeField(fieldInfo, values);
|
||||
in.writeField(fieldInfo, values, maxMBSortInHeap);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -56,11 +56,11 @@ class CrankyPointsFormat extends PointsFormat {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException {
|
||||
public void writeField(FieldInfo fieldInfo, PointsReader values, double maxMBSortInHeap) throws IOException {
|
||||
if (random.nextInt(100) == 0) {
|
||||
throw new IOException("Fake IOException");
|
||||
}
|
||||
delegate.writeField(fieldInfo, values);
|
||||
delegate.writeField(fieldInfo, values, maxMBSortInHeap);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -67,7 +67,6 @@ import org.apache.lucene.util.IOUtils;
|
|||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.SloppyMath;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.lucene.util.bkd.BKDWriter;
|
||||
|
||||
/**
|
||||
* Abstract class to do basic tests for a geospatial impl (high level
|
||||
|
@@ -1248,7 +1247,7 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
|
|||
return new PointsFormat() {
|
||||
@Override
|
||||
public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
|
||||
return new Lucene60PointsWriter(writeState, pointsInLeaf, BKDWriter.DEFAULT_MAX_MB_SORT_IN_HEAP);
|
||||
return new Lucene60PointsWriter(writeState, pointsInLeaf);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -92,7 +92,6 @@ public class RandomCodec extends AssertingCodec {
|
|||
// which is less effective for testing.
|
||||
// TODO: improve how we randomize this...
|
||||
private final int maxPointsInLeafNode;
|
||||
private final double maxMBSortInHeap;
|
||||
private final int bkdSplitRandomSeed;
|
||||
|
||||
@Override
|
||||
|
@@ -103,9 +102,9 @@ public class RandomCodec extends AssertingCodec {
|
|||
|
||||
// Randomize how BKDWriter chooses its splits:
|
||||
|
||||
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap) {
|
||||
return new Lucene60PointsWriter(writeState, maxPointsInLeafNode) {
|
||||
@Override
|
||||
public void writeField(FieldInfo fieldInfo, PointsReader values) throws IOException {
|
||||
public void writeField(FieldInfo fieldInfo, PointsReader values, double maxMBSortInHeap) throws IOException {
|
||||
|
||||
boolean singleValuePerDoc = values.size(fieldInfo.name) == values.getDocCount(fieldInfo.name);
|
||||
|
||||
|
@@ -185,7 +184,6 @@ public class RandomCodec extends AssertingCodec {
|
|||
int lowFreqCutoff = TestUtil.nextInt(random, 2, 100);
|
||||
|
||||
maxPointsInLeafNode = TestUtil.nextInt(random, 16, 2048);
|
||||
maxMBSortInHeap = 5.0 + (3*random.nextDouble());
|
||||
bkdSplitRandomSeed = random.nextInt();
|
||||
|
||||
add(avoidCodecs,
|
||||
|
@@ -253,8 +251,7 @@ public class RandomCodec extends AssertingCodec {
|
|||
public String toString() {
|
||||
return super.toString() + ": " + previousMappings.toString() +
|
||||
", docValues:" + previousDVMappings.toString() +
|
||||
", maxPointsInLeafNode=" + maxPointsInLeafNode +
|
||||
", maxMBSortInHeap=" + maxMBSortInHeap;
|
||||
", maxPointsInLeafNode=" + maxPointsInLeafNode;
|
||||
}
|
||||
|
||||
/** Just like {@link BKDWriter} except it evilly picks random ways to split cells on
|
||||
|
|
|
@@ -63,6 +63,9 @@ New Features
|
|||
|
||||
* SOLR-9240: Support parallel ETL with the topic expression (Joel Bernstein)
|
||||
|
||||
* SOLR-9275: XML QueryParser support (defType=xmlparser) now extensible via configuration.
|
||||
(Christine Poerschke)
|
||||
|
||||
Bug Fixes
|
||||
----------------------
|
||||
|
||||
|
@@ -123,6 +126,10 @@ Bug Fixes
|
|||
|
||||
* SOLR-9285: Fixed AIOOBE when using ValueSourceAugmenter in single node RTG (hossman)
|
||||
|
||||
* SOLR-9288: Fix [docid] transformer to return -1 when used in RTG with uncommitted doc (hossman)
|
||||
|
||||
* SOLR-9309: Fix SolrCloud RTG response structure when multi ids requested but only 1 found (hossman)
|
||||
|
||||
* SOLR-9334: CloudSolrClient.collectionStateCache is unbounded (noble)
|
||||
|
||||
|
||||
|
|
|
@@ -39,6 +39,7 @@ import org.apache.hadoop.util.JarFinder;
|
|||
import org.apache.hadoop.util.ToolRunner;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.cloud.AbstractZkTestCase;
|
||||
|
@@ -67,6 +68,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Conseque
|
|||
})
|
||||
@Slow
|
||||
@Nightly
|
||||
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-9076")
|
||||
public class MorphlineBasicMiniMRTest extends SolrTestCaseJ4 {
|
||||
|
||||
private static final boolean ENABLE_LOCAL_JOB_RUNNER = false; // for debugging only
|
||||
|
|
|
@@ -47,6 +47,7 @@ import org.apache.hadoop.util.ToolRunner;
|
|||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
|
@@ -97,6 +98,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies.Conseque
|
|||
@SuppressSSL // SSL does not work with this test for currently unknown reasons
|
||||
@Slow
|
||||
@Nightly
|
||||
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-9076")
|
||||
public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase {
|
||||
|
||||
private static final int RECORD_COUNT = 2104;
|
||||
|
|
|
@@ -55,7 +55,6 @@ import org.apache.solr.client.solrj.io.stream.expr.StreamFactory;
|
|||
import org.apache.solr.client.solrj.io.stream.expr.Explanation.ExpressionType;
|
||||
import org.apache.solr.client.solrj.io.stream.metrics.*;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.CommonParams;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
|
@@ -1514,9 +1513,9 @@ public class SQLHandler extends RequestHandlerBase implements SolrCoreAware , Pe
|
|||
CloudSolrClient cloudSolrClient = this.context.getSolrClientCache().getCloudSolrClient(this.zkHost);
|
||||
cloudSolrClient.connect();
|
||||
ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
|
||||
Map<String, DocCollection> collections = zkStateReader.getClusterState().getCollectionsMap();
|
||||
Set<String> collections = zkStateReader.getClusterState().getCollectionStates().keySet();
|
||||
if (collections.size() != 0) {
|
||||
this.tables.addAll(collections.keySet());
|
||||
this.tables.addAll(collections);
|
||||
}
|
||||
Collections.sort(this.tables);
|
||||
}
|
||||
|
|
|
@@ -37,7 +37,6 @@ import org.apache.commons.lang.StringUtils;
|
|||
import org.apache.solr.client.solrj.SolrResponse;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient.Builder;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
|
||||
import org.apache.solr.client.solrj.response.RequestStatusState;
|
||||
import org.apache.solr.client.solrj.util.SolrIdentifierValidator;
|
||||
|
@@ -204,7 +203,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
|
|||
log.info("Invoked Collection Action :{} with params {} and sendToOCPQueue={}", action.toLower(), req.getParamString(), operation.sendToOCPQueue);
|
||||
|
||||
SolrResponse response = null;
|
||||
Map<String, Object> props = operation.call(req, rsp, this);
|
||||
Map<String, Object> props = operation.execute(req, rsp, this);
|
||||
String asyncId = req.getParams().get(ASYNC);
|
||||
if (props != null) {
|
||||
if (asyncId != null) {
|
||||
|
@@ -335,7 +334,34 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
|
|||
|
||||
public static final String SYSTEM_COLL = ".system";
|
||||
|
||||
enum CollectionOperation {
|
||||
private static void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
|
||||
SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
|
||||
ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
|
||||
cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
|
||||
cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL, zk);
|
||||
|
||||
try {
|
||||
String path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/schema.xml";
|
||||
byte[] data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSchema.xml"));
|
||||
cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
|
||||
path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/solrconfig.xml";
|
||||
data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSolrConfig.xml"));
|
||||
cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
|
||||
} catch (IOException e) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, e);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
private static void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
|
||||
SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
|
||||
status.add("state", state.getKey());
|
||||
status.add("msg", msg);
|
||||
results.add("status", status);
|
||||
}
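// Illustration only (hypothetical ExampleOp/ExampleOperation names, not the real Solr
// interfaces): the refactor below replaces per-constant anonymous enum bodies with
// constants that receive the operation body as a lambda through a functional interface,
// which is the shape CollectionOperation now takes via CollectionOp. The sketch only
// needs java.util.Map and java.util.Collections.
@FunctionalInterface
interface ExampleOp {
  Map<String, Object> execute(String request) throws Exception;
}

enum ExampleOperation {
  // each constant's body is passed to the constructor instead of overriding an abstract call()
  ECHO(request -> Collections.<String, Object>singletonMap("echo", request)),
  NOOP(request -> Collections.emptyMap());

  private final ExampleOp fun;

  ExampleOperation(ExampleOp fun) {
    this.fun = fun;
  }

  Map<String, Object> execute(String request) throws Exception {
    return fun.execute(request);
  }
}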
|
||||
|
||||
enum CollectionOperation implements CollectionOp {
|
||||
/**
|
||||
* very simple currently, you can pass a template collection, and the new collection is created on
|
||||
* every node the template collection is on
|
||||
|
@@ -343,284 +369,194 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
|
|||
* we might also want to think about error handling (add the request to a zk queue and involve overseer?)
|
||||
* as well as specific replicas= options
|
||||
*/
|
||||
CREATE_OP(CREATE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
|
||||
throws KeeperException, InterruptedException {
|
||||
Map<String, Object> props = req.getParams().required().getAll(null, NAME);
|
||||
props.put("fromApi", "true");
|
||||
req.getParams().getAll(props,
|
||||
REPLICATION_FACTOR,
|
||||
COLL_CONF,
|
||||
NUM_SLICES,
|
||||
MAX_SHARDS_PER_NODE,
|
||||
CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE,
|
||||
SHARDS_PROP,
|
||||
STATE_FORMAT,
|
||||
AUTO_ADD_REPLICAS,
|
||||
RULE,
|
||||
SNITCH);
|
||||
CREATE_OP(CREATE, (req, rsp, h) -> {
|
||||
Map<String, Object> props = req.getParams().required().getAll(null, NAME);
|
||||
props.put("fromApi", "true");
|
||||
req.getParams().getAll(props,
|
||||
REPLICATION_FACTOR,
|
||||
COLL_CONF,
|
||||
NUM_SLICES,
|
||||
MAX_SHARDS_PER_NODE,
|
||||
CREATE_NODE_SET, CREATE_NODE_SET_SHUFFLE,
|
||||
SHARDS_PROP,
|
||||
STATE_FORMAT,
|
||||
AUTO_ADD_REPLICAS,
|
||||
RULE,
|
||||
SNITCH);
|
||||
|
||||
if (props.get(STATE_FORMAT) == null) {
|
||||
props.put(STATE_FORMAT, "2");
|
||||
}
|
||||
addMapObject(props, RULE);
|
||||
addMapObject(props, SNITCH);
|
||||
verifyRuleParams(h.coreContainer, props);
|
||||
final String collectionName = SolrIdentifierValidator.validateCollectionName((String)props.get(NAME));
|
||||
final String shardsParam = (String) props.get(SHARDS_PROP);
|
||||
if (StringUtils.isNotEmpty(shardsParam)) {
|
||||
verifyShardsParam(shardsParam);
|
||||
}
|
||||
if (SYSTEM_COLL.equals(collectionName)) {
|
||||
//We must always create a .system collection with only a single shard
|
||||
props.put(NUM_SLICES, 1);
|
||||
props.remove(SHARDS_PROP);
|
||||
createSysConfigSet(h.coreContainer);
|
||||
|
||||
}
|
||||
copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
|
||||
return copyPropertiesWithPrefix(req.getParams(), props, "router.");
|
||||
if (props.get(STATE_FORMAT) == null) {
|
||||
props.put(STATE_FORMAT, "2");
|
||||
}
|
||||
addMapObject(props, RULE);
|
||||
addMapObject(props, SNITCH);
|
||||
verifyRuleParams(h.coreContainer, props);
|
||||
final String collectionName = SolrIdentifierValidator.validateCollectionName((String) props.get(NAME));
|
||||
final String shardsParam = (String) props.get(SHARDS_PROP);
|
||||
if (StringUtils.isNotEmpty(shardsParam)) {
|
||||
verifyShardsParam(shardsParam);
|
||||
}
|
||||
if (SYSTEM_COLL.equals(collectionName)) {
|
||||
//We must always create a .system collection with only a single shard
|
||||
props.put(NUM_SLICES, 1);
|
||||
props.remove(SHARDS_PROP);
|
||||
createSysConfigSet(h.coreContainer);
|
||||
|
||||
}
|
||||
copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
|
||||
return copyPropertiesWithPrefix(req.getParams(), props, "router.");
|
||||
|
||||
private void createSysConfigSet(CoreContainer coreContainer) throws KeeperException, InterruptedException {
|
||||
SolrZkClient zk = coreContainer.getZkController().getZkStateReader().getZkClient();
|
||||
ZkCmdExecutor cmdExecutor = new ZkCmdExecutor(zk.getZkClientTimeout());
|
||||
cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE, zk);
|
||||
cmdExecutor.ensureExists(ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL, zk);
|
||||
}),
|
||||
DELETE_OP(DELETE, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)),
|
||||
|
||||
try {
|
||||
String path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/schema.xml";
|
||||
byte[] data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSchema.xml"));
|
||||
cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
|
||||
path = ZkStateReader.CONFIGS_ZKNODE + "/" + SYSTEM_COLL + "/solrconfig.xml";
|
||||
data = IOUtils.toByteArray(Thread.currentThread().getContextClassLoader().getResourceAsStream("SystemCollectionSolrConfig.xml"));
|
||||
cmdExecutor.ensureExists(path, data, CreateMode.PERSISTENT, zk);
|
||||
} catch (IOException e) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, e);
|
||||
RELOAD_OP(RELOAD, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)),
|
||||
|
||||
}
|
||||
SYNCSHARD_OP(SYNCSHARD, (req, rsp, h) -> {
|
||||
String collection = req.getParams().required().get("collection");
|
||||
String shard = req.getParams().required().get("shard");
|
||||
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
|
||||
DocCollection docCollection = clusterState.getCollection(collection);
|
||||
ZkNodeProps leaderProps = docCollection.getLeader(shard);
|
||||
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
|
||||
|
||||
try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl()).build()) {
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
RequestSyncShard reqSyncShard = new RequestSyncShard();
|
||||
reqSyncShard.setCollection(collection);
|
||||
reqSyncShard.setShard(shard);
|
||||
reqSyncShard.setCoreName(nodeProps.getCoreName());
|
||||
client.request(reqSyncShard);
|
||||
}
|
||||
},
|
||||
DELETE_OP(DELETE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
|
||||
throws Exception {
|
||||
return req.getParams().required().getAll(null, NAME);
|
||||
return null;
|
||||
}),
|
||||
CREATEALIAS_OP(CREATEALIAS, (req, rsp, h) -> {
|
||||
final String aliasName = SolrIdentifierValidator.validateAliasName(req.getParams().get(NAME));
|
||||
return req.getParams().required().getAll(null, NAME, "collections");
|
||||
}),
|
||||
DELETEALIAS_OP(DELETEALIAS, (req, rsp, h) -> req.getParams().required().getAll(null, NAME)),
|
||||
SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true, (req, rsp, h) -> {
|
||||
String name = req.getParams().required().get(COLLECTION_PROP);
|
||||
// TODO : add support for multiple shards
|
||||
String shard = req.getParams().get(SHARD_ID_PROP);
|
||||
String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
|
||||
String splitKey = req.getParams().get("split.key");
|
||||
|
||||
if (splitKey == null && shard == null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
|
||||
}
|
||||
},
|
||||
RELOAD_OP(RELOAD) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
|
||||
throws Exception {
|
||||
return req.getParams().required().getAll(null, NAME);
|
||||
if (splitKey != null && shard != null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
"Only one of 'shard' or 'split.key' should be specified");
|
||||
}
|
||||
},
|
||||
SYNCSHARD_OP(SYNCSHARD) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
|
||||
throws Exception {
|
||||
String collection = req.getParams().required().get("collection");
|
||||
String shard = req.getParams().required().get("shard");
|
||||
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
|
||||
DocCollection docCollection = clusterState.getCollection(collection);
|
||||
ZkNodeProps leaderProps = docCollection.getLeader(shard);
|
||||
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);
|
||||
|
||||
try (HttpSolrClient client = new Builder(nodeProps.getBaseUrl()).build()) {
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
RequestSyncShard reqSyncShard = new CoreAdminRequest.RequestSyncShard();
|
||||
reqSyncShard.setCollection(collection);
|
||||
reqSyncShard.setShard(shard);
|
||||
reqSyncShard.setCoreName(nodeProps.getCoreName());
|
||||
client.request(reqSyncShard);
|
||||
}
|
||||
return null;
|
||||
if (splitKey != null && rangesStr != null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
"Only one of 'ranges' or 'split.key' should be specified");
|
||||
}
|
||||
|
||||
},
|
||||
CREATEALIAS_OP(CREATEALIAS) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
|
||||
throws Exception {
|
||||
final String aliasName = SolrIdentifierValidator.validateAliasName(req.getParams().get(NAME));
|
||||
return req.getParams().required().getAll(null, NAME, "collections");
|
||||
}
|
||||
},
|
||||
DELETEALIAS_OP(DELETEALIAS) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
|
||||
throws Exception {
|
||||
return req.getParams().required().getAll(null, NAME);
|
||||
Map<String, Object> map = req.getParams().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP,
|
||||
"split.key",
|
||||
CoreAdminParams.RANGES);
|
||||
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
|
||||
}),
|
||||
DELETESHARD_OP(DELETESHARD, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP);
|
||||
req.getParams().getAll(map,
|
||||
DELETE_INDEX,
|
||||
DELETE_DATA_DIR,
|
||||
DELETE_INSTANCE_DIR);
|
||||
return map;
|
||||
}),
|
||||
FORCELEADER_OP(FORCELEADER, (req, rsp, h) -> {
|
||||
forceLeaderElection(req, h);
|
||||
return null;
|
||||
}),
|
||||
CREATESHARD_OP(CREATESHARD, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP);
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
|
||||
if (!ImplicitDocRouter.NAME.equals(((Map) clusterState.getCollection(req.getParams().get(COLLECTION_PROP)).get(DOC_ROUTER)).get(NAME)))
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
|
||||
req.getParams().getAll(map,
|
||||
REPLICATION_FACTOR,
|
||||
CREATE_NODE_SET);
|
||||
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
|
||||
}),
|
||||
DELETEREPLICA_OP(DELETEREPLICA, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP,
|
||||
REPLICA_PROP);
|
||||
|
||||
req.getParams().getAll(map,
|
||||
DELETE_INDEX,
|
||||
DELETE_DATA_DIR,
|
||||
DELETE_INSTANCE_DIR);
|
||||
|
||||
return req.getParams().getAll(map, ONLY_IF_DOWN);
|
||||
}),
|
||||
MIGRATE_OP(MIGRATE, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null, COLLECTION_PROP, "split.key", "target.collection");
|
||||
return req.getParams().getAll(map, "forward.timeout");
|
||||
}),
|
||||
ADDROLE_OP(ADDROLE, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
|
||||
if (!KNOWN_ROLES.contains(map.get("role")))
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
|
||||
return map;
|
||||
}),
|
||||
REMOVEROLE_OP(REMOVEROLE, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
|
||||
if (!KNOWN_ROLES.contains(map.get("role")))
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
|
||||
return map;
|
||||
}),
|
||||
CLUSTERPROP_OP(CLUSTERPROP, (req, rsp, h) -> {
|
||||
String name = req.getParams().required().get(NAME);
|
||||
String val = req.getParams().get(VALUE_LONG);
|
||||
ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
|
||||
cp.setClusterProperty(name, val);
|
||||
return null;
|
||||
}),
|
||||
REQUESTSTATUS_OP(REQUESTSTATUS, (req, rsp, h) -> {
|
||||
req.getParams().required().check(REQUESTID);
|
||||
|
||||
final CoreContainer coreContainer1 = h.coreContainer;
|
||||
final String requestId = req.getParams().get(REQUESTID);
|
||||
final ZkController zkController = coreContainer1.getZkController();
|
||||
|
||||
final NamedList<Object> results = new NamedList<>();
|
||||
if (zkController.getOverseerCompletedMap().contains(requestId)) {
|
||||
final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
|
||||
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
|
||||
addStatusToResponse(results, COMPLETED, "found [" + requestId + "] in completed tasks");
|
||||
} else if (zkController.getOverseerFailureMap().contains(requestId)) {
|
||||
final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
|
||||
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
|
||||
addStatusToResponse(results, FAILED, "found [" + requestId + "] in failed tasks");
|
||||
} else if (zkController.getOverseerRunningMap().contains(requestId)) {
|
||||
addStatusToResponse(results, RUNNING, "found [" + requestId + "] in running tasks");
|
||||
} else if (h.overseerCollectionQueueContains(requestId)) {
|
||||
addStatusToResponse(results, SUBMITTED, "found [" + requestId + "] in submitted tasks");
|
||||
} else {
|
||||
addStatusToResponse(results, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
|
||||
}
|
||||
|
||||
},
|
||||
SPLITSHARD_OP(SPLITSHARD, DEFAULT_COLLECTION_OP_TIMEOUT * 5, true) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
|
||||
throws Exception {
|
||||
String name = req.getParams().required().get(COLLECTION_PROP);
|
||||
// TODO : add support for multiple shards
|
||||
String shard = req.getParams().get(SHARD_ID_PROP);
|
||||
String rangesStr = req.getParams().get(CoreAdminParams.RANGES);
|
||||
String splitKey = req.getParams().get("split.key");
|
||||
|
||||
if (splitKey == null && shard == null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "At least one of shard, or split.key should be specified.");
|
||||
}
|
||||
if (splitKey != null && shard != null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
"Only one of 'shard' or 'split.key' should be specified");
|
||||
}
|
||||
if (splitKey != null && rangesStr != null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
"Only one of 'ranges' or 'split.key' should be specified");
|
||||
}
|
||||
|
||||
Map<String, Object> map = req.getParams().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP,
|
||||
"split.key",
|
||||
CoreAdminParams.RANGES);
|
||||
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
|
||||
}
|
||||
},
|
||||
DELETESHARD_OP(DELETESHARD) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP);
|
||||
req.getParams().getAll(map,
|
||||
DELETE_INDEX,
|
||||
DELETE_DATA_DIR,
|
||||
DELETE_INSTANCE_DIR);
|
||||
return map;
|
||||
}
|
||||
},
|
||||
FORCELEADER_OP(FORCELEADER) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
|
||||
forceLeaderElection(req, handler);
|
||||
return null;
|
||||
}
|
||||
},
|
||||
CREATESHARD_OP(CREATESHARD) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP);
|
||||
ClusterState clusterState = handler.coreContainer.getZkController().getClusterState();
|
||||
final String newShardName = SolrIdentifierValidator.validateShardName(req.getParams().get(SHARD_ID_PROP));
|
||||
if (!ImplicitDocRouter.NAME.equals(((Map) clusterState.getCollection(req.getParams().get(COLLECTION_PROP)).get(DOC_ROUTER)).get(NAME)))
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "shards can be added only to 'implicit' collections");
|
||||
req.getParams().getAll(map,
|
||||
REPLICATION_FACTOR,
|
||||
CREATE_NODE_SET);
|
||||
return copyPropertiesWithPrefix(req.getParams(), map, COLL_PROP_PREFIX);
|
||||
}
|
||||
},
|
||||
DELETEREPLICA_OP(DELETEREPLICA) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP,
|
||||
REPLICA_PROP);
|
||||
|
||||
req.getParams().getAll(map,
|
||||
DELETE_INDEX,
|
||||
DELETE_DATA_DIR,
|
||||
DELETE_INSTANCE_DIR);
|
||||
|
||||
return req.getParams().getAll(map, ONLY_IF_DOWN);
|
||||
}
|
||||
},
|
||||
MIGRATE_OP(MIGRATE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null, COLLECTION_PROP, "split.key", "target.collection");
|
||||
return req.getParams().getAll(map, "forward.timeout");
|
||||
}
|
||||
},
|
||||
ADDROLE_OP(ADDROLE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
|
||||
if (!KNOWN_ROLES.contains(map.get("role")))
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
|
||||
return map;
|
||||
}
|
||||
},
|
||||
REMOVEROLE_OP(REMOVEROLE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null, "role", "node");
|
||||
if (!KNOWN_ROLES.contains(map.get("role")))
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown role. Supported roles are ," + KNOWN_ROLES);
|
||||
return map;
|
||||
}
|
||||
},
|
||||
CLUSTERPROP_OP(CLUSTERPROP) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
String name = req.getParams().required().get(NAME);
|
||||
String val = req.getParams().get(VALUE_LONG);
|
||||
ClusterProperties cp = new ClusterProperties(h.coreContainer.getZkController().getZkClient());
|
||||
cp.setClusterProperty(name, val);
|
||||
return null;
|
||||
}
|
||||
},
|
||||
REQUESTSTATUS_OP(REQUESTSTATUS) {
|
||||
final SolrResponse response = new OverseerSolrResponse(results);
|
||||
rsp.getValues().addAll(response.getResponse());
|
||||
return null;
|
||||
}),
|
||||
DELETESTATUS_OP(DELETESTATUS, new CollectionOp() {
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
req.getParams().required().check(REQUESTID);
|
||||
|
||||
final CoreContainer coreContainer = h.coreContainer;
|
||||
final String requestId = req.getParams().get(REQUESTID);
|
||||
final ZkController zkController = coreContainer.getZkController();
|
||||
|
||||
final NamedList<Object> results = new NamedList<>();
|
||||
if (zkController.getOverseerCompletedMap().contains(requestId)) {
|
||||
final byte[] mapEntry = zkController.getOverseerCompletedMap().get(requestId);
|
||||
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
|
||||
addStatusToResponse(results, COMPLETED, "found [" + requestId + "] in completed tasks");
|
||||
} else if (zkController.getOverseerFailureMap().contains(requestId)) {
|
||||
final byte[] mapEntry = zkController.getOverseerFailureMap().get(requestId);
|
||||
rsp.getValues().addAll(SolrResponse.deserialize(mapEntry).getResponse());
|
||||
addStatusToResponse(results, FAILED, "found [" + requestId + "] in failed tasks");
|
||||
} else if (zkController.getOverseerRunningMap().contains(requestId)) {
|
||||
addStatusToResponse(results, RUNNING, "found [" + requestId + "] in running tasks");
|
||||
} else if (h.overseerCollectionQueueContains(requestId)) {
|
||||
addStatusToResponse(results, SUBMITTED, "found [" + requestId + "] in submitted tasks");
|
||||
} else {
|
||||
addStatusToResponse(results, NOT_FOUND, "Did not find [" + requestId + "] in any tasks queue");
|
||||
}
|
||||
|
||||
final SolrResponse response = new OverseerSolrResponse(results);
|
||||
rsp.getValues().addAll(response.getResponse());
|
||||
return null;
|
||||
}
|
||||
|
||||
private void addStatusToResponse(NamedList<Object> results, RequestStatusState state, String msg) {
|
||||
SimpleOrderedMap<String> status = new SimpleOrderedMap<>();
|
||||
status.add("state", state.getKey());
|
||||
status.add("msg", msg);
|
||||
results.add("status", status);
|
||||
}
|
||||
},
|
||||
DELETESTATUS_OP(DELETESTATUS) {
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
final CoreContainer coreContainer = h.coreContainer;
|
||||
final String requestId = req.getParams().get(REQUESTID);
|
||||
final ZkController zkController = coreContainer.getZkController();
|
||||
|
@ -652,263 +588,217 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
|
|||
}
|
||||
return null;
|
||||
}
|
||||
},
|
||||
ADDREPLICA_OP(ADDREPLICA) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
|
||||
throws Exception {
|
||||
Map<String, Object> props = req.getParams().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
"node",
|
||||
SHARD_ID_PROP,
|
||||
_ROUTE_,
|
||||
CoreAdminParams.NAME,
|
||||
INSTANCE_DIR,
|
||||
DATA_DIR);
|
||||
return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
|
||||
}
|
||||
},
|
||||
OVERSEERSTATUS_OP(OVERSEERSTATUS) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
return new LinkedHashMap<>();
|
||||
}
|
||||
},
|
||||
}),
|
||||
ADDREPLICA_OP(ADDREPLICA, (req, rsp, h) -> {
|
||||
Map<String, Object> props = req.getParams().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
"node",
|
||||
SHARD_ID_PROP,
|
||||
_ROUTE_,
|
||||
CoreAdminParams.NAME,
|
||||
INSTANCE_DIR,
|
||||
DATA_DIR);
|
||||
return copyPropertiesWithPrefix(req.getParams(), props, COLL_PROP_PREFIX);
|
||||
}),
|
||||
OVERSEERSTATUS_OP(OVERSEERSTATUS, (req, rsp, h) -> (Map) new LinkedHashMap<>()),
|
||||
|
||||
/**
|
||||
* Handle list collection request.
|
||||
* Do list collection request to zk host
|
||||
*/
|
||||
LIST_OP(LIST) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler) throws Exception {
|
||||
NamedList<Object> results = new NamedList<>();
|
||||
Map<String, DocCollection> collections = handler.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
|
||||
List<String> collectionList = new ArrayList<>(collections.keySet());
|
||||
results.add("collections", collectionList);
|
||||
SolrResponse response = new OverseerSolrResponse(results);
|
||||
rsp.getValues().addAll(response.getResponse());
|
||||
return null;
|
||||
}
|
||||
},
|
||||
LIST_OP(LIST, (req, rsp, h) -> {
|
||||
NamedList<Object> results = new NamedList<>();
|
||||
Map<String, DocCollection> collections = h.coreContainer.getZkController().getZkStateReader().getClusterState().getCollectionsMap();
|
||||
List<String> collectionList = new ArrayList<>(collections.keySet());
|
||||
results.add("collections", collectionList);
|
||||
SolrResponse response = new OverseerSolrResponse(results);
|
||||
rsp.getValues().addAll(response.getResponse());
|
||||
return null;
|
||||
}),
|
||||
/**
|
||||
* Handle cluster status request.
|
||||
* Can return status per specific collection/shard or per all collections.
|
||||
*/
|
||||
CLUSTERSTATUS_OP(CLUSTERSTATUS) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
|
||||
throws KeeperException, InterruptedException {
|
||||
Map<String, Object> all = req.getParams().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP,
|
||||
_ROUTE_);
|
||||
new ClusterStatus(handler.coreContainer.getZkController().getZkStateReader(),
|
||||
new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
|
||||
return null;
|
||||
CLUSTERSTATUS_OP(CLUSTERSTATUS, (req, rsp, h) -> {
|
||||
Map<String, Object> all = req.getParams().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
SHARD_ID_PROP,
|
||||
_ROUTE_);
|
||||
new ClusterStatus(h.coreContainer.getZkController().getZkStateReader(),
|
||||
new ZkNodeProps(all)).getClusterStatus(rsp.getValues());
|
||||
return null;
|
||||
}),
|
||||
ADDREPLICAPROP_OP(ADDREPLICAPROP, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
PROPERTY_PROP,
|
||||
SHARD_ID_PROP,
|
||||
REPLICA_PROP,
|
||||
PROPERTY_VALUE_PROP);
|
||||
req.getParams().getAll(map, SHARD_UNIQUE);
|
||||
String property = (String) map.get(PROPERTY_PROP);
|
||||
if (!property.startsWith(COLL_PROP_PREFIX)) {
|
||||
property = COLL_PROP_PREFIX + property;
|
||||
}
|
||||
},
|
||||
ADDREPLICAPROP_OP(ADDREPLICAPROP) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
PROPERTY_PROP,
|
||||
SHARD_ID_PROP,
|
||||
REPLICA_PROP,
|
||||
PROPERTY_VALUE_PROP);
|
||||
req.getParams().getAll(map, SHARD_UNIQUE);
|
||||
String property = (String) map.get(PROPERTY_PROP);
|
||||
if (!property.startsWith(COLL_PROP_PREFIX)) {
|
||||
property = COLL_PROP_PREFIX + property;
|
||||
}
|
||||
|
||||
boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
|
||||
boolean uniquePerSlice = Boolean.parseBoolean((String) map.get(SHARD_UNIQUE));
|
||||
|
||||
// Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
|
||||
// in a slice on properties that are known to only be one-per-slice and error out if so.
|
||||
if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
|
||||
SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
|
||||
uniquePerSlice == false) {
|
||||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
|
||||
"Overseer replica property command received for property " + property +
|
||||
" with the " + SHARD_UNIQUE +
|
||||
" parameter set to something other than 'true'. No action taken.");
|
||||
}
|
||||
return map;
|
||||
// Check if we're trying to set a property with parameters that allow us to set the property on multiple replicas
|
||||
// in a slice on properties that are known to only be one-per-slice and error out if so.
|
||||
if (StringUtils.isNotBlank((String) map.get(SHARD_UNIQUE)) &&
|
||||
SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(property.toLowerCase(Locale.ROOT)) &&
|
||||
uniquePerSlice == false) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
"Overseer replica property command received for property " + property +
|
||||
" with the " + SHARD_UNIQUE +
|
||||
" parameter set to something other than 'true'. No action taken.");
|
||||
}
|
||||
},
|
||||
DELETEREPLICAPROP_OP(DELETEREPLICAPROP) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
PROPERTY_PROP,
|
||||
SHARD_ID_PROP,
|
||||
REPLICA_PROP);
|
||||
return req.getParams().getAll(map, PROPERTY_PROP);
|
||||
return map;
|
||||
}),
|
||||
DELETEREPLICAPROP_OP(DELETEREPLICAPROP, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
PROPERTY_PROP,
|
||||
SHARD_ID_PROP,
|
||||
REPLICA_PROP);
|
||||
return req.getParams().getAll(map, PROPERTY_PROP);
|
||||
}),
|
||||
BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE, (req, rsp, h) -> {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
PROPERTY_PROP);
|
||||
Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
|
||||
String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
|
||||
if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) {
|
||||
prop = COLL_PROP_PREFIX + prop;
|
||||
}
|
||||
},
|
||||
BALANCESHARDUNIQUE_OP(BALANCESHARDUNIQUE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
Map<String, Object> map = req.getParams().required().getAll(null,
|
||||
COLLECTION_PROP,
|
||||
PROPERTY_PROP);
|
||||
Boolean shardUnique = Boolean.parseBoolean(req.getParams().get(SHARD_UNIQUE));
|
||||
String prop = req.getParams().get(PROPERTY_PROP).toLowerCase(Locale.ROOT);
|
||||
if (!StringUtils.startsWith(prop, COLL_PROP_PREFIX)) {
|
||||
prop = COLL_PROP_PREFIX + prop;
|
||||
}
|
||||
|
||||
if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
|
||||
+ " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
|
||||
" Property: " + prop + " shardUnique: " + Boolean.toString(shardUnique));
|
||||
}
|
||||
|
||||
return req.getParams().getAll(map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
|
||||
if (!shardUnique && !SliceMutator.SLICE_UNIQUE_BOOLEAN_PROPERTIES.contains(prop)) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Balancing properties amongst replicas in a slice requires that"
|
||||
+ " the property be pre-defined as a unique property (e.g. 'preferredLeader') or that 'shardUnique' be set to 'true'. " +
|
||||
" Property: " + prop + " shardUnique: " + Boolean.toString(shardUnique));
|
||||
}
|
||||
},
|
||||
REBALANCELEADERS_OP(REBALANCELEADERS) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
new RebalanceLeaders(req,rsp,h).execute();
|
||||
return null;
|
||||
|
||||
return req.getParams().getAll(map, ONLY_ACTIVE_NODES, SHARD_UNIQUE);
|
||||
}),
|
||||
REBALANCELEADERS_OP(REBALANCELEADERS, (req, rsp, h) -> {
|
||||
new RebalanceLeaders(req, rsp, h).execute();
|
||||
return null;
|
||||
}),
|
||||
MODIFYCOLLECTION_OP(MODIFYCOLLECTION, (req, rsp, h) -> {
|
||||
Map<String, Object> m = req.getParams().getAll(null, MODIFIABLE_COLL_PROPS);
|
||||
if (m.isEmpty()) throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
formatString("no supported values provided rule, snitch, maxShardsPerNode, replicationFactor, collection.configName"));
|
||||
req.getParams().required().getAll(m, COLLECTION_PROP);
|
||||
addMapObject(m, RULE);
|
||||
addMapObject(m, SNITCH);
|
||||
for (String prop : MODIFIABLE_COLL_PROPS) DocCollection.verifyProp(m, prop);
|
||||
verifyRuleParams(h.coreContainer, m);
|
||||
return m;
|
||||
}),
|
||||
MIGRATESTATEFORMAT_OP(MIGRATESTATEFORMAT, (req, rsp, h) -> req.getParams().required().getAll(null, COLLECTION_PROP)),
|
||||
|
||||
BACKUP_OP(BACKUP, (req, rsp, h) -> {
|
||||
req.getParams().required().check(NAME, COLLECTION_PROP);
|
||||
|
||||
String collectionName = req.getParams().get(COLLECTION_PROP);
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
if (!clusterState.hasCollection(collectionName)) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
|
||||
}
|
||||
},
|
||||
MODIFYCOLLECTION_OP(MODIFYCOLLECTION) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
|
||||
Map<String, Object> m = req.getParams().getAll(null, MODIFIABLE_COLL_PROPS);
|
||||
if (m.isEmpty()) throw new SolrException(ErrorCode.BAD_REQUEST,
|
||||
formatString("no supported values provided rule, snitch, maxShardsPerNode, replicationFactor, collection.configName"));
|
||||
req.getParams().required().getAll(m, COLLECTION_PROP);
|
||||
addMapObject(m, RULE);
|
||||
addMapObject(m, SNITCH);
|
||||
for (String prop : MODIFIABLE_COLL_PROPS) DocCollection.verifyProp(m, prop);
|
||||
verifyRuleParams(h.coreContainer, m);
|
||||
return m;
|
||||
}
|
||||
},
|
||||
MIGRATESTATEFORMAT_OP(MIGRATESTATEFORMAT) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler handler)
|
||||
throws Exception {
|
||||
return req.getParams().required().getAll(null, COLLECTION_PROP);
|
||||
}
|
||||
},
|
||||
BACKUP_OP(BACKUP) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
req.getParams().required().check(NAME, COLLECTION_PROP);
|
||||
CoreContainer cc = h.coreContainer;
|
||||
String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
|
||||
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
|
||||
|
||||
String collectionName = req.getParams().get(COLLECTION_PROP);
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
if (!clusterState.hasCollection(collectionName)) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' does not exist, no action taken.");
|
||||
}
|
||||
String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
|
||||
if (location == null) {
|
||||
//Refresh the cluster property file to make sure the value set for location is the latest
|
||||
h.coreContainer.getZkController().getZkStateReader().forceUpdateClusterProperties();
|
||||
|
||||
CoreContainer cc = h.coreContainer;
|
||||
String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
|
||||
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
|
||||
|
||||
String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
|
||||
// Check if the location is specified in the cluster property.
|
||||
location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
|
||||
if (location == null) {
|
||||
//Refresh the cluster property file to make sure the value set for location is the latest
|
||||
h.coreContainer.getZkController().getZkStateReader().forceUpdateClusterProperties();
|
||||
|
||||
// Check if the location is specified in the cluster property.
|
||||
location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty(CoreAdminParams.BACKUP_LOCATION, null);
|
||||
if (location == null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
|
||||
+ " parameter or as a default repository property or as a cluster property.");
|
||||
}
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
|
||||
+ " parameter or as a default repository property or as a cluster property.");
|
||||
}
|
||||
|
||||
// Check if the specified location is valid for this repository.
|
||||
URI uri = repository.createURI(location);
|
||||
try {
|
||||
if (!repository.exists(uri)) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
|
||||
}
|
||||
|
||||
Map<String, Object> params = req.getParams().getAll(null, NAME, COLLECTION_PROP);
|
||||
params.put(CoreAdminParams.BACKUP_LOCATION, location);
|
||||
return params;
|
||||
}
|
||||
},
|
||||
RESTORE_OP(RESTORE) {
|
||||
@Override
|
||||
Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception {
|
||||
req.getParams().required().check(NAME, COLLECTION_PROP);
|
||||
|
||||
String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
//We always want to restore into an collection name which doesn't exist yet.
|
||||
if (clusterState.hasCollection(collectionName)) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken.");
|
||||
// Check if the specified location is valid for this repository.
|
||||
URI uri = repository.createURI(location);
|
||||
try {
|
||||
if (!repository.exists(uri)) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
|
||||
}
|
||||
|
||||
CoreContainer cc = h.coreContainer;
|
||||
String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
|
||||
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
|
||||
Map<String, Object> params = req.getParams().getAll(null, NAME, COLLECTION_PROP);
|
||||
params.put(CoreAdminParams.BACKUP_LOCATION, location);
|
||||
return params;
|
||||
}),
|
||||
RESTORE_OP(RESTORE, (req, rsp, h) -> {
|
||||
req.getParams().required().check(NAME, COLLECTION_PROP);
|
||||
|
||||
String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
|
||||
String collectionName = SolrIdentifierValidator.validateCollectionName(req.getParams().get(COLLECTION_PROP));
|
||||
ClusterState clusterState = h.coreContainer.getZkController().getClusterState();
|
||||
//We always want to restore into an collection name which doesn't exist yet.
|
||||
if (clusterState.hasCollection(collectionName)) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "Collection '" + collectionName + "' exists, no action taken.");
|
||||
}
|
||||
|
||||
CoreContainer cc = h.coreContainer;
|
||||
String repo = req.getParams().get(CoreAdminParams.BACKUP_REPOSITORY);
|
||||
BackupRepository repository = cc.newBackupRepository(Optional.ofNullable(repo));
|
||||
|
||||
String location = repository.getBackupLocation(req.getParams().get(CoreAdminParams.BACKUP_LOCATION));
|
||||
if (location == null) {
|
||||
//Refresh the cluster property file to make sure the value set for location is the latest
|
||||
h.coreContainer.getZkController().getZkStateReader().forceUpdateClusterProperties();
|
||||
|
||||
// Check if the location is specified in the cluster property.
|
||||
location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty("location", null);
|
||||
if (location == null) {
|
||||
//Refresh the cluster property file to make sure the value set for location is the latest
|
||||
h.coreContainer.getZkController().getZkStateReader().forceUpdateClusterProperties();
|
||||
|
||||
// Check if the location is specified in the cluster property.
|
||||
location = h.coreContainer.getZkController().getZkStateReader().getClusterProperty("location", null);
|
||||
if (location == null) {
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
|
||||
+ " parameter or as a default repository property or as a cluster property.");
|
||||
}
|
||||
throw new SolrException(ErrorCode.BAD_REQUEST, "'location' is not specified as a query"
|
||||
+ " parameter or as a default repository property or as a cluster property.");
|
||||
}
|
||||
|
||||
// Check if the specified location is valid for this repository.
|
||||
URI uri = repository.createURI(location);
|
||||
try {
|
||||
if (!repository.exists(uri)) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
|
||||
}
|
||||
|
||||
Map<String, Object> params = req.getParams().getAll(null, NAME, COLLECTION_PROP);
|
||||
params.put(CoreAdminParams.BACKUP_LOCATION, location);
|
||||
// from CREATE_OP:
|
||||
req.getParams().getAll(params, COLL_CONF, REPLICATION_FACTOR, MAX_SHARDS_PER_NODE, STATE_FORMAT, AUTO_ADD_REPLICAS);
|
||||
copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX);
|
||||
return params;
|
||||
}
|
||||
};
|
||||
|
||||
// Check if the specified location is valid for this repository.
|
||||
URI uri = repository.createURI(location);
|
||||
try {
|
||||
if (!repository.exists(uri)) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "specified location " + uri + " does not exist.");
|
||||
}
|
||||
} catch (IOException ex) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to check the existance of " + uri + ". Is it valid?", ex);
|
||||
}
|
||||
|
||||
Map<String, Object> params = req.getParams().getAll(null, NAME, COLLECTION_PROP);
|
||||
params.put(CoreAdminParams.BACKUP_LOCATION, location);
|
||||
// from CREATE_OP:
|
||||
req.getParams().getAll(params, COLL_CONF, REPLICATION_FACTOR, MAX_SHARDS_PER_NODE, STATE_FORMAT, AUTO_ADD_REPLICAS);
|
||||
copyPropertiesWithPrefix(req.getParams(), params, COLL_PROP_PREFIX);
|
||||
return params;
|
||||
});
|
||||
    public final CollectionOp fun;
    CollectionAction action;
    long timeOut;
    boolean sendToOCPQueue;

    CollectionOperation(CollectionAction action) {
      this(action, DEFAULT_COLLECTION_OP_TIMEOUT, true);
    CollectionOperation(CollectionAction action, CollectionOp fun) {
      this(action, DEFAULT_COLLECTION_OP_TIMEOUT, true, fun);
    }

    CollectionOperation(CollectionAction action, long timeOut, boolean sendToOCPQueue) {
    CollectionOperation(CollectionAction action, long timeOut, boolean sendToOCPQueue, CollectionOp fun) {
      this.action = action;
      this.timeOut = timeOut;
      this.sendToOCPQueue = sendToOCPQueue;
      this.fun = fun;

    }

    /**
     * All actions must implement this method. If a non-null map is returned, the action name is added to
     * the map and sent to overseer for processing. If it returns a null, the call returns immediately
     */
    abstract Map<String, Object> call(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception;

    public static CollectionOperation get(CollectionAction action) {
      for (CollectionOperation op : values()) {

@ -916,6 +806,12 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
      }
      throw new SolrException(ErrorCode.SERVER_ERROR, "No such action" + action);
    }

    @Override
    public Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h)
        throws Exception {
      return fun.execute(req, rsp, h);
    }
  }

  private static void forceLeaderElection(SolrQueryRequest req, CollectionsHandler handler) {

@ -1096,6 +992,11 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
    }
  }

  interface CollectionOp {
    Map<String, Object> execute(SolrQueryRequest req, SolrQueryResponse rsp, CollectionsHandler h) throws Exception;

  }
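  // Illustrative sketch, not part of this patch: with CollectionOp as a functional interface,
  // an enum constant can supply its body as a lambda instead of overriding call(); for a
  // hypothetical FOO action the pattern looks like:
  //
  //   FOO_OP(FOO, (req, rsp, h) -> {
  //     Map<String, Object> m = req.getParams().required().getAll(null, COLLECTION_PROP);
  //     return m; // a non-null map is forwarded to the Overseer; null returns to the caller immediately
  //   }),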

    public static final List<String> MODIFIABLE_COLL_PROPS = Arrays.asList(
        RULE,
        SNITCH,

@ -362,7 +362,7 @@ public class CoreAdminHandler extends RequestHandlerBase implements PermissionNa
  }

    void call() throws Exception {
      op.call(this);
      op.execute(this);
    }

  }

@ -374,7 +374,10 @@ public class CoreAdminHandler extends RequestHandlerBase implements PermissionNa
  /**
   * used by the INVOKE action of core admin handler
   */
  public static interface Invocable {
    public Map<String, Object> invoke(SolrQueryRequest req);
  public interface Invocable {
    Map<String, Object> invoke(SolrQueryRequest req);
  }
  interface CoreAdminOp {
    void execute(CallInfo it) throws Exception;
  }
}

File diff suppressed because it is too large

@ -143,10 +143,9 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
return;
|
||||
}
|
||||
|
||||
String id[] = params.getParams("id");
|
||||
String ids[] = params.getParams("ids");
|
||||
|
||||
if (id == null && ids == null) {
|
||||
final IdsRequsted reqIds = IdsRequsted.parseParams(req);
|
||||
|
||||
if (reqIds.allIds.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -171,20 +170,6 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
|
||||
}
|
||||
|
||||
|
||||
String[] allIds = id==null ? new String[0] : id;
|
||||
|
||||
if (ids != null) {
|
||||
List<String> lst = new ArrayList<>();
|
||||
for (String s : allIds) {
|
||||
lst.add(s);
|
||||
}
|
||||
for (String idList : ids) {
|
||||
lst.addAll( StrUtils.splitSmart(idList, ",", true) );
|
||||
}
|
||||
allIds = lst.toArray(new String[lst.size()]);
|
||||
}
|
||||
|
||||
SolrCore core = req.getCore();
|
||||
SchemaField idField = core.getLatestSchema().getUniqueKeyField();
|
||||
FieldType fieldType = idField.getType();
|
||||
|
@ -209,7 +194,7 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
SolrIndexSearcher searcher = null;
|
||||
|
||||
BytesRefBuilder idBytes = new BytesRefBuilder();
|
||||
for (String idStr : allIds) {
|
||||
for (String idStr : reqIds.allIds) {
|
||||
fieldType.readableToIndexed(idStr, idBytes);
|
||||
if (ulog != null) {
|
||||
Object o = ulog.lookup(idBytes.get());
|
||||
|
@ -297,18 +282,7 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
// if the client specified a single id=foo, then use "doc":{
|
||||
// otherwise use a standard doclist
|
||||
|
||||
if (ids == null && allIds.length <= 1) {
|
||||
// if the doc was not found, then use a value of null.
|
||||
rsp.add("doc", docList.size() > 0 ? docList.get(0) : null);
|
||||
} else {
|
||||
docList.setNumFound(docList.size());
|
||||
rsp.addResponse(docList);
|
||||
}
|
||||
|
||||
addDocListToResponse(rb, docList);
|
||||
}
|
||||
|
||||
|
||||
|
@ -461,25 +435,13 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
}
|
||||
|
||||
public int createSubRequests(ResponseBuilder rb) throws IOException {
|
||||
SolrParams params = rb.req.getParams();
|
||||
String id1[] = params.getParams("id");
|
||||
String ids[] = params.getParams("ids");
|
||||
|
||||
if (id1 == null && ids == null) {
|
||||
|
||||
final IdsRequsted reqIds = IdsRequsted.parseParams(rb.req);
|
||||
if (reqIds.allIds.isEmpty()) {
|
||||
return ResponseBuilder.STAGE_DONE;
|
||||
}
|
||||
|
||||
List<String> allIds = new ArrayList<>();
|
||||
if (id1 != null) {
|
||||
for (String s : id1) {
|
||||
allIds.add(s);
|
||||
}
|
||||
}
|
||||
if (ids != null) {
|
||||
for (String s : ids) {
|
||||
allIds.addAll( StrUtils.splitSmart(s, ",", true) );
|
||||
}
|
||||
}
|
||||
|
||||
SolrParams params = rb.req.getParams();
|
||||
|
||||
// TODO: handle collection=...?
|
||||
|
||||
|
@ -495,7 +457,7 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
|
||||
|
||||
Map<String, List<String>> sliceToId = new HashMap<>();
|
||||
for (String id : allIds) {
|
||||
for (String id : reqIds.allIds) {
|
||||
Slice slice = coll.getRouter().getTargetSlice(id, null, null, params, coll);
|
||||
|
||||
List<String> idsForShard = sliceToId.get(slice.getName());
|
||||
|
@ -524,7 +486,7 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
rb.addRequest(this, sreq);
|
||||
}
|
||||
} else {
|
||||
String shardIdList = StrUtils.join(allIds, ',');
|
||||
String shardIdList = StrUtils.join(reqIds.allIds, ',');
|
||||
ShardRequest sreq = new ShardRequest();
|
||||
|
||||
sreq.purpose = 1;
|
||||
|
@ -586,17 +548,31 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
docList.addAll(subList);
|
||||
}
|
||||
}
|
||||
|
||||
addDocListToResponse(rb, docList);
|
||||
}
|
||||
|
||||
if (docList.size() <= 1 && rb.req.getParams().getParams("ids")==null) {
|
||||
/**
|
||||
* Encapsulates logic for how a {@link SolrDocumentList} should be added to the response
|
||||
* based on the request params used
|
||||
*/
|
||||
private void addDocListToResponse(final ResponseBuilder rb, final SolrDocumentList docList) {
|
||||
assert null != docList;
|
||||
|
||||
final SolrQueryResponse rsp = rb.rsp;
|
||||
final IdsRequsted reqIds = IdsRequsted.parseParams(rb.req);
|
||||
|
||||
if (reqIds.useSingleDocResponse) {
|
||||
assert docList.size() <= 1;
|
||||
// if the doc was not found, then use a value of null.
|
||||
rb.rsp.add("doc", docList.size() > 0 ? docList.get(0) : null);
|
||||
rsp.add("doc", docList.size() > 0 ? docList.get(0) : null);
|
||||
} else {
|
||||
docList.setNumFound(docList.size());
|
||||
rb.rsp.addResponse(docList);
|
||||
rsp.addResponse(docList);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
////////////////////////////////////////////
|
||||
/// SolrInfoMBean
|
||||
|
@ -768,6 +744,66 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
return new ArrayList<>(versionsToRet);
|
||||
}

  /**
   * Simple struct for tracking what ids were requested and what response format is expected
   * according to the request params
   */
  private final static class IdsRequsted {
    /** A List (which may be empty but will never be null) of the uniqueKeys requested. */
    public final List<String> allIds;
    /**
     * true if the params provided by the user indicate that a single doc response structure
     * should be used.
     * Value is meaningless if <code>ids</code> is empty.
     */
    public final boolean useSingleDocResponse;
    private IdsRequsted(List<String> allIds, boolean useSingleDocResponse) {
      assert null != allIds;
      this.allIds = allIds;
      this.useSingleDocResponse = useSingleDocResponse;
    }

    /**
     * Parses the <code>id</code> and <code>ids</code> params attached to the specified request object,
     * and returns an <code>IdsRequsted</code> struct to use for this request.
     * The <code>IdsRequsted</code> is cached in the {@link SolrQueryRequest#getContext} so subsequent
     * method calls on the same request will not re-parse the params.
     */
    public static IdsRequsted parseParams(SolrQueryRequest req) {
      final String contextKey = IdsRequsted.class.toString() + "_PARSED_ID_PARAMS";
      if (req.getContext().containsKey(contextKey)) {
        return (IdsRequsted)req.getContext().get(contextKey);
      }
      final SolrParams params = req.getParams();
      final String id[] = params.getParams("id");
      final String ids[] = params.getParams("ids");

      if (id == null && ids == null) {
        IdsRequsted result = new IdsRequsted(Collections.<String>emptyList(), true);
        req.getContext().put(contextKey, result);
        return result;
      }
      final List<String> allIds = new ArrayList<>((null == id ? 0 : id.length)
                                                  + (null == ids ? 0 : (2 * ids.length)));
      if (null != id) {
        for (String singleId : id) {
          allIds.add(singleId);
        }
      }
      if (null != ids) {
        for (String idList : ids) {
          allIds.addAll( StrUtils.splitSmart(idList, ",", true) );
        }
      }
      // if the client specified a single id=foo, then use "doc":{
      // otherwise use a standard doclist
      IdsRequsted result = new IdsRequsted(allIds, (ids == null && allIds.size() <= 1));
      req.getContext().put(contextKey, result);
      return result;
    }
  }
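  // Illustrative sketch, not part of the patch: callers such as process() or createSubRequests()
  // are expected to use the cached parse instead of reading the "id"/"ids" params directly, e.g.:
  //
  //   final IdsRequsted reqIds = IdsRequsted.parseParams(req);
  //   if (reqIds.allIds.isEmpty()) {
  //     return; // nothing requested
  //   }
  //   // reqIds.useSingleDocResponse then decides between a single "doc" entry and a doc list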

  /**
   * A lightweight ResultContext for use with RTG requests that can point at Realtime Searchers
   */

@ -21,7 +21,10 @@ import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.SolrQueryRequest;

/**
 *
 * Augments the document with a <code>[docid]</code> integer containing its current
 * (internal) id in the lucene index. May be <code>-1</code> if this document did not come from the
 * index (ie: a RealTimeGet from the transaction log)
 *
 * @since solr 4.0
 */
public class DocIdAugmenterFactory extends TransformerFactory

@ -49,9 +52,8 @@ class DocIdAugmenter extends DocTransformer

  @Override
  public void transform(SolrDocument doc, int docid, float score) {
    if( docid >= 0 ) {
      doc.setField( name, docid );
    }
    assert -1 <= docid;
    doc.setField( name, docid );
  }
}

@ -16,23 +16,57 @@
 */
package org.apache.solr.search;

import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.xml.CoreParser;
import org.apache.lucene.queryparser.xml.QueryBuilder;

import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.util.plugin.NamedListInitializedPlugin;

/**
 * Assembles a QueryBuilder which uses Query objects from Solr's <code>search</code> module
 * in addition to Query objects supported by the Lucene <code>CoreParser</code>.
 */
public class SolrCoreParser extends CoreParser {
public class SolrCoreParser extends CoreParser implements NamedListInitializedPlugin {

  protected final SolrQueryRequest req;

  public SolrCoreParser(String defaultField, Analyzer analyzer,
                        SolrQueryRequest req) {
    super(defaultField, analyzer);
    this.req = req;
  }

  // final IndexSchema schema = req.getSchema();
  // lucene_parser.addQueryBuilder("SomeOtherQuery", new SomeOtherQueryBuilder(schema));
  @Override
  public void init(NamedList initArgs) {
    if (initArgs == null || initArgs.size() == 0) {
      return;
    }
    final SolrResourceLoader loader;
    if (req == null) {
      loader = new SolrResourceLoader();
    } else {
      loader = req.getCore().getResourceLoader();
    }

    final Iterable<Map.Entry<String,Object>> args = initArgs;
    for (final Map.Entry<String,Object> entry : args) {
      final String queryName = entry.getKey();
      final String queryBuilderClassName = (String)entry.getValue();

      final SolrQueryBuilder queryBuilder = loader.newInstance(
          queryBuilderClassName,
          SolrQueryBuilder.class,
          null,
          new Class[] {String.class, Analyzer.class, SolrQueryRequest.class, QueryBuilder.class},
          new Object[] {defaultField, analyzer, req, this});

      this.queryFactory.addBuilder(queryName, queryBuilder);
    }
  }

}

@ -0,0 +1,34 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.search;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.solr.request.SolrQueryRequest;

public abstract class SolrQueryBuilder implements QueryBuilder {

  protected final SolrQueryRequest req;
  protected final QueryBuilder queryFactory;

  public SolrQueryBuilder(String defaultField, Analyzer analyzer,
                          SolrQueryRequest req, QueryBuilder queryFactory) {
    this.req = req;
    this.queryFactory = queryFactory;
  }

}
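A hypothetical sketch (not part of this patch) of a concrete subclass, to show what the req and
queryFactory fields are for; the class name is invented and the obvious Lucene imports (Term,
TermQuery, DOMUtils) plus those used by the test builders below are assumed:

    public class TermQueryBuilderSketch extends SolrQueryBuilder {
      public TermQueryBuilderSketch(String defaultField, Analyzer analyzer,
                                    SolrQueryRequest req, QueryBuilder queryFactory) {
        super(defaultField, analyzer, req, queryFactory);
      }
      @Override
      public Query getQuery(Element e) throws ParserException {
        String field = DOMUtils.getAttributeWithInheritanceOrFail(e, "fieldName");
        String text = DOMUtils.getNonBlankTextOrFail(e);
        // req gives access to the core and schema if a builder needs it, e.g. req.getSchema().getField(field);
        // queryFactory lets a builder delegate nested elements back to the full parser chain.
        return new TermQuery(new Term(field, text));
      }
    }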
@ -25,12 +25,21 @@ import org.apache.lucene.search.Query;

import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.schema.IndexSchema;

public class XmlQParserPlugin extends QParserPlugin {
  public static final String NAME = "xmlparser";

  private NamedList args;

  @Override
  public void init( NamedList args ) {
    super.init(args);
    this.args = args;
  }

  private class XmlQParser extends QParser {

    public XmlQParser(String qstr, SolrParams localParams,

@ -46,7 +55,9 @@ public class XmlQParserPlugin extends QParserPlugin {
      final IndexSchema schema = req.getSchema();
      final String defaultField = QueryParsing.getDefaultField(schema, getParam(CommonParams.DF));
      final Analyzer analyzer = schema.getQueryAnalyzer();

      final SolrCoreParser solrParser = new SolrCoreParser(defaultField, analyzer, req);
      solrParser.init(args);
      try {
        return solrParser.parse(new ByteArrayInputStream(qstr.getBytes(StandardCharsets.UTF_8)));
      } catch (ParserException e) {

@ -42,6 +42,8 @@ public class IndexFingerprint {

  private long maxVersionSpecified;
  private long maxVersionEncountered;
  // this actually means max versions used in computing the hash.
  // we cannot change this now because it changes back-compat
  private long maxInHash;
  private long versionsHash;
  private long numVersions;

@ -0,0 +1,33 @@
<?xml version="1.0" ?>

<!--
 Licensed to the Apache Software Foundation (ASF) under one or more
 contributor license agreements. See the NOTICE file distributed with
 this work for additional information regarding copyright ownership.
 The ASF licenses this file to You under the Apache License, Version 2.0
 (the "License"); you may not use this file except in compliance with
 the License. You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-->

<!-- solrconfig-basic.xml plus a queryParser element -->
<config>
  <luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
  <dataDir>${solr.data.dir:}</dataDir>
  <xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
  <directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}"/>
  <schemaFactory class="ClassicIndexSchemaFactory"/>
  <requestHandler name="standard" class="solr.StandardRequestHandler" />
  <queryParser name="testxmlparser" class="XmlQParserPlugin">
    <str name="HandyQuery">org.apache.solr.search.HandyQueryBuilder</str>
    <str name="HelloQuery">org.apache.solr.search.HelloQueryBuilder</str>
    <str name="GoodbyeQuery">org.apache.solr.search.GoodbyeQueryBuilder</str>
  </queryParser>
</config>

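A rough usage sketch (assumed, not part of the patch): with this config a test could select the
plugin via defType and pass the query body as XML, along these lines, using the SolrTestCaseJ4
req()/assertQ() helpers that appear elsewhere in this change:

    // hypothetical test snippet
    assertQ("hello query matches",
        req("defType", "testxmlparser", "q", "<HelloQuery/>"),
        "//result");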
@ -47,6 +47,7 @@ import org.apache.solr.common.SolrInputDocument;
|
|||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.response.transform.DocTransformer; // jdocs
|
||||
|
||||
import org.apache.solr.util.RandomizeSSL;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
|
@ -90,8 +91,6 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
(Arrays.<FlValidator>asList(
|
||||
// TODO: SOLR-9314: add more of these for other various transformers
|
||||
//
|
||||
// TODO: add a [docid] validator (blocked by SOLR-9288 & SOLR-9289)
|
||||
//
|
||||
new GlobValidator("*"),
|
||||
new GlobValidator("*_i"),
|
||||
new GlobValidator("*_s"),
|
||||
|
@ -119,6 +118,9 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
new RenameFieldValueValidator("id", "my_id_alias"),
|
||||
new RenameFieldValueValidator("bbb_i", "my_int_field_alias"),
|
||||
new RenameFieldValueValidator("ddd_s", "my_str_field_alias")));
|
||||
// SOLR-9289...
|
||||
FL_VALIDATORS.add(new DocIdValidator());
|
||||
FL_VALIDATORS.add(new DocIdValidator("my_docid_alias"));
|
||||
} else {
|
||||
// No-Op
|
||||
// No known transformers that only work in distrib cloud but fail in singleCoreMode
|
||||
|
@ -385,16 +387,11 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
* trivial helper method to deal with diff response structure between using a single 'id' param vs
|
||||
* 2 or more 'id' params (or 1 or more 'ids' params).
|
||||
*
|
||||
* NOTE: <code>expectList</code> is currently ignored due to SOLR-9309 -- instead best efforst are made to
|
||||
* return a synthetic list based on whatever can be found in the response.
|
||||
*
|
||||
* @return List from response, or a synthetic one created from single response doc if
|
||||
* <code>expectList</code> was false; May be empty; May be null if response included null list.
|
||||
*/
|
||||
private static SolrDocumentList getDocsFromRTGResponse(final boolean expectList, final QueryResponse rsp) {
|
||||
// TODO: blocked by SOLR-9309 (once this can be fixed, update jdocs)
|
||||
if (null != rsp.getResults()) { // TODO: replace this..
|
||||
// if (expectList) { // TODO: ...with this tighter check.
|
||||
if (expectList) {
|
||||
return rsp.getResults();
|
||||
}
|
||||
|
||||
|
@ -428,7 +425,7 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
}
|
||||
|
||||
/**
|
||||
* abstraction for diff types of things that can be added to an 'fl' param that can validate
|
||||
* Abstraction for diff types of things that can be added to an 'fl' param that can validate
|
||||
* the results are correct compared to an expected SolrInputDocument
|
||||
*/
|
||||
private interface FlValidator {
|
||||
|
@ -441,6 +438,21 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
}
|
||||
params.add(buildCommaSepParams(random(), "fl", fls));
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates if this validator is for a transformer that returns true from
|
||||
* {@link DocTransformer#needsSolrIndexSearcher}. Other validators for transformers that
|
||||
* do <em>not</em> require a re-opened searcher (but may have slightly diff behavior depending
|
||||
* on wether a doc comesfrom the index or from the update log) may use this information to
|
||||
* decide wether they wish to enforce stricter assertions on the resulting document.
|
||||
*
|
||||
* The default implementation always returns <code>false</code>
|
||||
*
|
||||
* @see DocIdValidator
|
||||
*/
|
||||
public default boolean requiresRealtimeSearcherReOpen() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Must return a non null String that can be used in an fl param -- either by itself,
|
||||
|
@ -496,6 +508,42 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
public String getFlParam() { return actualFieldName + ":" + expectedFieldName; }
|
||||
}
|
||||
|
||||
/**
|
||||
* enforces that a valid <code>[docid]</code> is present in the response, possibly using a
|
||||
* resultKey alias. By default the only validation of docId values is that they are an integer
|
||||
* greater than or equal to <code>-1</code> -- but if any other validator in use returns true
|
||||
* from {@link #requiresRealtimeSearcherReOpen} then the constraint is tightened and values must
|
||||
* be greater than or equal to <code>0</code>
|
||||
*/
|
||||
private static class DocIdValidator implements FlValidator {
|
||||
private final String resultKey;
|
||||
public DocIdValidator(final String resultKey) {
|
||||
this.resultKey = resultKey;
|
||||
}
|
||||
public DocIdValidator() {
|
||||
this("[docid]");
|
||||
}
|
||||
public String getFlParam() { return "[docid]".equals(resultKey) ? resultKey : resultKey+":[docid]"; }
|
||||
public Collection<String> assertRTGResults(final Collection<FlValidator> validators,
|
||||
final SolrInputDocument expected,
|
||||
final SolrDocument actual) {
|
||||
final Object value = actual.getFirstValue(resultKey);
|
||||
assertNotNull(getFlParam() + " => no value in actual doc", value);
|
||||
assertTrue("[docid] must be an Integer: " + value, value instanceof Integer);
|
||||
|
||||
int minValidDocId = -1; // if it comes from update log
|
||||
for (FlValidator other : validators) {
|
||||
if (other.requiresRealtimeSearcherReOpen()) {
|
||||
minValidDocId = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
assertTrue("[docid] must be >= " + minValidDocId + ": " + value,
|
||||
minValidDocId <= ((Integer)value).intValue());
|
||||
return Collections.<String>singleton(resultKey);
|
||||
}
|
||||
}
|
||||
|
||||
/** Trivial validator of a ValueSourceAugmenter */
|
||||
private static class FunctionValidator implements FlValidator {
|
||||
private static String func(String fieldName) {
|
||||
|
@ -515,6 +563,8 @@ public class TestRandomFlRTGCloud extends SolrCloudTestCase {
|
|||
this.resultKey = resultKey;
|
||||
this.fieldName = fieldName;
|
||||
}
|
||||
/** always returns true */
|
||||
public boolean requiresRealtimeSearcherReOpen() { return true; }
|
||||
public String getFlParam() { return fl; }
|
||||
public Collection<String> assertRTGResults(final Collection<FlValidator> validators,
|
||||
final SolrInputDocument expected,
|
||||
|
|
|
@ -0,0 +1,39 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.search;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.solr.request.SolrQueryRequest;
import org.w3c.dom.Element;

public class GoodbyeQueryBuilder extends SolrQueryBuilder {

  public GoodbyeQueryBuilder(String defaultField, Analyzer analyzer,
                             SolrQueryRequest req, QueryBuilder queryFactory) {
    super(defaultField, analyzer, req, queryFactory);
  }

  @Override
  public Query getQuery(Element e) throws ParserException {
    return new MatchNoDocsQuery();
  }

}
@ -0,0 +1,53 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.search;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.xml.DOMUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.solr.request.SolrQueryRequest;
import org.w3c.dom.Element;

// A simple test query builder to demonstrate use of
// SolrQueryBuilder's queryFactory constructor argument.
public class HandyQueryBuilder extends SolrQueryBuilder {

  public HandyQueryBuilder(String defaultField, Analyzer analyzer,
                           SolrQueryRequest req, QueryBuilder queryFactory) {
    super(defaultField, analyzer, req, queryFactory);
  }

  @Override
  public Query getQuery(Element e) throws ParserException {
    final BooleanQuery.Builder bq = new BooleanQuery.Builder();
    final Query lhsQ = getSubQuery(e, "Left");
    final Query rhsQ = getSubQuery(e, "Right");
    bq.add(new BooleanClause(lhsQ, BooleanClause.Occur.SHOULD));
    bq.add(new BooleanClause(rhsQ, BooleanClause.Occur.SHOULD));
    return bq.build();
  }

  private Query getSubQuery(Element e, String name) throws ParserException {
    Element subE = DOMUtils.getChildByTagOrFail(e, name);
    subE = DOMUtils.getFirstChildOrFail(subE);
    return queryFactory.getQuery(subE);
  }
}
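For illustration only (the exact XML is an assumption, but the element names follow from the
solrconfig registration above and the Left/Right sub-elements read by getSubQuery): a query string
such as <HandyQuery><Left><HelloQuery/></Left><Right><GoodbyeQuery/></Right></HandyQuery> would be
parsed into a BooleanQuery over the two nested builders.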
@ -0,0 +1,39 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.search;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.lucene.queryparser.xml.QueryBuilder;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.solr.request.SolrQueryRequest;
import org.w3c.dom.Element;

public class HelloQueryBuilder extends SolrQueryBuilder {

  public HelloQueryBuilder(String defaultField, Analyzer analyzer,
                           SolrQueryRequest req, QueryBuilder queryFactory) {
    super(defaultField, analyzer, req, queryFactory);
  }

  @Override
  public Query getQuery(Element e) throws ParserException {
    return new MatchAllDocsQuery();
  }

}
@ -531,21 +531,13 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {
    }
  }

  @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-9288")
  public void testDocIdAugmenterRTG() throws Exception {
    // NOTE: once this test is fixed to pass, testAugmentersRTG should also be updated to test [docid]

    // TODO: behavior of fl=[docid] should be consistent regardless of whether doc is committed
    // what should behavior be?
    // right now, for an uncommitted doc, [docid] is silently ignored and no value included in result
    // perhaps it should be "null" or "-1" ?

    // behavior shouldn't matter if we are committed or uncommitted
    // for an uncommitted doc, we should get -1
    for (String id : Arrays.asList("42","99")) {
      assertQ(id + ": fl=[docid]",
              req("qt","/get","id",id, "wt","xml", "fl","[docid]")
              ,"count(//doc)=1"
              ,"//doc/int[@name='[docid]']"
              ,"//doc/int[@name='[docid]'][.>=-1]"
              ,"//doc[count(*)=1]"
              );
    }
@ -554,22 +546,21 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {
  public void testAugmentersRTG() throws Exception {
    // behavior shouldn't matter if we are committed or uncommitted
    for (String id : Arrays.asList("42","99")) {
      // NOTE: once testDocIdAugmenterRTG can pass, [docid] should be tested here as well.
      for (SolrParams p : Arrays.asList
           (params("fl","[shard],[explain],x_alias:[value v=10 t=int],abs(val_i)"),
            params("fl","[shard],abs(val_i)","fl","[explain],x_alias:[value v=10 t=int]"),
            params("fl","[shard]","fl","[explain],x_alias:[value v=10 t=int]","fl","abs(val_i)"),
            params("fl","[shard]","fl","[explain]","fl","x_alias:[value v=10 t=int]","fl","abs(val_i)"))) {
           (params("fl","[docid],[shard],[explain],x_alias:[value v=10 t=int],abs(val_i)"),
            params("fl","[docid],[shard],abs(val_i)","fl","[explain],x_alias:[value v=10 t=int]"),
            params("fl","[docid],[shard]","fl","[explain],x_alias:[value v=10 t=int]","fl","abs(val_i)"),
            params("fl","[docid]","fl","[shard]","fl","[explain]","fl","x_alias:[value v=10 t=int]","fl","abs(val_i)"))) {
        assertQ(id + ": " + p,
                req(p, "qt","/get","id",id, "wt","xml")
                ,"count(//doc)=1"
                // ,"//doc/int[@name='[docid]']" // TODO
                ,"//doc/int[@name='[docid]'][.>=-1]"
                ,"//doc/float[@name='abs(val_i)'][.='1.0']"
                ,"//doc/str[@name='[shard]'][.='[not a shard request]']"
                // RTG: [explain] should be missing (ignored)
                ,"//doc/int[@name='x_alias'][.=10]"

                ,"//doc[count(*)=3]"
                ,"//doc[count(*)=4]"
                );
      }
    }
@ -595,21 +586,20 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {
  public void testAugmentersAndExplicitRTG() throws Exception {
    // behavior shouldn't matter if we are committed or uncommitted
    for (String id : Arrays.asList("42","99")) {
      // NOTE: once testDocIdAugmenterRTG can pass, [docid] should be tested here as well.
      for (SolrParams p : Arrays.asList
           (params("fl","id,[explain],x_alias:[value v=10 t=int],abs(val_i)"),
            params("fl","id,abs(val_i)","fl","[explain],x_alias:[value v=10 t=int]"),
            params("fl","id","fl","[explain]","fl","x_alias:[value v=10 t=int]","fl","abs(val_i)"))) {
           (params("fl","id,[docid],[explain],x_alias:[value v=10 t=int],abs(val_i)"),
            params("fl","id,[docid],abs(val_i)","fl","[explain],x_alias:[value v=10 t=int]"),
            params("fl","id","fl","[docid]","fl","[explain]","fl","x_alias:[value v=10 t=int]","fl","abs(val_i)"))) {
        assertQ(id + ": " + p,
                req(p, "qt","/get","id",id, "wt","xml")
                ,"count(//doc)=1"
                ,"//doc/str[@name='id']"
                // ,"//doc/int[@name='[docid]']" // TODO
                ,"//doc/int[@name='[docid]'][.>=-1]"
                ,"//doc/float[@name='abs(val_i)'][.='1.0']"
                // RTG: [explain] should be missing (ignored)
                ,"//doc/int[@name='x_alias'][.=10]"

                ,"//doc[count(*)=3]"
                ,"//doc[count(*)=4]"
                );
      }
    }
@ -646,29 +636,28 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {
  public void testAugmentersAndScoreRTG() throws Exception {
    // if we use RTG (committed or otherwise) score should be ignored
    for (String id : Arrays.asList("42","99")) {
      // NOTE: once testDocIdAugmenterRTG can pass, [docid] should be tested here as well.
      assertQ(id,
              req("qt","/get","id",id, "wt","xml",
                  "fl","x_alias:[value v=10 t=int],score,abs(val_i)")
              // ,"//doc/int[@name='[docid]']" // TODO
                  "fl","x_alias:[value v=10 t=int],score,abs(val_i),[docid]")
              ,"//doc/int[@name='[docid]'][.>=-1]"
              ,"//doc/float[@name='abs(val_i)'][.='1.0']"
              ,"//doc/int[@name='x_alias'][.=10]"

              ,"//doc[count(*)=2]"
              ,"//doc[count(*)=3]"
              );
      for (SolrParams p : Arrays.asList(params("fl","x_alias:[value v=10 t=int],[explain],score,abs(val_i)"),
                                        params("fl","x_alias:[value v=10 t=int],[explain]","fl","score,abs(val_i)"),
                                        params("fl","x_alias:[value v=10 t=int]","fl","[explain]","fl","score","fl","abs(val_i)"))) {
      for (SolrParams p : Arrays.asList(params("fl","[docid],x_alias:[value v=10 t=int],[explain],score,abs(val_i)"),
                                        params("fl","x_alias:[value v=10 t=int],[explain]","fl","[docid],score,abs(val_i)"),
                                        params("fl","[docid]","fl","x_alias:[value v=10 t=int]","fl","[explain]","fl","score","fl","abs(val_i)"))) {

        assertQ(p.toString(),
                req(p, "qt","/get","id",id, "wt","xml")

                // ,"//doc/int[@name='[docid]']" // TODO
                ,"//doc/int[@name='[docid]']" // TODO
                ,"//doc/float[@name='abs(val_i)'][.='1.0']"
                ,"//doc/int[@name='x_alias'][.=10]"
                // RTG: [explain] and score should be missing (ignored)

                ,"//doc[count(*)=2]"
                ,"//doc[count(*)=3]"
                );
      }
    }
@ -713,8 +702,7 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {

    // NOTE: 'ssto' is the missing one
    final List<String> fl = Arrays.asList
      // NOTE: once testDocIdAugmenterRTG can pass, [docid] should be tested here as well.
      ("id","[explain]","score","val_*","subj*","abs(val_i)");
      ("id","[explain]","score","val_*","subj*","abs(val_i)","[docid]");

    final int iters = atLeast(random, 10);
    for (int i = 0; i < iters; i++) {
@ -734,12 +722,12 @@ public class TestPseudoReturnFields extends SolrTestCaseJ4 {
              req(p, "qt","/get","id",id, "wt","xml")
              ,"count(//doc)=1"
              ,"//doc/str[@name='id']"
              // ,"//doc/int[@name='[docid]']" // TODO
              ,"//doc/int[@name='[docid]'][.>=-1]"
              ,"//doc/float[@name='abs(val_i)'][.='1.0']"
              // RTG: [explain] and score should be missing (ignored)
              ,"//doc/int[@name='val_i'][.=1]"
              ,"//doc/str[@name='subject']"
              ,"//doc[count(*)=4]"
              ,"//doc[count(*)=5]"
              );
    }
  }
@ -0,0 +1,78 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.search;

import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

public class TestXmlQParserPlugin extends SolrTestCaseJ4 {

  @BeforeClass
  public static void beforeClass() throws Exception {
    initCore("solrconfig-testxmlparser.xml", "schema-minimal.xml");
  }

  @Override
  @Before
  public void setUp() throws Exception {
    // if you override setUp or tearDown, you better call
    // the super classes version
    super.setUp();
    clearIndex();
    assertU(commit());
  }

  @Test
  public void testHelloQuery() throws Exception {
    final int numDocs = random().nextInt(10);
    implTestQuery(numDocs, "<HelloQuery/>", numDocs);
  }

  @Test
  public void testGoodbyeQuery() throws Exception {
    final int numDocs = random().nextInt(10);
    implTestQuery(numDocs, "<GoodbyeQuery/>", 0);
  }

  @Test
  public void testHandyQuery() throws Exception {
    final int numDocs = random().nextInt(10);
    final String q = "<HandyQuery><Left><HelloQuery/></Left><Right><GoodbyeQuery/></Right></HandyQuery>";
    implTestQuery(numDocs, q, numDocs);
  }

  public void implTestQuery(int numDocs, String q, int expectedCount) throws Exception {
    // add some documents
    for (int ii=1; ii<=numDocs; ++ii) {
      String[] doc = {"id",ii+"0"};
      assertU(adoc(doc));
      if (random().nextBoolean()) {
        assertU(commit());
      }
    }
    assertU(commit());
    // and then run the query
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.add("defType", "testxmlparser");
    params.add("q", q);
    assertQ(req(params), "*[count(//doc)="+expectedCount+"]");
  }

}
@ -352,6 +352,16 @@ public class CloudSolrStream extends TupleStream implements Expressible {
    }
  }

  public static Collection<Slice> getSlicesIgnoreCase(String name, ClusterState clusterState) {
    for (String coll : clusterState.getCollectionStates().keySet()) {
      if (coll.equalsIgnoreCase(name)) {
        DocCollection collection = clusterState.getCollectionOrNull(coll);
        if (collection != null) return collection.getActiveSlices();
      }
    }
    return null;
  }

  protected void constructStreams() throws IOException {

    try {
@ -362,20 +372,9 @@ public class CloudSolrStream extends TupleStream implements Expressible {
      //System.out.println("Connected to zk an got cluster state.");

      Collection<Slice> slices = clusterState.getActiveSlices(this.collection);

      if (slices == null) slices = getSlicesIgnoreCase(this.collection, clusterState);
      if (slices == null) {
        //Try case insensitive match
        Map<String, DocCollection> collectionsMap = clusterState.getCollectionsMap();
        for (Map.Entry<String, DocCollection> entry : collectionsMap.entrySet()) {
          if (entry.getKey().equalsIgnoreCase(collection)) {
            slices = entry.getValue().getActiveSlices();
            break;
          }
        }

        if (slices == null) {
          throw new Exception("Collection not found:" + this.collection);
        }
        throw new Exception("Collection not found:" + this.collection);
      }

      ModifiableSolrParams mParams = new ModifiableSolrParams(params);
@ -52,7 +52,6 @@ import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
@ -519,20 +518,9 @@ public class TopicStream extends CloudSolrStream implements Expressible {
      //System.out.println("Connected to zk an got cluster state.");

      Collection<Slice> slices = clusterState.getActiveSlices(this.collection);

      if (slices == null) slices = getSlicesIgnoreCase(this.collection, clusterState);
      if (slices == null) {
        //Try case insensitive match
        Map<String, DocCollection> collectionsMap = clusterState.getCollectionsMap();
        for (Map.Entry<String, DocCollection> entry : collectionsMap.entrySet()) {
          if (entry.getKey().equalsIgnoreCase(collection)) {
            slices = entry.getValue().getActiveSlices();
            break;
          }
        }

        if (slices == null) {
          throw new Exception("Collection not found:" + this.collection);
        }
        throw new Exception("Collection not found:" + this.collection);
      }

@ -39,8 +39,8 @@ import org.noggit.JSONWriter;
public class ClusterState implements JSONWriter.Writable {

  private final Integer znodeVersion;

  private final Map<String, CollectionRef> collectionStates;
  private final Map<String, CollectionRef> collectionStates, immutableCollectionStates;
  private Set<String> liveNodes;

  /**
@ -67,6 +67,7 @@ public class ClusterState implements JSONWriter.Writable {
    this.liveNodes = new HashSet<>(liveNodes.size());
    this.liveNodes.addAll(liveNodes);
    this.collectionStates = new LinkedHashMap<>(collectionStates);
    this.immutableCollectionStates = Collections.unmodifiableMap(collectionStates);
  }

@ -432,10 +433,12 @@ public class ClusterState implements JSONWriter.Writable {
    this.liveNodes = liveNodes;
  }

  /**For internal use only
  /** Be aware that this may return collections which may not exist now.
   * You can confirm that this collection exists after verifying
   * CollectionRef.get() != null
   */
  Map<String, CollectionRef> getCollectionStates() {
    return collectionStates;
  public Map<String, CollectionRef> getCollectionStates() {
    return immutableCollectionStates;
  }

  public static class CollectionRef {
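A closing note (not part of the commit): the new getCollectionStates() javadoc above is the contract that CloudSolrStream's getSlicesIgnoreCase() relies on. A minimal caller-side sketch of that pattern follows; it assumes only what the diff shows, plus the assumption that CollectionRef.get() yields the DocCollection (the javadoc in the hunk above only guarantees the null check for collections that no longer exist).

import java.util.Map;

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;

class CollectionLookupSketch {

  /** Returns the named collection if it actually exists right now, else null. */
  static DocCollection resolveIgnoreCase(ClusterState clusterState, String name) {
    for (Map.Entry<String, ClusterState.CollectionRef> e : clusterState.getCollectionStates().entrySet()) {
      if (e.getKey().equalsIgnoreCase(name)) {
        DocCollection coll = e.getValue().get(); // may be null: the ref is lazy and can be stale
        if (coll != null) {
          return coll;
        }
      }
    }
    return null;
  }
}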