mirror of https://github.com/apache/lucene.git
Merge remote-tracking branch 'origin/master' into gradle-master
This commit is contained in:
commit
405d227c55
|
@ -115,7 +115,8 @@ Improvements
|
|||
|
||||
Optimizations
|
||||
---------------------
|
||||
(No changes)
|
||||
|
||||
* LUCENE-9113: Faster merging of SORTED/SORTED_SET doc values. (Adrien Grand)
|
||||
|
||||
Bug Fixes
|
||||
---------------------
|
||||
|
@ -123,7 +124,9 @@ Bug Fixes
|
|||
|
||||
Other
|
||||
---------------------
|
||||
(No changes)
|
||||
|
||||
* LUCENE-9096: Simplification of CompressingTermVectorsWriter#flushOffsets.
|
||||
(kkewwei via Adrien Grand)
|
||||
|
||||
======================= Lucene 8.4.0 =======================
|
||||
|
||||
|
|
|
@ -29,14 +29,18 @@ import org.apache.lucene.index.DocValuesType;
|
|||
import org.apache.lucene.index.EmptyDocValuesProducer;
|
||||
import org.apache.lucene.index.FieldInfo;
|
||||
import org.apache.lucene.index.FilteredTermsEnum;
|
||||
import org.apache.lucene.index.ImpactsEnum;
|
||||
import org.apache.lucene.index.MergeState;
|
||||
import org.apache.lucene.index.NumericDocValues;
|
||||
import org.apache.lucene.index.OrdinalMap;
|
||||
import org.apache.lucene.index.PostingsEnum;
|
||||
import org.apache.lucene.index.SegmentWriteState; // javadocs
|
||||
import org.apache.lucene.index.SortedDocValues;
|
||||
import org.apache.lucene.index.SortedNumericDocValues;
|
||||
import org.apache.lucene.index.SortedSetDocValues;
|
||||
import org.apache.lucene.index.TermState;
|
||||
import org.apache.lucene.index.TermsEnum;
|
||||
import org.apache.lucene.util.AttributeSource;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.LongBitSet;
|
||||
|
@ -450,6 +454,102 @@ public abstract class DocValuesConsumer implements Closeable {
|
|||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* A merged {@link TermsEnum}. This helps avoid relying on the default terms enum,
|
||||
* which calls {@link SortedDocValues#lookupOrd(int)} or
|
||||
* {@link SortedSetDocValues#lookupOrd(long)} on every call to {@link TermsEnum#next()}.
|
||||
*/
|
||||
private static class MergedTermsEnum extends TermsEnum {
|
||||
|
||||
private final TermsEnum[] subs;
|
||||
private final OrdinalMap ordinalMap;
|
||||
private final long valueCount;
|
||||
private long ord = -1;
|
||||
private BytesRef term;
|
||||
|
||||
MergedTermsEnum(OrdinalMap ordinalMap, TermsEnum[] subs) {
|
||||
this.ordinalMap = ordinalMap;
|
||||
this.subs = subs;
|
||||
this.valueCount = ordinalMap.getValueCount();
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef term() throws IOException {
|
||||
return term;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ord() throws IOException {
|
||||
return ord;
|
||||
}
|
||||
|
||||
@Override
|
||||
public BytesRef next() throws IOException {
|
||||
if (++ord >= valueCount) {
|
||||
return null;
|
||||
}
|
||||
final int subNum = ordinalMap.getFirstSegmentNumber(ord);
|
||||
final TermsEnum sub = subs[subNum];
|
||||
final long subOrd = ordinalMap.getFirstSegmentOrd(ord);
|
||||
do {
|
||||
term = sub.next();
|
||||
} while (sub.ord() < subOrd);
|
||||
assert sub.ord() == subOrd;
|
||||
return term;
|
||||
}
|
||||
|
||||
@Override
|
||||
public AttributeSource attributes() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean seekExact(BytesRef text) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SeekStatus seekCeil(BytesRef text) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void seekExact(long ord) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void seekExact(BytesRef term, TermState state) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int docFreq() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long totalTermFreq() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public ImpactsEnum impacts(int flags) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TermState termState() throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/** Tracks state of one sorted sub-reader that we are merging */
|
||||
private static class SortedDocValuesSub extends DocIDMerger.Sub {
|
||||
|
||||
|
@ -610,6 +710,15 @@ public abstract class DocValuesConsumer implements Closeable {
|
|||
int segmentOrd = (int) map.getFirstSegmentOrd(ord);
|
||||
return dvs[segmentNumber].lookupOrd(segmentOrd);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TermsEnum termsEnum() throws IOException {
|
||||
TermsEnum[] subs = new TermsEnum[toMerge.size()];
|
||||
for (int sub = 0; sub < subs.length; ++sub) {
|
||||
subs[sub] = toMerge.get(sub).termsEnum();
|
||||
}
|
||||
return new MergedTermsEnum(map, subs);
|
||||
}
|
||||
};
|
||||
}
|
||||
});
|
||||
|
@ -781,6 +890,15 @@ public abstract class DocValuesConsumer implements Closeable {
|
|||
public long getValueCount() {
|
||||
return map.getValueCount();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TermsEnum termsEnum() throws IOException {
|
||||
TermsEnum[] subs = new TermsEnum[toMerge.size()];
|
||||
for (int sub = 0; sub < subs.length; ++sub) {
|
||||
subs[sub] = toMerge.get(sub).termsEnum();
|
||||
}
|
||||
return new MergedTermsEnum(map, subs);
|
||||
}
|
||||
};
|
||||
}
|
||||
});
|
||||
|
|
|
@ -19,6 +19,7 @@ package org.apache.lucene.codecs.blocktree;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.codecs.BlockTermState;
|
||||
|
@ -882,18 +883,17 @@ public final class BlockTreeTermsWriter extends FieldsConsumer {
|
|||
|
||||
/** Pushes the new term to the top of the stack, and writes new blocks. */
|
||||
private void pushTerm(BytesRef text) throws IOException {
|
||||
int limit = Math.min(lastTerm.length(), text.length);
|
||||
|
||||
// Find common prefix between last term and current term:
|
||||
int pos = 0;
|
||||
while (pos < limit && lastTerm.byteAt(pos) == text.bytes[text.offset+pos]) {
|
||||
pos++;
|
||||
int prefixLength = Arrays.mismatch(lastTerm.bytes(), 0, lastTerm.length(), text.bytes, text.offset, text.offset + text.length);
|
||||
if (prefixLength == -1) { // Only happens for the first term, if it is empty
|
||||
assert lastTerm.length() == 0;
|
||||
prefixLength = 0;
|
||||
}
|
||||
|
||||
// if (DEBUG) System.out.println(" shared=" + pos + " lastTerm.length=" + lastTerm.length);
|
||||
|
||||
// Close the "abandoned" suffix now:
|
||||
for(int i=lastTerm.length()-1;i>=pos;i--) {
|
||||
for(int i=lastTerm.length()-1;i>=prefixLength;i--) {
|
||||
|
||||
// How many items on top of the stack share the current suffix
|
||||
// we are closing:
|
||||
|
@ -910,7 +910,7 @@ public final class BlockTreeTermsWriter extends FieldsConsumer {
|
|||
}
|
||||
|
||||
// Init new tail:
|
||||
for(int i=pos;i<text.length;i++) {
|
||||
for(int i=prefixLength;i<text.length;i++) {
|
||||
prefixStarts[i] = pending.size();
|
||||
}
|
||||
|
||||
|
|
|
@ -564,17 +564,9 @@ public final class CompressingTermVectorsWriter extends TermVectorsWriter {
|
|||
final int fieldNumOff = Arrays.binarySearch(fieldNums, fd.fieldNum);
|
||||
int pos = 0;
|
||||
for (int i = 0; i < fd.numTerms; ++i) {
|
||||
int previousPos = 0;
|
||||
int previousOff = 0;
|
||||
for (int j = 0; j < fd.freqs[i]; ++j) {
|
||||
final int position = positionsBuf[fd.posStart + pos];
|
||||
final int startOffset = startOffsetsBuf[fd.offStart + pos];
|
||||
sumPos[fieldNumOff] += position - previousPos;
|
||||
sumOffsets[fieldNumOff] += startOffset - previousOff;
|
||||
previousPos = position;
|
||||
previousOff = startOffset;
|
||||
++pos;
|
||||
}
|
||||
sumPos[fieldNumOff] += positionsBuf[fd.posStart + fd.freqs[i]-1 + pos];
|
||||
sumOffsets[fieldNumOff] += startOffsetsBuf[fd.offStart + fd.freqs[i]-1 + pos];
|
||||
pos += fd.freqs[i];
|
||||
}
|
||||
assert pos == fd.totalPositions;
|
||||
}
|
||||
|
|
|
@ -154,7 +154,7 @@
|
|||
* {@link org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat Term Frequency data}.
|
||||
* For each term in the dictionary, the numbers of all the
|
||||
* documents that contain that term, and the frequency of the term in that
|
||||
* document, unless frequencies are omitted (IndexOptions.DOCS_ONLY)
|
||||
* document, unless frequencies are omitted ({@link org.apache.lucene.index.IndexOptions#DOCS IndexOptions.DOCS})
|
||||
* </li>
|
||||
* <li>
|
||||
* {@link org.apache.lucene.codecs.lucene84.Lucene84PostingsFormat Term Proximity data}.
|
||||
|
|
|
@ -1332,7 +1332,7 @@ public abstract class LuceneTestCase extends Assert {
|
|||
* See {@link #newDirectory()} for more information.
|
||||
*/
|
||||
public static BaseDirectoryWrapper newDirectory(Random r) {
|
||||
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), rarely(r));
|
||||
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), rarely(r), false);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1340,7 +1340,7 @@ public abstract class LuceneTestCase extends Assert {
|
|||
* See {@link #newDirectory()} for more information.
|
||||
*/
|
||||
public static BaseDirectoryWrapper newDirectory(Random r, LockFactory lf) {
|
||||
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), rarely(r));
|
||||
return wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), rarely(r), false);
|
||||
}
|
||||
|
||||
public static MockDirectoryWrapper newMockDirectory() {
|
||||
|
@ -1348,11 +1348,11 @@ public abstract class LuceneTestCase extends Assert {
|
|||
}
|
||||
|
||||
public static MockDirectoryWrapper newMockDirectory(Random r) {
|
||||
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false);
|
||||
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY), false, false);
|
||||
}
|
||||
|
||||
public static MockDirectoryWrapper newMockDirectory(Random r, LockFactory lf) {
|
||||
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), false);
|
||||
return (MockDirectoryWrapper) wrapDirectory(r, newDirectoryImpl(r, TEST_DIRECTORY, lf), false, false);
|
||||
}
|
||||
|
||||
public static MockDirectoryWrapper newMockFSDirectory(Path f) {
|
||||
|
@ -1416,10 +1416,7 @@ public abstract class LuceneTestCase extends Assert {
|
|||
}
|
||||
|
||||
Directory fsdir = newFSDirectoryImpl(clazz, f, lf);
|
||||
if (rarely()) {
|
||||
|
||||
}
|
||||
BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare);
|
||||
BaseDirectoryWrapper wrapped = wrapDirectory(random(), fsdir, bare, true);
|
||||
return wrapped;
|
||||
} catch (Exception e) {
|
||||
Rethrow.rethrow(e);
|
||||
|
@ -1447,11 +1444,13 @@ public abstract class LuceneTestCase extends Assert {
|
|||
impl.copyFrom(d, file, file, newIOContext(r));
|
||||
}
|
||||
}
|
||||
return wrapDirectory(r, impl, rarely(r));
|
||||
return wrapDirectory(r, impl, rarely(r), false);
|
||||
}
|
||||
|
||||
private static BaseDirectoryWrapper wrapDirectory(Random random, Directory directory, boolean bare) {
|
||||
if (rarely(random) && !bare) {
|
||||
private static BaseDirectoryWrapper wrapDirectory(Random random, Directory directory, boolean bare, boolean filesystem) {
|
||||
// IOContext randomization might make NRTCachingDirectory make bad decisions, so avoid
|
||||
// using it if the user requested a filesystem directory.
|
||||
if (rarely(random) && !bare && filesystem == false) {
|
||||
directory = new NRTCachingDirectory(directory, random.nextDouble(), random.nextDouble());
|
||||
}
|
||||
|
||||
|
|
|
@ -77,6 +77,10 @@ Upgrade Notes
|
|||
|
||||
* SOLR-14092: Deprecated BlockJoinFacetComponent and BlockJoinDocSetFacetComponent are removed
|
||||
Users are encouraged to migrate to uniqueBlock() in JSON Facet API. (Mikhail Khludnev)
|
||||
|
||||
* SOLR-13985: Solr's Jetty now binds to localhost network interface by default for better out of the box security.
|
||||
Administrators that need Solr exposed more broadly can change the SOLR_JETTY_HOST property in their Solr include
|
||||
(solr.in.sh/solr.in.cmd) file. (Jason Gerlowski, David Smiley, Robert Muir)
|
||||
|
||||
Improvements
|
||||
----------------------
|
||||
|
@ -149,12 +153,14 @@ Upgrade Notes
|
|||
If you prefer to keep the old (but insecure) serialization strategy, you can start your nodes using the
|
||||
property: `-Dsolr.useUnsafeOverseerResponse=true`. Keep in mind that this will be removed in future version of Solr.
|
||||
|
||||
* SOLR-13808: add cache=false into uderneath BoolQParser's filter clause or {"bool":{"filter":..}} to avoid caching in
|
||||
* SOLR-13808: add cache=false into underneath BoolQParser's filter clause or {"bool":{"filter":..}} to avoid caching in
|
||||
filterCache. (Mikhail Khludnev)
|
||||
|
||||
New Features
|
||||
---------------------
|
||||
(No changes)
|
||||
* SOLR-12490: Introducing json.queries in JSON Request API. Every property of this object holds one or many named
|
||||
Query DSL queries. It's optional and doesn't impact response without explicit referencing these queries by names
|
||||
(Anatolii Siuniaev via Mikhail Khludnev)
|
||||
|
||||
Improvements
|
||||
---------------------
|
||||
|
@ -187,6 +193,8 @@ Improvements
|
|||
hl.fragsizeIsMinimum, with defaults that aim to better center matches in fragments than previously. See the ref guide.
|
||||
Regardless of the settings, the passages may be sized differently than before. (Nándor Mátravölgyi, David Smiley)
|
||||
|
||||
* SOLR-14154: Return correct isolation level when retrieving it from the SQL Connection (Nick Vercammen, Kevin Risden)
|
||||
|
||||
Optimizations
|
||||
---------------------
|
||||
(No changes)
|
||||
|
@ -204,6 +212,10 @@ Bug Fixes
|
|||
|
||||
* SOLR-14122: SimUtils converts v2 to v1 request params incorrectly. (Li Cao, ab)
|
||||
|
||||
* SOLR-13089: Fix lsof edge cases in the solr CLI script (Martijn Koster via janhoy)
|
||||
|
||||
* SOLR-11746: Fixed existence query support for numeric point fields. (Kai Chan, hossman, Houston Putman)
|
||||
|
||||
Other Changes
|
||||
---------------------
|
||||
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
############################################################################################
|
||||
#
|
||||
# A command line tool for indexing Solr logs in the out-of-the-box log format.
|
||||
#
|
||||
# First build the Solr distribution. Then run postlogs from inside the Solr distribution
|
||||
# home directory as described below:
|
||||
#
|
||||
# parameters:
|
||||
#
|
||||
# -- baseUrl: Example http://localhost:8983/solr/collection1
|
||||
# -- rootDir: All files found at or below the root will be indexed
|
||||
#
|
||||
# Sample syntax: ./bin/postlogs http://localhost:8983/solr/collection1 /user/foo/logs");
|
||||
#
|
||||
#
|
||||
############################################################################################
|
||||
|
||||
java -classpath dist/*:dist/solrj-lib/*: org.apache.solr.util.SolrLogPostTool $1 $2
|
|
@ -1879,6 +1879,10 @@ if [ -z "$SOLR_PORT" ]; then
|
|||
SOLR_PORT=8983
|
||||
fi
|
||||
|
||||
if [ -n "$SOLR_JETTY_HOST" ]; then
|
||||
SOLR_OPTS+=("-Dsolr.jetty.host=$SOLR_JETTY_HOST")
|
||||
fi
|
||||
|
||||
if [ -z "$STOP_PORT" ]; then
|
||||
STOP_PORT=`expr $SOLR_PORT - 1000`
|
||||
fi
|
||||
|
@ -2237,13 +2241,13 @@ function start_solr() {
|
|||
echo ""
|
||||
fi
|
||||
# no lsof on cygwin though
|
||||
if hash lsof 2>/dev/null ; then # hash returns true if lsof is on the path
|
||||
if lsof -v 2>&1 | grep -q revision; then
|
||||
echo -n "Waiting up to $SOLR_STOP_WAIT seconds to see Solr running on port $SOLR_PORT"
|
||||
# Launch in a subshell to show the spinner
|
||||
(loops=0
|
||||
while true
|
||||
do
|
||||
running=`lsof -PniTCP:$SOLR_PORT -sTCP:LISTEN`
|
||||
running=$(lsof -t -PniTCP:$SOLR_PORT -sTCP:LISTEN)
|
||||
if [ -z "$running" ]; then
|
||||
slept=$((loops * 2))
|
||||
if [ $slept -lt $SOLR_STOP_WAIT ]; then
|
||||
|
|
|
@ -178,7 +178,7 @@ IF NOT "%SOLR_HOST%"=="" (
|
|||
set "SOLR_TOOL_HOST=localhost"
|
||||
)
|
||||
IF "%SOLR_JETTY_HOST%"=="" (
|
||||
set SOLR_JETTY_HOST=0.0.0.0
|
||||
set "SOLR_JETTY_HOST=127.0.0.1"
|
||||
)
|
||||
|
||||
REM Verify Java is available
|
||||
|
@ -1086,6 +1086,10 @@ IF "!IS_RESTART!"=="1" set SCRIPT_CMD=start
|
|||
IF "%SOLR_PORT%"=="" set SOLR_PORT=8983
|
||||
IF "%STOP_PORT%"=="" set /A STOP_PORT=%SOLR_PORT% - 1000
|
||||
|
||||
IF DEFINED SOLR_JETTY_HOST (
|
||||
set "SOLR_OPTS=%SOLR_OPTS% -Dsolr.jetty.host=%SOLR_JETTY_HOST%"
|
||||
)
|
||||
|
||||
IF "%SCRIPT_CMD%"=="start" (
|
||||
REM see if Solr is already running using netstat
|
||||
For /f "tokens=2,5" %%j in ('netstat -aon ^| find "TCP " ^| find ":0 " ^| find ":%SOLR_PORT% "') do (
|
||||
|
@ -1321,7 +1325,7 @@ IF "%FG%"=="1" (
|
|||
"%JAVA%" %SERVEROPT% %SOLR_JAVA_MEM% %START_OPTS% ^
|
||||
-Dlog4j.configurationFile="%LOG4J_CONFIG%" -DSTOP.PORT=!STOP_PORT! -DSTOP.KEY=%STOP_KEY% ^
|
||||
-Dsolr.solr.home="%SOLR_HOME%" -Dsolr.install.dir="%SOLR_TIP%" -Dsolr.default.confdir="%DEFAULT_CONFDIR%" ^
|
||||
-Djetty.host=%SOLR_JETTY_HOST% -Djetty.port=%SOLR_PORT% -Djetty.home="%SOLR_SERVER_DIR%" ^
|
||||
-Djetty.port=%SOLR_PORT% -Djetty.home="%SOLR_SERVER_DIR%" ^
|
||||
-Djava.io.tmpdir="%SOLR_SERVER_DIR%\tmp" -jar start.jar %SOLR_JETTY_CONFIG% "%SOLR_JETTY_ADDL_CONFIG%"
|
||||
) ELSE (
|
||||
START /B "Solr-%SOLR_PORT%" /D "%SOLR_SERVER_DIR%" ^
|
||||
|
@ -1329,7 +1333,7 @@ IF "%FG%"=="1" (
|
|||
-Dlog4j.configurationFile="%LOG4J_CONFIG%" -DSTOP.PORT=!STOP_PORT! -DSTOP.KEY=%STOP_KEY% ^
|
||||
-Dsolr.log.muteconsole ^
|
||||
-Dsolr.solr.home="%SOLR_HOME%" -Dsolr.install.dir="%SOLR_TIP%" -Dsolr.default.confdir="%DEFAULT_CONFDIR%" ^
|
||||
-Djetty.host=%SOLR_JETTY_HOST% -Djetty.port=%SOLR_PORT% -Djetty.home="%SOLR_SERVER_DIR%" ^
|
||||
-Djetty.port=%SOLR_PORT% -Djetty.home="%SOLR_SERVER_DIR%" ^
|
||||
-Djava.io.tmpdir="%SOLR_SERVER_DIR%\tmp" -jar start.jar %SOLR_JETTY_CONFIG% "%SOLR_JETTY_ADDL_CONFIG%" > "!SOLR_LOGS_DIR!\solr-%SOLR_PORT%-console.log"
|
||||
echo %SOLR_PORT%>"%SOLR_TIP%"\bin\solr-%SOLR_PORT%.port
|
||||
|
||||
|
|
|
@ -105,13 +105,17 @@ REM set SOLR_LOG_PRESTART_ROTATION=false
|
|||
REM Enables jetty request log for all requests
|
||||
REM set SOLR_REQUESTLOG_ENABLED=false
|
||||
|
||||
REM Set the host interface to listen on. Jetty will listen on all interfaces (0.0.0.0) by default.
|
||||
REM This must be an IPv4 ("a.b.c.d") or bracketed IPv6 ("[x::y]") address, not a hostname!
|
||||
REM set SOLR_JETTY_HOST=0.0.0.0
|
||||
|
||||
REM Sets the port Solr binds to, default is 8983
|
||||
REM set SOLR_PORT=8983
|
||||
|
||||
REM Sets the network interface the Solr binds to. To prevent administrators from
|
||||
REM accidentally exposing Solr more widely than intended, this defaults to 127.0.0.1.
|
||||
REM Administrators should think carefully about their deployment environment and
|
||||
REM set this value as narrowly as required before going to production. In
|
||||
REM environments where security is not a concern, 0.0.0.0 can be used to allow
|
||||
REM Solr to accept connections on all network interfaces.
|
||||
REM set SOLR_JETTY_HOST=127.0.0.1
|
||||
|
||||
REM Restrict access to solr by IP address.
|
||||
REM Specify a comma-separated list of addresses or networks, for example:
|
||||
REM 127.0.0.1, 192.168.0.0/24, [::1], [2000:123:4:5::]/64
|
||||
|
|
|
@ -139,6 +139,14 @@
|
|||
# 127.0.0.1, 192.168.0.0/24, [::1], [2000:123:4:5::]/64
|
||||
#SOLR_IP_BLACKLIST=
|
||||
|
||||
# Sets the network interface the Solr binds to. To prevent administrators from
|
||||
# accidentally exposing Solr more widely than intended, this defaults to 127.0.0.1.
|
||||
# Administrators should think carefully about their deployment environment and
|
||||
# set this value as narrowly as required before going to production. In
|
||||
# environments where security is not a concern, 0.0.0.0 can be used to allow
|
||||
# Solr to accept connections on all network interfaces.
|
||||
#SOLR_JETTY_HOST="127.0.0.1"
|
||||
|
||||
# Enables HTTPS. It is implictly true if you set SOLR_SSL_KEY_STORE. Use this config
|
||||
# to enable https module with custom jetty configuration.
|
||||
#SOLR_SSL_ENABLED=true
|
||||
|
|
|
@ -266,7 +266,7 @@ public class ICUCollationField extends FieldType {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
String f = field.getName();
|
||||
BytesRef low = part1 == null ? null : getCollationKey(f, part1);
|
||||
BytesRef high = part2 == null ? null : getCollationKey(f, part2);
|
||||
|
|
|
@ -1187,10 +1187,14 @@ public abstract class SolrQueryParserBase extends QueryBuilder {
|
|||
// called from parser
|
||||
protected Query getWildcardQuery(String field, String termStr) throws SyntaxError {
|
||||
checkNullField(field);
|
||||
// *:* -> MatchAllDocsQuery
|
||||
|
||||
if ("*".equals(termStr)) {
|
||||
if ("*".equals(field) || getExplicitField() == null) {
|
||||
// '*:*' and '*' -> MatchAllDocsQuery
|
||||
return newMatchAllDocsQuery();
|
||||
} else {
|
||||
// 'foo:*' -> empty prefix query
|
||||
return getPrefixQuery(field, "");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -192,6 +192,7 @@ public class RequestUtil {
|
|||
|
||||
// implement compat for existing components...
|
||||
JsonQueryConverter jsonQueryConverter = new JsonQueryConverter();
|
||||
|
||||
if (json != null && !isShard) {
|
||||
for (Map.Entry<String,Object> entry : json.entrySet()) {
|
||||
String key = entry.getKey();
|
||||
|
@ -214,48 +215,62 @@ public class RequestUtil {
|
|||
out = "rows";
|
||||
} else if (SORT.equals(key)) {
|
||||
out = SORT;
|
||||
} else if ("queries".equals(key)) {
|
||||
Object queriesJsonObj = entry.getValue();
|
||||
if (queriesJsonObj instanceof Map && queriesJsonObj != null) {
|
||||
@SuppressWarnings("unchecked")
|
||||
final Map<String,Object> queriesAsMap = (Map<String,Object>) queriesJsonObj;
|
||||
for (Map.Entry<String,Object> queryJsonProperty : queriesAsMap.entrySet()) {
|
||||
out = queryJsonProperty.getKey();
|
||||
arr = true;
|
||||
isQuery = true;
|
||||
convertJsonPropertyToLocalParams(newMap, jsonQueryConverter, queryJsonProperty, out, isQuery, arr);
|
||||
}
|
||||
continue;
|
||||
} else {
|
||||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
|
||||
"Expected Map for 'queries', received " + queriesJsonObj);
|
||||
}
|
||||
} else if ("params".equals(key) || "facet".equals(key) ) {
|
||||
// handled elsewhere
|
||||
continue;
|
||||
} else {
|
||||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown top-level key in JSON request : " + key);
|
||||
}
|
||||
|
||||
Object val = entry.getValue();
|
||||
|
||||
if (arr) {
|
||||
String[] existing = newMap.get(out);
|
||||
List lst = val instanceof List ? (List)val : null;
|
||||
int existingSize = existing==null ? 0 : existing.length;
|
||||
int jsonSize = lst==null ? 1 : lst.size();
|
||||
String[] newval = new String[ existingSize + jsonSize ];
|
||||
for (int i=0; i<existingSize; i++) {
|
||||
newval[i] = existing[i];
|
||||
}
|
||||
if (lst != null) {
|
||||
for (int i = 0; i < jsonSize; i++) {
|
||||
Object v = lst.get(i);
|
||||
newval[existingSize + i] = isQuery ? jsonQueryConverter.toLocalParams(v, newMap) : v.toString();
|
||||
}
|
||||
} else {
|
||||
newval[newval.length-1] = isQuery ? jsonQueryConverter.toLocalParams(val, newMap) : val.toString();
|
||||
}
|
||||
newMap.put(out, newval);
|
||||
} else {
|
||||
newMap.put(out, new String[]{isQuery ? jsonQueryConverter.toLocalParams(val, newMap) : val.toString()});
|
||||
}
|
||||
|
||||
convertJsonPropertyToLocalParams(newMap, jsonQueryConverter, entry, out, isQuery, arr);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
if (json != null) {
|
||||
req.setJSON(json);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private static void convertJsonPropertyToLocalParams(Map<String, String[]> outMap, JsonQueryConverter jsonQueryConverter, Map.Entry<String, Object> jsonProperty, String outKey, boolean isQuery, boolean arr) {
|
||||
Object val = jsonProperty.getValue();
|
||||
|
||||
if (arr) {
|
||||
String[] existing = outMap.get(outKey);
|
||||
List<?> lst = val instanceof List ? (List<?>)val : null;
|
||||
int existingSize = existing==null ? 0 : existing.length;
|
||||
int jsonSize = lst==null ? 1 : lst.size();
|
||||
String[] newval = new String[ existingSize + jsonSize ];
|
||||
for (int i=0; i<existingSize; i++) {
|
||||
newval[i] = existing[i];
|
||||
}
|
||||
if (lst != null) {
|
||||
for (int i = 0; i < jsonSize; i++) {
|
||||
Object v = lst.get(i);
|
||||
newval[existingSize + i] = isQuery ? jsonQueryConverter.toLocalParams(v, outMap) : v.toString();
|
||||
}
|
||||
} else {
|
||||
newval[newval.length-1] = isQuery ? jsonQueryConverter.toLocalParams(val, outMap) : val.toString();
|
||||
}
|
||||
outMap.put(outKey, newval);
|
||||
} else {
|
||||
outMap.put(outKey, new String[]{isQuery ? jsonQueryConverter.toLocalParams(val, outMap) : val.toString()});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// queryParamName is something like json.facet or json.query, or just json...
|
||||
|
@ -295,6 +310,7 @@ public class RequestUtil {
|
|||
|
||||
Object o = ObjectBuilder.getVal(parser);
|
||||
if (!(o instanceof Map)) return;
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String,Object> map = (Map<String,Object>)o;
|
||||
// To make consistent with json.param handling, we should make query params come after json params (i.e. query params should
|
||||
// appear to overwrite json params.
|
||||
|
@ -310,7 +326,7 @@ public class RequestUtil {
|
|||
if (val == null) {
|
||||
params.remove(key);
|
||||
} else if (val instanceof List) {
|
||||
List lst = (List) val;
|
||||
List<?> lst = (List<?>) val;
|
||||
String[] vals = new String[lst.size()];
|
||||
for (int i = 0; i < vals.length; i++) {
|
||||
vals[i] = lst.get(i).toString();
|
||||
|
|
|
@ -316,7 +316,7 @@ public abstract class AbstractSpatialFieldType<T extends SpatialStrategy> extend
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
if (!minInclusive || !maxInclusive)
|
||||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Both sides of spatial range query must be inclusive: " + field.getName());
|
||||
Point p1 = SpatialUtils.parsePointSolrException(part1, ctx);
|
||||
|
|
|
@ -236,7 +236,7 @@ public class CollationField extends FieldType {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
String f = field.getName();
|
||||
BytesRef low = part1 == null ? null : getCollationKey(f, part1);
|
||||
BytesRef high = part2 == null ? null : getCollationKey(f, part2);
|
||||
|
|
|
@ -251,7 +251,7 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
|
|||
CurrencyValue valueDefault;
|
||||
valueDefault = value.convertTo(provider, defaultCurrency);
|
||||
|
||||
return getRangeQuery(parser, field, valueDefault, valueDefault, true, true);
|
||||
return getRangeQueryInternal(parser, field, valueDefault, valueDefault, true, true);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -317,7 +317,7 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, final boolean minInclusive, final boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, final boolean minInclusive, final boolean maxInclusive) {
|
||||
final CurrencyValue p1 = CurrencyValue.parse(part1, defaultCurrency);
|
||||
final CurrencyValue p2 = CurrencyValue.parse(part2, defaultCurrency);
|
||||
|
||||
|
@ -327,10 +327,10 @@ public class CurrencyFieldType extends FieldType implements SchemaAware, Resourc
|
|||
": range queries only supported when upper and lower bound have same currency.");
|
||||
}
|
||||
|
||||
return getRangeQuery(parser, field, p1, p2, minInclusive, maxInclusive);
|
||||
return getRangeQueryInternal(parser, field, p1, p2, minInclusive, maxInclusive);
|
||||
}
|
||||
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, final CurrencyValue p1, final CurrencyValue p2, final boolean minInclusive, final boolean maxInclusive) {
|
||||
private Query getRangeQueryInternal(QParser parser, SchemaField field, final CurrencyValue p1, final CurrencyValue p2, final boolean minInclusive, final boolean maxInclusive) {
|
||||
String currencyCode = (p1 != null) ? p1.getCurrencyCode() :
|
||||
(p2 != null) ? p2.getCurrencyCode() : defaultCurrency;
|
||||
|
||||
|
|
|
@ -143,7 +143,7 @@ public class DateRangeField extends AbstractSpatialPrefixTreeFieldType<NumberRan
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String startStr, String endStr, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String startStr, String endStr, boolean minInclusive, boolean maxInclusive) {
|
||||
if (parser == null) {//null when invoked by SimpleFacets. But getQueryFromSpatialArgs expects to get localParams.
|
||||
final SolrRequestInfo requestInfo = SolrRequestInfo.getRequestInfo();
|
||||
parser = new QParser("", null, requestInfo.getReq().getParams(), requestInfo.getReq()) {
|
||||
|
|
|
@ -63,13 +63,13 @@ public class EnumField extends AbstractEnumField {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
|
||||
Integer minValue = enumMapping.stringValueToIntValue(min);
|
||||
Integer maxValue = enumMapping.stringValueToIntValue(max);
|
||||
|
||||
if (field.multiValued() && field.hasDocValues() && !field.indexed()) {
|
||||
// for the multi-valued dv-case, the default rangeimpl over toInternal is correct
|
||||
return super.getRangeQuery(parser, field, minValue.toString(), maxValue.toString(), minInclusive, maxInclusive);
|
||||
return super.getSpecializedRangeQuery(parser, field, minValue.toString(), maxValue.toString(), minInclusive, maxInclusive);
|
||||
}
|
||||
Query query = null;
|
||||
final boolean matchOnly = field.hasDocValues() && !field.indexed();
|
||||
|
|
|
@ -57,7 +57,7 @@ public class EnumFieldType extends AbstractEnumField {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
|
||||
Integer minValue = enumMapping.stringValueToIntValue(min);
|
||||
Integer maxValue = enumMapping.stringValueToIntValue(max);
|
||||
|
||||
|
|
|
@ -43,6 +43,7 @@ import org.apache.lucene.index.Term;
|
|||
import org.apache.lucene.queries.function.ValueSource;
|
||||
import org.apache.lucene.search.BooleanClause;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.DocValuesFieldExistsQuery;
|
||||
import org.apache.lucene.search.DocValuesRewriteMethod;
|
||||
import org.apache.lucene.search.MultiTermQuery;
|
||||
import org.apache.lucene.search.PrefixQuery;
|
||||
|
@ -457,11 +458,13 @@ public abstract class FieldType extends FieldProperties {
|
|||
*
|
||||
* @param parser the {@link org.apache.solr.search.QParser} calling the method
|
||||
* @param sf the schema field
|
||||
* @param termStr the term string for prefix query
|
||||
* @param termStr the term string for prefix query, if blank then this query should match all docs with this field
|
||||
* @return a Query instance to perform prefix search
|
||||
*
|
||||
*/
|
||||
public Query getPrefixQuery(QParser parser, SchemaField sf, String termStr) {
|
||||
if ("".equals(termStr)) {
|
||||
return getRangeQuery(parser, sf, null, null, true, true);
|
||||
}
|
||||
PrefixQuery query = new PrefixQuery(new Term(sf.getName(), termStr));
|
||||
query.setRewriteMethod(sf.getType().getRewriteMethod(parser, sf));
|
||||
return query;
|
||||
|
@ -846,9 +849,36 @@ public abstract class FieldType extends FieldProperties {
|
|||
// trivial base case
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Returns a Query instance for doing range searches on this field type. {@link org.apache.solr.search.SolrQueryParser}
|
||||
* currently passes part1 and part2 as null if they are '*' respectively. minInclusive and maxInclusive are both true
|
||||
* currently by SolrQueryParser but that may change in the future. Also, other QueryParser implementations may have
|
||||
* different semantics.
|
||||
* <p>
|
||||
* If the field has docValues enabled, and the range query has '*'s or nulls on either side, then a {@link org.apache.lucene.search.DocValuesFieldExistsQuery} is returned.
|
||||
*
|
||||
* Sub-classes should override the "getSpecializedRangeQuery" method to provide their own range query implementation. They should strive to
|
||||
* handle nulls in part1 and/or part2 as well as unequal minInclusive and maxInclusive parameters gracefully.
|
||||
*
|
||||
*
|
||||
* @param parser the {@link org.apache.solr.search.QParser} calling the method
|
||||
* @param field the schema field
|
||||
* @param part1 the lower boundary of the range, nulls are allowed.
|
||||
* @param part2 the upper boundary of the range, nulls are allowed
|
||||
* @param minInclusive whether the minimum of the range is inclusive or not
|
||||
* @param maxInclusive whether the maximum of the range is inclusive or not
|
||||
* @return a Query instance to perform range search according to given parameters
|
||||
*
|
||||
*/
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
if (field.hasDocValues() && part1 == null && part2 == null) {
|
||||
return new DocValuesFieldExistsQuery(field.getName());
|
||||
} else {
|
||||
return getSpecializedRangeQuery(parser, field, part1, part2, minInclusive, maxInclusive);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a Query instance for doing range searches on this field type. {@link org.apache.solr.search.SolrQueryParser}
|
||||
* currently passes part1 and part2 as null if they are '*' respectively. minInclusive and maxInclusive are both true
|
||||
|
@ -867,20 +897,21 @@ public abstract class FieldType extends FieldProperties {
|
|||
* @return a Query instance to perform range search according to given parameters
|
||||
*
|
||||
*/
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
// TODO: change these all to use readableToIndexed/bytes instead (e.g. for unicode collation)
|
||||
final BytesRef miValue = part1 == null ? null : new BytesRef(toInternal(part1));
|
||||
final BytesRef maxValue = part2 == null ? null : new BytesRef(toInternal(part2));
|
||||
|
||||
if (field.hasDocValues() && !field.indexed()) {
|
||||
return SortedSetDocValuesField.newSlowRangeQuery(
|
||||
field.getName(),
|
||||
miValue, maxValue,
|
||||
minInclusive, maxInclusive);
|
||||
field.getName(),
|
||||
miValue, maxValue,
|
||||
minInclusive, maxInclusive);
|
||||
} else {
|
||||
SolrRangeQuery rangeQuery = new SolrRangeQuery(
|
||||
field.getName(),
|
||||
miValue, maxValue,
|
||||
minInclusive, maxInclusive);
|
||||
field.getName(),
|
||||
miValue, maxValue,
|
||||
minInclusive, maxInclusive);
|
||||
return rangeQuery;
|
||||
}
|
||||
}
|
||||
|
@ -891,7 +922,6 @@ public abstract class FieldType extends FieldProperties {
|
|||
* @param field The {@link org.apache.solr.schema.SchemaField} of the field to search
|
||||
* @param externalVal The String representation of the value to search
|
||||
* @return The {@link org.apache.lucene.search.Query} instance. This implementation returns a {@link org.apache.lucene.search.TermQuery} but overriding queries may not
|
||||
*
|
||||
*/
|
||||
public Query getFieldQuery(QParser parser, SchemaField field, String externalVal) {
|
||||
BytesRefBuilder br = new BytesRefBuilder();
|
||||
|
|
|
@ -103,7 +103,7 @@ public class LatLonType extends AbstractSubTypeFieldType implements SpatialQuery
|
|||
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
Point p1 = SpatialUtils.parsePointSolrException(part1, SpatialContext.GEO);
|
||||
Point p2 = SpatialUtils.parsePointSolrException(part2, SpatialContext.GEO);
|
||||
|
||||
|
|
|
@ -165,8 +165,8 @@ public abstract class PointField extends NumericFieldType {
|
|||
boolean maxInclusive);
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
|
||||
boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive,
|
||||
boolean maxInclusive) {
|
||||
if (!field.indexed() && field.hasDocValues()) {
|
||||
return getDocValuesRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
|
||||
} else if (field.indexed() && field.hasDocValues()) {
|
||||
|
@ -222,6 +222,9 @@ public abstract class PointField extends NumericFieldType {
|
|||
|
||||
@Override
|
||||
public Query getPrefixQuery(QParser parser, SchemaField sf, String termStr) {
|
||||
if ("".equals(termStr)) {
|
||||
return super.getPrefixQuery(parser, sf, termStr);
|
||||
}
|
||||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't run prefix queries on numeric fields");
|
||||
}
|
||||
|
||||
|
|
|
@ -128,7 +128,7 @@ public class PointType extends CoordinateFieldType implements SpatialQueryable {
|
|||
/**
|
||||
* Care should be taken in calling this with higher order dimensions for performance reasons.
|
||||
*/
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
//Query could look like: [x1,y1 TO x2,y2] for 2 dimension, but could look like: [x1,y1,z1 TO x2,y2,z2], and can be extrapolated to n-dimensions
|
||||
//thus, this query essentially creates a box, cube, etc.
|
||||
String[] p1 = parseCommaSeparatedList(part1, dimension);
|
||||
|
|
|
@ -158,7 +158,7 @@ public class TextField extends FieldType {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String part1, String part2, boolean minInclusive, boolean maxInclusive) {
|
||||
Analyzer multiAnalyzer = getMultiTermAnalyzer();
|
||||
BytesRef lower = analyzeMultiTerm(field.getName(), part1, multiAnalyzer);
|
||||
BytesRef upper = analyzeMultiTerm(field.getName(), part2, multiAnalyzer);
|
||||
|
|
|
@ -298,10 +298,10 @@ public class TrieField extends NumericFieldType {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Query getRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
|
||||
protected Query getSpecializedRangeQuery(QParser parser, SchemaField field, String min, String max, boolean minInclusive, boolean maxInclusive) {
|
||||
if (field.multiValued() && field.hasDocValues() && !field.indexed()) {
|
||||
// for the multi-valued dv-case, the default rangeimpl over toInternal is correct
|
||||
return super.getRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
|
||||
return super.getSpecializedRangeQuery(parser, field, min, max, minInclusive, maxInclusive);
|
||||
}
|
||||
int ps = precisionStep;
|
||||
Query query;
|
||||
|
|
|
@ -0,0 +1,500 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.solr.util;
|
||||
|
||||
import java.io.*;
|
||||
import java.nio.charset.Charset;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import java.net.URLDecoder;
|
||||
import java.util.UUID;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.regex.Matcher;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
|
||||
|
||||
/**
|
||||
* A command line tool for indexing Solr logs in the out-of-the-box log format.
|
||||
**/
|
||||
|
||||
public class SolrLogPostTool {
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
||||
if(args.length != 2) {
|
||||
CLIO.out("");
|
||||
CLIO.out("postlogs is a simple tool for indexing Solr logs.");
|
||||
CLIO.out("");
|
||||
CLIO.out("parameters:");
|
||||
CLIO.out("");
|
||||
CLIO.out("-- baseUrl: Example http://localhost:8983/solr/collection1");
|
||||
CLIO.out("-- rootDir: All files found at or below the root will be indexed.");
|
||||
CLIO.out("");
|
||||
CLIO.out("Sample syntax 1: ./bin/postlogs http://localhost:8983/solr/collection1 /user/foo/logs/solr.log");
|
||||
CLIO.out("Sample syntax 2: ./bin/postlogs http://localhost:8983/solr/collection1 /user/foo/logs");
|
||||
CLIO.out("");
|
||||
return;
|
||||
}
|
||||
|
||||
String baseUrl = args[0];
|
||||
String root = args[1];
|
||||
|
||||
HttpSolrClient.Builder builder = new HttpSolrClient.Builder();
|
||||
SolrClient client = null;
|
||||
try {
|
||||
client = builder.withBaseSolrUrl(baseUrl).build();
|
||||
File rf = new File(root);
|
||||
List<File> files = new ArrayList();
|
||||
gatherFiles(rf, files);
|
||||
int rec = 0;
|
||||
UpdateRequest request = new UpdateRequest();
|
||||
|
||||
for (File file : files) {
|
||||
|
||||
LineNumberReader bufferedReader = null;
|
||||
|
||||
try {
|
||||
bufferedReader = new LineNumberReader(new InputStreamReader(new FileInputStream(file), Charset.defaultCharset()));
|
||||
LogRecordReader recordReader = new LogRecordReader(bufferedReader);
|
||||
SolrInputDocument doc = null;
|
||||
String fileName = file.getName();
|
||||
while (true) {
|
||||
try {
|
||||
doc = recordReader.readRecord();
|
||||
} catch (Throwable t) {
|
||||
CLIO.err("Error reading log record:"+ bufferedReader.getLineNumber() +" from file:"+ fileName);
|
||||
CLIO.err(t.getMessage());
|
||||
continue;
|
||||
}
|
||||
|
||||
if(doc == null) {
|
||||
break;
|
||||
}
|
||||
|
||||
rec++;
|
||||
UUID id = UUID.randomUUID();
|
||||
doc.addField("id", id.toString());
|
||||
doc.addField("file_s", fileName);
|
||||
request.add(doc);
|
||||
if (rec == 300) {
|
||||
CLIO.out("Sending batch of 300 log records...");
|
||||
request.process(client);
|
||||
CLIO.out("Batch sent");
|
||||
request = new UpdateRequest();
|
||||
rec = 0;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
bufferedReader.close();
|
||||
}
|
||||
}
|
||||
|
||||
if (rec > 0) {
|
||||
//Process last batch
|
||||
CLIO.out("Sending last batch ...");
|
||||
request.process(client);
|
||||
client.commit();
|
||||
CLIO.out("Committed");
|
||||
}
|
||||
} finally {
|
||||
client.close();
|
||||
}
|
||||
}
|
||||
|
||||
static void gatherFiles(File rootFile, List<File> files) {
|
||||
|
||||
if(rootFile.isFile()) {
|
||||
files.add(rootFile);
|
||||
} else {
|
||||
File[] subFiles = rootFile.listFiles();
|
||||
for(File f : subFiles) {
|
||||
if(f.isFile()) {
|
||||
files.add(f);
|
||||
} else {
|
||||
gatherFiles(f, files);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static class LogRecordReader {
|
||||
|
||||
private BufferedReader bufferedReader;
|
||||
private String pushedBack = null;
|
||||
private boolean finished = false;
|
||||
private String cause;
|
||||
private Pattern p = Pattern.compile("^(\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d \\d\\d:\\d\\d\\:\\d\\d.\\d\\d\\d)");
|
||||
|
||||
public LogRecordReader(BufferedReader bufferedReader) throws IOException {
|
||||
this.bufferedReader = bufferedReader;
|
||||
}
|
||||
|
||||
public SolrInputDocument readRecord() throws IOException {
|
||||
while(true) {
|
||||
String line = null;
|
||||
|
||||
if(finished) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if(pushedBack != null) {
|
||||
line = pushedBack;
|
||||
pushedBack = null;
|
||||
} else {
|
||||
line = bufferedReader.readLine();
|
||||
}
|
||||
|
||||
if (line != null) {
|
||||
if (line.contains("QTime=")) {
|
||||
return parseQueryRecord(line);
|
||||
} else if (line.contains("Registered new searcher")) {
|
||||
return parseNewSearch(line);
|
||||
} else if (line.contains("path=/update")) {
|
||||
return parseUpdate(line);
|
||||
} else if (line.contains(" ERROR ")) {
|
||||
this.cause = null;
|
||||
return parseError(line, readTrace());
|
||||
} else if (line.contains("start commit")) {
|
||||
return parseCommit(line);
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private String readTrace() throws IOException {
|
||||
StringBuilder buf = new StringBuilder();
|
||||
buf.append("%html ");
|
||||
|
||||
while(true) {
|
||||
String line = bufferedReader.readLine();
|
||||
if (line == null) {
|
||||
finished = true;
|
||||
return buf.toString();
|
||||
} else {
|
||||
//look for a date at the beginning of the line
|
||||
//If it's not there then read into the stack trace buffer
|
||||
Matcher m = p.matcher(line);
|
||||
|
||||
if (!m.find() && buf.length() < 10000) {
|
||||
//Line does not start with a timestamp so append to the stack trace
|
||||
buf.append(line.replace("\t", " ") + "<br/>");
|
||||
if(line.startsWith("Caused by:")) {
|
||||
this.cause = line;
|
||||
}
|
||||
} else {
|
||||
pushedBack = line;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return buf.toString();
|
||||
}
|
||||
|
||||
private String parseDate(String line) {
|
||||
Matcher m = p.matcher(line);
|
||||
if(m.find()) {
|
||||
String date = m.group(1);
|
||||
return date.replace(" ", "T");
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
private SolrInputDocument parseError(String line, String trace) throws IOException {
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("date_dt", parseDate(line));
|
||||
doc.addField("type_s", "error");
|
||||
doc.addField("line_t", line);
|
||||
|
||||
//Don't include traces that have only the %html header.
|
||||
if(trace != null && trace.length() > 6) {
|
||||
doc.addField("stack_t", trace);
|
||||
}
|
||||
|
||||
if(this.cause != null) {
|
||||
doc.addField("root_cause_t", cause.replace("Caused by:", "").trim());
|
||||
}
|
||||
|
||||
doc.addField("collection_s", parseCollection(line));
|
||||
doc.addField("core_s", parseCore(line));
|
||||
doc.addField("shard_s", parseShard(line));
|
||||
doc.addField("replica_s", parseReplica(line));
|
||||
|
||||
return doc;
|
||||
}
|
||||
|
||||
private SolrInputDocument parseCommit(String line) throws IOException {
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("date_dt", parseDate(line));
|
||||
doc.addField("type_s", "commit");
|
||||
doc.addField("line_t", line);
|
||||
if(line.contains("softCommit=true")) {
|
||||
doc.addField("soft_commit_s", "true");
|
||||
} else {
|
||||
doc.addField("soft_commit_s", "false");
|
||||
}
|
||||
|
||||
if(line.contains("openSearcher=true")) {
|
||||
doc.addField("open_searcher_s", "true");
|
||||
} else {
|
||||
doc.addField("open_searcher_s", "false");
|
||||
}
|
||||
|
||||
doc.addField("collection_s", parseCollection(line));
|
||||
doc.addField("core_s", parseCore(line));
|
||||
doc.addField("shard_s", parseShard(line));
|
||||
doc.addField("replica_s", parseReplica(line));
|
||||
|
||||
return doc;
|
||||
}
|
||||
|
||||
private SolrInputDocument parseQueryRecord(String line) {
|
||||
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("date_dt", parseDate(line));
|
||||
doc.addField("qtime_i", parseQTime(line));
|
||||
doc.addField("status_s", parseStatus(line));
|
||||
|
||||
if(line.contains("hits=")) {
|
||||
doc.addField("hits_l", parseHits(line));
|
||||
}
|
||||
|
||||
String params = parseParams(line);
|
||||
doc.addField("params_t", params);
|
||||
addParams(doc, params);
|
||||
|
||||
doc.addField("collection_s", parseCollection(line));
|
||||
doc.addField("core_s", parseCore(line));
|
||||
doc.addField("node_s", parseNode(line));
|
||||
doc.addField("shard_s", parseShard(line));
|
||||
doc.addField("replica_s", parseReplica(line));
|
||||
|
||||
String path = parsePath(line);
|
||||
doc.addField("path_s", path);
|
||||
if(path != null && path.contains("/admin")) {
|
||||
doc.addField("type_s", "admin");
|
||||
} else {
|
||||
doc.addField("type_s", "query");
|
||||
}
|
||||
|
||||
return doc;
|
||||
}
|
||||
|
||||
private SolrInputDocument parseNewSearch(String line) {
|
||||
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("date_dt", parseDate(line));
|
||||
doc.addField("core_s", parseNewSearcherCore(line));
|
||||
doc.addField("type_s", "newSearcher");
|
||||
doc.addField("line_t", line);
|
||||
|
||||
return doc;
|
||||
}
|
||||
|
||||
private String parseCollection(String line) {
|
||||
char[] ca = {' ', ']'};
|
||||
String parts[] = line.split("c:");
|
||||
if(parts.length >= 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private SolrInputDocument parseUpdate(String line) {
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("date_dt", parseDate(line));
|
||||
|
||||
if(line.contains("deleteByQuery=")) {
|
||||
doc.addField("type_s", "deleteByQuery");
|
||||
} else if(line.contains("delete=")) {
|
||||
doc.addField("type_s", "delete");
|
||||
} else {
|
||||
doc.addField("type_s", "update");
|
||||
}
|
||||
|
||||
doc.addField("collection_s", parseCollection(line));
|
||||
doc.addField("core_s", parseCore(line));
|
||||
doc.addField("shard_s", parseShard(line));
|
||||
doc.addField("replica_s", parseReplica(line));
|
||||
doc.addField("line_t", line);
|
||||
|
||||
return doc;
|
||||
}
|
||||
|
||||
private String parseNewSearcherCore(String line) {
|
||||
char[] ca = {']'};
|
||||
String parts[] = line.split("\\[");
|
||||
if(parts.length > 3) {
|
||||
return readUntil(parts[2], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseCore(String line) {
|
||||
char[] ca = {' ', ']'};
|
||||
String parts[] = line.split("x:");
|
||||
if(parts.length >= 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseShard(String line) {
|
||||
char[] ca = {' ', ']'};
|
||||
String parts[] = line.split("s:");
|
||||
if(parts.length >= 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseReplica(String line) {
|
||||
char[] ca = {' ', ']'};
|
||||
String parts[] = line.split("r:");
|
||||
if(parts.length >= 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private String parsePath(String line) {
|
||||
char[] ca = {' '};
|
||||
String parts[] = line.split(" path=");
|
||||
if(parts.length == 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseQTime(String line) {
|
||||
char[] ca = {'\n', '\r'};
|
||||
String parts[] = line.split(" QTime=");
|
||||
if(parts.length == 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseNode(String line) {
|
||||
char[] ca = {' ', ']'};
|
||||
String parts[] = line.split("n:");
|
||||
if(parts.length == 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseStatus(String line) {
|
||||
char[] ca = {' ', '\n', '\r'};
|
||||
String parts[] = line.split(" status=");
|
||||
if(parts.length == 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseHits(String line) {
|
||||
char[] ca = {' '};
|
||||
String parts[] = line.split(" hits=");
|
||||
if(parts.length == 2) {
|
||||
return readUntil(parts[1], ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String parseParams(String line) {
|
||||
char[] ca = {'}'};
|
||||
String parts[] = line.split(" params=");
|
||||
if(parts.length == 2) {
|
||||
return readUntil(parts[1].substring(1), ca);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private String readUntil(String s, char[] chars) {
|
||||
StringBuilder builder = new StringBuilder();
|
||||
for(int i=0; i<s.length(); i++) {
|
||||
char a = s.charAt(i);
|
||||
for(char c : chars) {
|
||||
if(a == c) {
|
||||
return builder.toString();
|
||||
}
|
||||
}
|
||||
builder.append(a);
|
||||
}
|
||||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
private void addParams(SolrInputDocument doc, String params) {
|
||||
String[] pairs = params.split("&");
|
||||
for(String pair : pairs) {
|
||||
String[] parts = pair.split("=");
|
||||
if(parts.length == 2 && parts[0].equals("q")) {
|
||||
String dq = URLDecoder.decode(parts[1], Charset.defaultCharset());
|
||||
doc.addField("q_s", dq);
|
||||
doc.addField("q_t", dq);
|
||||
}
|
||||
|
||||
if(parts[0].equals("rows")) {
|
||||
String dr = URLDecoder.decode(parts[1], Charset.defaultCharset());
|
||||
doc.addField("rows_i", dr);
|
||||
}
|
||||
|
||||
if(parts[0].equals("distrib")) {
|
||||
String dr = URLDecoder.decode(parts[1], Charset.defaultCharset());
|
||||
doc.addField("distrib_s", dr);
|
||||
}
|
||||
|
||||
if(parts[0].equals("isShard")) {
|
||||
String dr = URLDecoder.decode(parts[1], Charset.defaultCharset());
|
||||
doc.addField("isShard_s", dr);
|
||||
}
|
||||
|
||||
if(parts[0].equals("wt")) {
|
||||
String dr = URLDecoder.decode(parts[1], Charset.defaultCharset());
|
||||
doc.addField("wt_s", dr);
|
||||
}
|
||||
|
||||
if(parts[0].equals("facet")) {
|
||||
String dr = URLDecoder.decode(parts[1], Charset.defaultCharset());
|
||||
doc.addField("facet_s", dr);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -686,23 +686,33 @@
|
|||
<dynamicField name="*_l_dv" type="long" indexed="true" stored="true" docValues="true" multiValued="false"/>
|
||||
<dynamicField name="*_ls_dv" type="long" indexed="true" stored="true" docValues="true" multiValued="true"/>
|
||||
<dynamicField name="*_l_dvo" type="long" indexed="false" stored="true" docValues="true"/>
|
||||
|
||||
|
||||
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
|
||||
<dynamicField name="*_ds" type="double" indexed="true" stored="true" multiValued="true"/>
|
||||
<dynamicField name="*_d_dv" type="double" indexed="true" stored="true" docValues="true" multiValued="false"/>
|
||||
<dynamicField name="*_ds_dv" type="double" indexed="true" stored="true" docValues="true" multiValued="true"/>
|
||||
<dynamicField name="*_d_dvo" type="double" indexed="false" stored="true" docValues="true"/>
|
||||
|
||||
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
|
||||
<dynamicField name="*_dts" type="date" indexed="true" stored="true" multiValued="true"/>
|
||||
<dynamicField name="*_dt_dv" type="date" indexed="true" stored="true" docValues="true" multiValued="false"/>
|
||||
<dynamicField name="*_dts_dv" type="date" indexed="true" stored="true" docValues="true" multiValued="true"/>
|
||||
<dynamicField name="*_dt_dvo" type="date" indexed="false" stored="true" docValues="true"/>
|
||||
|
||||
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
|
||||
<!-- :TODO: why are these identical?!?!?! -->
|
||||
<dynamicField name="*_s" type="string" indexed="true" stored="true" multiValued="true"/>
|
||||
<dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true"/>
|
||||
<dynamicField name="*_s_dv" type="string" indexed="true" stored="true" docValues="true"/>
|
||||
<dynamicField name="*_sdv" type="string" indexed="false" stored="false" docValues="true" useDocValuesAsStored="true"/>
|
||||
<dynamicField name="*_bdv" type="boolean" indexed="false" stored="false" docValues="true" useDocValuesAsStored="true"/>
|
||||
<dynamicField name="*_ss_dv" type="string" indexed="true" stored="true" docValues="true" multiValued="true"/>
|
||||
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
|
||||
<dynamicField name="*_tt" type="text" indexed="true" stored="true"/>
|
||||
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
|
||||
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
|
||||
<dynamicField name="*_bs" type="boolean" indexed="true" stored="true" multiValued="true"/>
|
||||
<dynamicField name="*_bdv" type="boolean" indexed="false" stored="false" docValues="true" useDocValuesAsStored="true"/>
|
||||
<dynamicField name="*_b_dv" type="boolean" indexed="true" stored="true" docValues="true"/>
|
||||
<dynamicField name="*_bs_dv" type="boolean" indexed="true" stored="true" docValues="true" multiValued="true"/>
|
||||
|
||||
<dynamicField name="*_pi" type="pint" indexed="true" multiValued="false"/>
|
||||
<dynamicField name="*_pl" type="plong" indexed="true" multiValued="false"/>
|
||||
|
|
|
@@ -93,14 +93,11 @@ public class TestCloudConsistency extends SolrCloudTestCase {
|
|||
}
|
||||
|
||||
@Test
|
||||
//commented 2-Aug-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 12-Jun-2018
|
||||
@BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 6-Sep-2018
|
||||
public void testOutOfSyncReplicasCannotBecomeLeader() throws Exception {
|
||||
testOutOfSyncReplicasCannotBecomeLeader(false);
|
||||
}
|
||||
|
||||
@Test
|
||||
// commented out on: 24-Dec-2018 @LuceneTestCase.BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 2-Aug-2018
|
||||
public void testOutOfSyncReplicasCannotBecomeLeaderAfterRestart() throws Exception {
|
||||
testOutOfSyncReplicasCannotBecomeLeader(true);
|
||||
}
|
||||
|
@@ -247,6 +244,7 @@ public class TestCloudConsistency extends SolrCloudTestCase {
|
|||
private void addDoc(String collection, int docId, JettySolrRunner solrRunner) throws IOException, SolrServerException {
|
||||
try (HttpSolrClient solrClient = new HttpSolrClient.Builder(solrRunner.getBaseUrl().toString()).build()) {
|
||||
solrClient.add(collection, new SolrInputDocument("id", String.valueOf(docId), "fieldName_s", String.valueOf(docId)));
|
||||
solrClient.commit(collection);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -0,0 +1,251 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.solr.cloud;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.invoke.MethodHandles;
|
||||
import java.net.URI;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
||||
|
||||
import org.apache.solr.JSONTestUtil;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.cloud.SocketProxy;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.client.solrj.response.RequestStatusState;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.cloud.Replica;
|
||||
import org.apache.solr.common.util.TimeSource;
|
||||
import org.apache.solr.update.DirectUpdateHandler2;
|
||||
import org.apache.solr.util.TestInjection;
|
||||
import org.apache.solr.util.TimeOut;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-13486")
|
||||
public class TestTlogReplayVsRecovery extends SolrCloudTestCase {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private static final String COLLECTION = "collecion_with_slow_tlog_recovery";
|
||||
|
||||
private JettySolrRunner NODE0;
|
||||
private JettySolrRunner NODE1;
|
||||
private Map<JettySolrRunner, SocketProxy> proxies;
|
||||
private Map<URI, JettySolrRunner> jettys;
|
||||
|
||||
@Before
|
||||
public void setupCluster() throws Exception {
|
||||
// we want to ensure there is tlog replay on the leader after we restart it,
|
||||
// so in addition to not committing the docs we add during network partition
|
||||
// we also want to ensure that our leader doesn't do a "Commit on close"
|
||||
DirectUpdateHandler2.commitOnClose = false;
|
||||
|
||||
System.setProperty("solr.directoryFactory", "solr.StandardDirectoryFactory");
|
||||
System.setProperty("solr.ulog.numRecordsToKeep", "1000");
|
||||
System.setProperty("leaderVoteWait", "60000");
|
||||
|
||||
configureCluster(2)
|
||||
.addConfig("conf", configset("cloud-minimal"))
|
||||
.configure();
|
||||
|
||||
NODE0 = cluster.getJettySolrRunner(0);
|
||||
NODE1 = cluster.getJettySolrRunner(1);
|
||||
|
||||
// Add proxies
|
||||
proxies = new HashMap<>(cluster.getJettySolrRunners().size());
|
||||
jettys = new HashMap<>();
|
||||
for (JettySolrRunner jetty:cluster.getJettySolrRunners()) {
|
||||
SocketProxy proxy = new SocketProxy();
|
||||
jetty.setProxyPort(proxy.getListenPort());
|
||||
cluster.stopJettySolrRunner(jetty); // TODO: can we avoid this restart?
|
||||
cluster.startJettySolrRunner(jetty);
|
||||
proxy.open(jetty.getBaseUrl().toURI());
|
||||
log.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
|
||||
proxies.put(jetty, proxy);
|
||||
jettys.put(proxy.getUrl(), jetty);
|
||||
}
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDownCluster() throws Exception {
|
||||
TestInjection.reset();
|
||||
DirectUpdateHandler2.commitOnClose = true;
|
||||
|
||||
if (null != proxies) {
|
||||
for (SocketProxy proxy : proxies.values()) {
|
||||
proxy.close();
|
||||
}
|
||||
proxies = null;
|
||||
}
|
||||
jettys = null;
|
||||
System.clearProperty("solr.directoryFactory");
|
||||
System.clearProperty("solr.ulog.numRecordsToKeep");
|
||||
System.clearProperty("leaderVoteWait");
|
||||
|
||||
shutdownCluster();
|
||||
}
|
||||
|
||||
public void testManyDocsInTlogReplayWhileReplicaIsTryingToRecover() throws Exception {
|
||||
final int committedDocs = 3;
|
||||
final int uncommittedDocs = 50;
|
||||
|
||||
log.info("Create Collection...");
|
||||
assertEquals(RequestStatusState.COMPLETED,
|
||||
CollectionAdminRequest.createCollection(COLLECTION, 1, 2)
|
||||
.setCreateNodeSet("")
|
||||
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT));
|
||||
assertEquals(RequestStatusState.COMPLETED,
|
||||
CollectionAdminRequest.addReplicaToShard(COLLECTION, "shard1")
|
||||
.setNode(NODE0.getNodeName())
|
||||
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT));
|
||||
|
||||
waitForState("Timeout waiting for shard leader", COLLECTION, clusterShape(1, 1));
|
||||
|
||||
assertEquals(RequestStatusState.COMPLETED,
|
||||
CollectionAdminRequest.addReplicaToShard(COLLECTION, "shard1")
|
||||
.setNode(NODE1.getNodeName())
|
||||
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT));
|
||||
|
||||
cluster.waitForActiveCollection(COLLECTION, 1, 2);
|
||||
|
||||
waitForState("Timeout waiting for 1x2 collection", COLLECTION, clusterShape(1, 2));
|
||||
|
||||
final Replica leader = getCollectionState(COLLECTION).getSlice("shard1").getLeader();
|
||||
assertEquals("Sanity check failed", NODE0.getNodeName(), leader.getNodeName());
|
||||
|
||||
log.info("Add and commit a {} docs...", committedDocs);
|
||||
addDocs(true, committedDocs, 1);
|
||||
|
||||
log.info("Partition nodes...");
|
||||
proxies.get(NODE0).close();
|
||||
proxies.get(NODE1).close();
|
||||
|
||||
log.info("Adding {} (uncommitted) docs during network partition....", uncommittedDocs);
|
||||
addDocs(false, uncommittedDocs, committedDocs + 1);
|
||||
|
||||
log.info("Stopping leader node...");
|
||||
assertEquals("Something broke our expected commitOnClose", false, DirectUpdateHandler2.commitOnClose);
|
||||
NODE0.stop();
|
||||
cluster.waitForJettyToStop(NODE0);
|
||||
|
||||
log.info("Un-Partition replica (NODE1)...");
|
||||
proxies.get(NODE1).reopen();
|
||||
|
||||
waitForState("Timeout waiting for leader goes DOWN", COLLECTION, (liveNodes, collectionState)
|
||||
-> collectionState.getReplica(leader.getName()).getState() == Replica.State.DOWN);
|
||||
|
||||
TimeOut timeOut = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
|
||||
while (!timeOut.hasTimedOut()) {
|
||||
Replica newLeader = getCollectionState(COLLECTION).getLeader("shard1");
|
||||
if (newLeader != null && !newLeader.getName().equals(leader.getName()) && newLeader.getState() == Replica.State.ACTIVE) {
|
||||
fail("Out of sync replica became leader " + newLeader);
|
||||
}
|
||||
}
|
||||
|
||||
log.info("Enabling TestInjection.updateLogReplayRandomPause");
|
||||
TestInjection.updateLogReplayRandomPause = "true:100";
|
||||
|
||||
log.info("Un-Partition & restart leader (NODE0)...");
|
||||
proxies.get(NODE0).reopen();
|
||||
NODE0.start();
|
||||
|
||||
log.info("Waiting for all nodes and active collection...");
|
||||
|
||||
cluster.waitForAllNodes(30);
|
||||
waitForState("Timeout waiting for leader", COLLECTION, (liveNodes, collectionState) -> {
|
||||
Replica newLeader = collectionState.getLeader("shard1");
|
||||
return newLeader != null && newLeader.getName().equals(leader.getName());
|
||||
});
|
||||
waitForState("Timeout waiting for active collection", COLLECTION, clusterShape(1, 2));
|
||||
|
||||
cluster.waitForActiveCollection(COLLECTION, 1, 2);
|
||||
|
||||
log.info("Check docs on both replicas...");
|
||||
assertDocsExistInBothReplicas(1, committedDocs + uncommittedDocs);
|
||||
|
||||
log.info("Test ok, delete collection...");
|
||||
CollectionAdminRequest.deleteCollection(COLLECTION).process(cluster.getSolrClient());
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds the specified number of docs directly to the leader,
|
||||
* using increasing docIds beginning with startId. Commits if and only if the boolean is true.
|
||||
*/
|
||||
private void addDocs(final boolean commit, final int numDocs, final int startId) throws SolrServerException, IOException {
|
||||
|
||||
List<SolrInputDocument> docs = new ArrayList<>(numDocs);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
int id = startId + i;
|
||||
docs.add(new SolrInputDocument("id", String.valueOf(id), "fieldName_s", String.valueOf(id)));
|
||||
}
|
||||
// For simplicity, we always add our docs directly to NODE0
|
||||
// (where the leader should be) and bypass the proxy...
|
||||
try (HttpSolrClient client = getHttpSolrClient(NODE0.getBaseUrl().toString())) {
|
||||
assertEquals(0, client.add(COLLECTION, docs).getStatus());
|
||||
if (commit) {
|
||||
assertEquals(0, client.commit(COLLECTION).getStatus());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* uses distrib=false RTG requests to verify that every doc between firstDocId and lastDocId
|
||||
* (inclusive) can be found on both the leader and the replica
|
||||
*/
|
||||
private void assertDocsExistInBothReplicas(int firstDocId,
|
||||
int lastDocId) throws Exception {
|
||||
try (HttpSolrClient leaderSolr = getHttpSolrClient(NODE0.getBaseUrl().toString());
|
||||
HttpSolrClient replicaSolr = getHttpSolrClient(NODE1.getBaseUrl().toString())) {
|
||||
for (int d = firstDocId; d <= lastDocId; d++) {
|
||||
String docId = String.valueOf(d);
|
||||
assertDocExists("leader", leaderSolr, docId);
|
||||
assertDocExists("replica", replicaSolr, docId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* uses distrib=false RTG requests to verify that the specified docId can be found using the
|
||||
* specified solr client
|
||||
*/
|
||||
private void assertDocExists(final String clientName, final HttpSolrClient client, final String docId) throws Exception {
|
||||
final QueryResponse rsp = (new QueryRequest(params("qt", "/get",
|
||||
"id", docId,
|
||||
"_trace", clientName,
|
||||
"distrib", "false")))
|
||||
.process(client, COLLECTION);
|
||||
assertEquals(0, rsp.getStatus());
|
||||
|
||||
String match = JSONTestUtil.matchObj("/id", rsp.getResponse().get("doc"), docId);
|
||||
assertTrue("Doc with id=" + docId + " not found in " + clientName
|
||||
+ " due to: " + match + "; rsp="+rsp, match == null);
|
||||
}
|
||||
|
||||
}
|
|
@@ -16,6 +16,7 @@
|
|||
*/
|
||||
package org.apache.solr.search;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
@@ -94,6 +95,23 @@ public class QueryEqualityTest extends SolrTestCaseJ4 {
|
|||
" +apache +solr");
|
||||
}
|
||||
|
||||
public void testQueryLuceneAllDocsWithField() throws Exception {
|
||||
// for all "primative" types, 'foo:*' should be functionally equivilent to "foo:[* TO *]"
|
||||
// whatever implementation/optimizations exist for one syntax, should exist for the other syntax as well
|
||||
// (regardless of docValues, multivalued, etc...)
|
||||
for (String field : Arrays.asList("foo_sI", "foo_sS", "foo_s1", "foo_s",
|
||||
"t_foo", "tv_foo", "tv_mv_foo",
|
||||
"foo_b",
|
||||
"foo_i", "foo_is", "foo_i_dvo",
|
||||
"foo_l", "foo_ll", "foo_l_dvo",
|
||||
"foo_f", "foo_f_dvo",
|
||||
"foo_d",
|
||||
"foo_dt")) {
|
||||
|
||||
assertQueryEquals("lucene", field + ":*", field + ":[* TO *]");
|
||||
}
|
||||
}
|
||||
|
||||
public void testQueryPrefix() throws Exception {
|
||||
SolrQueryRequest req = req("myField","foo_s");
|
||||
try {
|
||||
|
|
|
@@ -17,6 +17,7 @@
|
|||
package org.apache.solr.search;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@@ -28,6 +29,7 @@ import org.apache.lucene.search.BooleanClause;
|
|||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.BoostQuery;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.search.DocValuesFieldExistsQuery;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.PointInSetQuery;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
@@ -35,6 +37,7 @@ import org.apache.lucene.search.TermInSetQuery;
|
|||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.params.MapSolrParams;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
|
@@ -44,7 +47,9 @@ import org.apache.solr.metrics.SolrMetricManager;
|
|||
import org.apache.solr.parser.QueryParser;
|
||||
import org.apache.solr.query.FilterQuery;
|
||||
import org.apache.solr.request.SolrQueryRequest;
|
||||
import org.apache.solr.schema.IndexSchema;
|
||||
import org.apache.solr.schema.SchemaField;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
|
@@ -59,6 +64,13 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
|
|||
createIndex();
|
||||
}
|
||||
|
||||
private static final List<String> HAS_VAL_FIELDS = new ArrayList<String>(31);
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
HAS_VAL_FIELDS.clear();
|
||||
}
|
||||
|
||||
public static void createIndex() {
|
||||
String v;
|
||||
v = "how now brown cow";
|
||||
|
@@ -73,12 +85,55 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
|
|||
assertU(adoc("id", "13", "eee_s", "'balance'", "rrr_s", "/leading_slash"));
|
||||
|
||||
assertU(adoc("id", "20", "syn", "wifi ATM"));
|
||||
|
||||
|
||||
{ // make a doc that has a value in *lots* of fields that no other doc has
|
||||
SolrInputDocument doc = sdoc("id", "999");
|
||||
|
||||
// numbers...
|
||||
for (String t : Arrays.asList("i", "l", "f", "d")) {
|
||||
for (String s : Arrays.asList("", "s", "_dv", "s_dv", "_dvo")) {
|
||||
final String f = "has_val_" + t + s;
|
||||
HAS_VAL_FIELDS.add(f);
|
||||
doc.addField(f, "42");
|
||||
}
|
||||
}
|
||||
// boolean...
|
||||
HAS_VAL_FIELDS.add("has_val_b");
|
||||
doc.addField("has_val_b", "false");
|
||||
// dates (and strings/text -- they don't care about the format)...
|
||||
for (String s : Arrays.asList("dt", "s", "s1", "t")) {
|
||||
final String f = "has_val_" + s;
|
||||
HAS_VAL_FIELDS.add(f);
|
||||
doc.addField(f, "2019-01-12T00:00:00Z");
|
||||
}
|
||||
assertU(adoc(doc));
|
||||
}
|
||||
|
||||
assertU(adoc("id", "30", "shingle23", "A B X D E"));
|
||||
|
||||
assertU(commit());
|
||||
}
|
||||
|
||||
public void testDocsWithValuesInField() throws Exception {
|
||||
assertEquals("someone changed the test setup of HAS_VAL_FIELDS, w/o updating the sanity check",
|
||||
25, HAS_VAL_FIELDS.size());
|
||||
for (String f : HAS_VAL_FIELDS) {
|
||||
// for all of these fields, these 2 syntaxes should be functionally equivalent
|
||||
// in matching the one doc that contains these fields
|
||||
for (String q : Arrays.asList( f + ":*", f + ":[* TO *]" )) {
|
||||
assertJQ(req("q", q)
|
||||
, "/response/numFound==1"
|
||||
, "/response/docs/[0]/id=='999'"
|
||||
);
|
||||
// the same syntaxes should be valid even if no doc has the field...
|
||||
assertJQ(req("q", "bogus___" + q)
|
||||
, "/response/numFound==0"
|
||||
);
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testPhrase() {
|
||||
// "text" field's type has WordDelimiterGraphFilter (WDGFF) and autoGeneratePhraseQueries=true
|
||||
|
@@ -1135,14 +1190,14 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
|
|||
"is_dv", "fs_dv", "ds_dv", "ls_dv",
|
||||
"i_dvo", "f_dvo", "d_dvo", "l_dvo",
|
||||
};
|
||||
|
||||
|
||||
for (String suffix:fieldSuffix) {
|
||||
//Good queries
|
||||
qParser = QParser.getParser("foo_" + suffix + ":(1 2 3 4 5 6 7 8 9 10 20 19 18 17 16 15 14 13 12 25)", req);
|
||||
qParser.setIsFilter(true);
|
||||
qParser.getQuery();
|
||||
}
|
||||
|
||||
|
||||
for (String suffix:fieldSuffix) {
|
||||
qParser = QParser.getParser("foo_" + suffix + ":(1 2 3 4 5 6 7 8 9 10 20 19 18 17 16 15 14 13 12 NOT_A_NUMBER)", req);
|
||||
qParser.setIsFilter(true); // this may change in the future
|
||||
|
@@ -1150,7 +1205,39 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 {
|
|||
assertEquals(SolrException.ErrorCode.BAD_REQUEST.code, e.code());
|
||||
assertTrue("Unexpected exception: " + e.getMessage(), e.getMessage().contains("Invalid Number: NOT_A_NUMBER"));
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testFieldExistsQueries() throws SyntaxError {
|
||||
SolrQueryRequest req = req();
|
||||
IndexSchema indexSchema = h.getCore().getLatestSchema();
|
||||
String[] fieldSuffix = new String[] {
|
||||
"ti", "tf", "td", "tl", "tdt",
|
||||
"pi", "pf", "pd", "pl", "pdt",
|
||||
"i", "f", "d", "l", "dt", "s", "b",
|
||||
"is", "fs", "ds", "ls", "dts", "ss", "bs",
|
||||
"i_dv", "f_dv", "d_dv", "l_dv", "dt_dv", "s_dv", "b_dv",
|
||||
"is_dv", "fs_dv", "ds_dv", "ls_dv", "dts_dv", "ss_dv", "bs_dv",
|
||||
"i_dvo", "f_dvo", "d_dvo", "l_dvo", "dt_dvo",
|
||||
"t"
|
||||
};
|
||||
String[] existenceQueries = new String[] {
|
||||
"*", "[* TO *]"
|
||||
};
|
||||
|
||||
for (String existenceQuery : existenceQueries) {
|
||||
for (String suffix : fieldSuffix) {
|
||||
String field = "foo_" + suffix;
|
||||
String query = field + ":" + existenceQuery;
|
||||
QParser qParser = QParser.getParser(query, req);
|
||||
if (indexSchema.getField(field).hasDocValues()) {
|
||||
assertTrue("Field has docValues, so existence query \"" + query + "\" should return DocValuesFieldExistsQuery", qParser.getQuery() instanceof DocValuesFieldExistsQuery);
|
||||
} else {
|
||||
assertFalse("Field doesn't have docValues, so existence query \"" + query + "\" should not return DocValuesFieldExistsQuery", qParser.getQuery() instanceof DocValuesFieldExistsQuery);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -56,6 +56,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
private static int origTableSize;
|
||||
private static FacetField.FacetMethod origDefaultFacetMethod;
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
@BeforeClass
|
||||
public static void beforeTests() throws Exception {
|
||||
systemSetPropertySolrDisableShardsWhitelist("true");
|
||||
|
@@ -83,6 +84,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
@AfterClass
|
||||
public static void afterTests() throws Exception {
|
||||
systemClearPropertySolrDisableShardsWhitelist();
|
||||
|
@@ -1063,7 +1065,6 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
}
|
||||
|
||||
public static void doStatsTemplated(Client client, ModifiableSolrParams p) throws Exception {
|
||||
int numShards = client.local() ? 1 : client.getClientProvider().all().size();
|
||||
p.set("Z_num_i", "Z_" + p.get("num_i") );
|
||||
p.set("Z_num_l", "Z_" + p.get("num_l") );
|
||||
p.set("sparse_num_d", "sparse_" + p.get("num_d") );
|
||||
|
@@ -2290,6 +2291,19 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
"}"
|
||||
);
|
||||
|
||||
//test filter using queries from json.queries
|
||||
client.testJQ(params(p, "q", "*:*"
|
||||
, "json.queries", "{catS:{'#cat_sA': '${cat_s}:A'}, ff:[{'#id_1':'-id:1'},{'#id_2':'-id:2'}]}"
|
||||
, "json.facet", "{" +
|
||||
",t_filt1:{${terms} type:terms, field:${cat_s}, domain:{filter:{param:catS} } }" + // test filter via "param" type from .queries
|
||||
",t_filt2:{${terms} type:terms, field:${cat_s}, domain:{filter:{param:ff}} }" + // test multi-valued query parameter from .queries
|
||||
"}"
|
||||
)
|
||||
, "facets=={ count:6, " +
|
||||
",t_filt1:{ buckets:[ {val:A, count:2}] } " +
|
||||
",t_filt2:{ buckets:[ {val:B, count:2}, {val:A, count:1}] } " +
|
||||
"}"
|
||||
);
|
||||
|
||||
// test acc reuse (i.e. reset() method). This is normally used for stats that are not calculated in the first phase,
|
||||
// currently non-sorting stats.
|
||||
|
@@ -2907,7 +2921,7 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
int commitPercent = 10;
|
||||
int ndocs=1000;
|
||||
|
||||
Map<Integer, Map<Integer, List<Integer>>> model = new HashMap(); // cat->where->list<ids>
|
||||
Map<Integer, Map<Integer, List<Integer>>> model = new HashMap<>(); // cat->where->list<ids>
|
||||
for (int i=0; i<ndocs; i++) {
|
||||
Integer cat = r.nextInt(numCat);
|
||||
Integer where = r.nextInt(numWhere);
|
||||
|
@@ -3328,7 +3342,6 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
}
|
||||
|
||||
public void doTestErrors(Client client) throws Exception {
|
||||
ModifiableSolrParams p = params("rows", "0");
|
||||
client.deleteByQuery("*:*", null);
|
||||
|
||||
try {
|
||||
|
@@ -3646,11 +3659,18 @@ public class TestJsonFacets extends SolrTestCaseHS {
|
|||
req("q", "*:*", "rows", "0", "json.facet", "{cat_s:{type:terms,field:cat_s,sort:[\"count desc\"]}}"),
|
||||
SolrException.ErrorCode.BAD_REQUEST);
|
||||
|
||||
|
||||
assertQEx("Should fail as facet is not of type map",
|
||||
"Expected Map for 'facet', received ArrayList=[{}]",
|
||||
req("q", "*:*", "rows", "0", "json.facet", "[{}]"), SolrException.ErrorCode.BAD_REQUEST);
|
||||
|
||||
assertQEx("Should fail as queries is not of type map",
|
||||
"Expected Map for 'queries', received [{}]",
|
||||
req("q", "*:*", "rows", "0", "json.queries", "[{}]"), SolrException.ErrorCode.BAD_REQUEST);
|
||||
|
||||
assertQEx("Should fail as queries are null in JSON",
|
||||
"Expected Map for 'queries', received null",
|
||||
req("json", "{query:\"*:*\", queries:null}"), SolrException.ErrorCode.BAD_REQUEST);
|
||||
|
||||
// range facets
|
||||
assertQEx("Should fail as 'other' is of type Map",
|
||||
"Expected list of string or comma separated string values for 'other', " +
|
||||
|
|
|
@@ -0,0 +1,258 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.solr.util;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.StringReader;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.SolrInputField;
|
||||
import org.apache.solr.util.SolrLogPostTool.LogRecordReader;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
public class SolrLogPostToolTest extends SolrTestCaseJ4 {
|
||||
|
||||
|
||||
@Test
|
||||
public void testQueryRecord() throws Exception{
|
||||
String record = "2019-12-09 15:05:01.931 INFO (qtp2103763750-21) [c:logs4 s:shard1 r:core_node2 x:logs4_shard1_replica_n1] o.a.s.c.S.Request [logs4_shard1_replica_n1] webapp=/solr path=/select params={q=*:*&_=1575835181759&isShard=true&wt=javabin&distrib=false} hits=234868 status=0 QTime=8\n";
|
||||
List<SolrInputDocument> docs = readDocs(record);
|
||||
assertEquals(docs.size(), 1);
|
||||
SolrInputDocument doc = docs.get(0);
|
||||
|
||||
SolrInputField query = doc.getField("q_s");
|
||||
SolrInputField date = doc.getField("date_dt");
|
||||
SolrInputField collection = doc.getField("collection_s");
|
||||
SolrInputField path = doc.getField("path_s");
|
||||
SolrInputField hits = doc.getField("hits_l");
|
||||
SolrInputField type = doc.getField("type_s");
|
||||
SolrInputField status = doc.getField("status_s");
|
||||
SolrInputField shard = doc.getField("shard_s");
|
||||
SolrInputField replica = doc.getField("replica_s");
|
||||
SolrInputField core = doc.getField("core_s");
|
||||
SolrInputField wt = doc.getField("wt_s");
|
||||
SolrInputField distrib = doc.getField("distrib_s");
|
||||
SolrInputField isShard = doc.getField("isShard_s");
|
||||
|
||||
assertEquals(query.getValue(), "*:*");
|
||||
assertEquals(date.getValue(), "2019-12-09T15:05:01.931");
|
||||
assertEquals(collection.getValue(), "logs4");
|
||||
assertEquals(path.getValue(), "/select");
|
||||
assertEquals(hits.getValue(), "234868");
|
||||
assertEquals(type.getValue(), "query");
|
||||
assertEquals(status.getValue(), "0");
|
||||
assertEquals(shard.getValue(), "shard1");
|
||||
assertEquals(replica.getValue(), "core_node2");
|
||||
assertEquals(core.getValue(), "logs4_shard1_replica_n1");
|
||||
assertEquals(wt.getValue(), "javabin");
|
||||
assertEquals(distrib.getValue(), "false");
|
||||
assertEquals(isShard.getValue(), "true");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUpdateRecords() throws Exception{
|
||||
String record = "2019-12-25 20:38:23.498 INFO (qtp2103763750-126) [c:logs3 s:shard1 r:core_node2 x:logs3_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [logs3_shard1_replica_n1] webapp=/solr path=/update params={commitWithin=1000&overwrite=true&wt=json&_=1577306114481}{deleteByQuery=*:* (-1653925534487281664)} 0 11\n" +
|
||||
"2019-12-25 20:42:13.411 INFO (qtp2103763750-303) [c:logs5 s:shard1 r:core_node2 x:logs5_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [logs5_shard1_replica_n1] webapp=/solr path=/update params={commitWithin=1000&overwrite=true&wt=json&_=1577306114481}{delete=[03bbe975-728a-4df8-aa25-fe25049dc0ef (-1653925775577972736)]} 0 1\n";
|
||||
List<SolrInputDocument> docs = readDocs(record);
|
||||
assertEquals(docs.size(), 2);
|
||||
SolrInputDocument doc = docs.get(0);
|
||||
SolrInputField date = doc.getField("date_dt");
|
||||
SolrInputField type = doc.getField("type_s");
|
||||
SolrInputField core = doc.getField("core_s");
|
||||
SolrInputField collection = doc.getField("collection_s");
|
||||
assertEquals(date.getValue(), "2019-12-25T20:38:23.498");
|
||||
assertEquals(type.getValue(), "deleteByQuery");
|
||||
assertEquals(collection.getValue(), "logs3");
|
||||
assertEquals(core.getValue(), "logs3_shard1_replica_n1");
|
||||
|
||||
SolrInputDocument doc1 = docs.get(1);
|
||||
SolrInputField date1 = doc1.getField("date_dt");
|
||||
SolrInputField type1 = doc1.getField("type_s");
|
||||
SolrInputField core1 = doc1.getField("core_s");
|
||||
SolrInputField collection1= doc1.getField("collection_s");
|
||||
assertEquals(date1.getValue(), "2019-12-25T20:42:13.411");
|
||||
assertEquals(type1.getValue(), "delete");
|
||||
assertEquals(collection1.getValue(), "logs5");
|
||||
assertEquals(core1.getValue(), "logs5_shard1_replica_n1");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testErrorRecord() throws Exception{
|
||||
String record = "2019-12-31 01:49:53.251 ERROR (qtp2103763750-240) [c:logs6 s:shard1 r:core_node2 x:logs6_shard1_replica_n1] o.a.s.h.RequestHandlerBase org.apache.solr.common.SolrException: org.apache.solr.search.SyntaxError: Cannot parse 'id:[* TO *': Encountered \"<EOF>\" at line 1, column 10.\n" +
|
||||
"Was expecting one of:\n" +
|
||||
" \"]\" ...\n" +
|
||||
" \"}\" ...\n" +
|
||||
" \n" +
|
||||
"\tat org.apache.solr.handler.component.QueryComponent.prepare(QueryComponent.java:218)\n" +
|
||||
"\tat org.apache.solr.handler.component.SearchHandler.handleRequestBody(SearchHandler.java:302)\n" +
|
||||
"\tat org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:197)\n" +
|
||||
"\tat org.apache.solr.core.SolrCore.execute(SolrCore.java:2582)\n" +
|
||||
"\tat org.apache.solr.servlet.HttpSolrCall.execute(HttpSolrCall.java:799)\n" +
|
||||
"\tat org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:578)\n" +
|
||||
"\tat org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:419)\n" +
|
||||
"\tat org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:351)\n" +
|
||||
"\tat org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1602)\n" +
|
||||
"\tat org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:540)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:146)\n" +
|
||||
"\tat org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:548)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:257)\n" +
|
||||
"\tat org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1711)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1347)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203)\n" +
|
||||
"\tat org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:480)\n" +
|
||||
"\tat org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1678)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1249)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:220)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:152)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)\n" +
|
||||
"\tat org.eclipse.jetty.rewrite.handler.RewriteHandler.handle(RewriteHandler.java:335)\n" +
|
||||
"\tat org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)\n" +
|
||||
"\tat org.eclipse.jetty.server.Server.handle(Server.java:505)\n" +
|
||||
"\tat org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:370)\n" +
|
||||
"\tat org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:267)\n" +
|
||||
"\tat org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:305)\n" +
|
||||
"\tat org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:103)\n" +
|
||||
"\tat org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:117)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:781)\n" +
|
||||
"\tat org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:917)\n" +
|
||||
"\tat java.base/java.lang.Thread.run(Thread.java:834)\n" +
|
||||
"Caused by: org.apache.solr.search.SyntaxError: Cannot parse 'id:[* TO *': Encountered \"<EOF>\" at line 1, column 10.\n" +
|
||||
"Was expecting one of:\n" +
|
||||
" \"]\" ...\n" +
|
||||
" \"}\" ...\n" +
|
||||
" \n" +
|
||||
"\tat org.apache.solr.parser.SolrQueryParserBase.parse(SolrQueryParserBase.java:266)\n" +
|
||||
"\tat org.apache.solr.search.LuceneQParser.parse(LuceneQParser.java:49)\n" +
|
||||
"\tat org.apache.solr.search.QParser.getQuery(QParser.java:174)\n" +
|
||||
"\tat org.apache.solr.handler.component.QueryComponent.prepare(QueryComponent.java:160)\n" +
|
||||
"\t... 41 more\n" +
|
||||
"Caused by: org.apache.solr.parser.ParseException: Encountered \"<EOF>\" at line 1, column 10.\n" +
|
||||
"Was expecting one of:\n" +
|
||||
" \"]\" ...\n" +
|
||||
" \"}\" ...\n" +
|
||||
" \n" +
|
||||
"\tat org.apache.solr.parser.QueryParser.generateParseException(QueryParser.java:885)\n" +
|
||||
"\tat org.apache.solr.parser.QueryParser.jj_consume_token(QueryParser.java:767)\n" +
|
||||
"\tat org.apache.solr.parser.QueryParser.Term(QueryParser.java:479)\n" +
|
||||
"\tat org.apache.solr.parser.QueryParser.Clause(QueryParser.java:278)\n" +
|
||||
"\tat org.apache.solr.parser.QueryParser.Query(QueryParser.java:162)\n" +
|
||||
"\tat org.apache.solr.parser.QueryParser.TopLevelQuery(QueryParser.java:131)\n" +
|
||||
"\tat org.apache.solr.parser.SolrQueryParserBase.parse(SolrQueryParserBase.java:262)\n" +
|
||||
"\t... 44 more\n" +
|
||||
"\n"+
|
||||
"2019-12-09 15:05:01.931 INFO (qtp2103763750-21) [c:logs4 s:shard1 r:core_node2 x:logs4_shard1_replica_n1] o.a.s.c.S.Request [logs4_shard1_replica_n1] webapp=/solr path=/select params={q=*:*&_=1575835181759&isShard=true&wt=javabin&distrib=false} hits=234868 status=0 QTime=8\n";
|
||||
List<SolrInputDocument> docs = readDocs(record);
|
||||
assertEquals(docs.size(), 2);
|
||||
SolrInputDocument doc = docs.get(0);
|
||||
SolrInputField date = doc.getField("date_dt");
|
||||
SolrInputField type = doc.getField("type_s");
|
||||
SolrInputField shard = doc.getField("shard_s");
|
||||
SolrInputField replica = doc.getField("replica_s");
|
||||
SolrInputField core = doc.getField("core_s");
|
||||
SolrInputField stack = doc.getField("stack_t");
|
||||
SolrInputField root = doc.getField("root_cause_t");
|
||||
SolrInputField collection = doc.getField("collection_s");
|
||||
|
||||
|
||||
assertEquals(date.getValue(), "2019-12-31T01:49:53.251");
|
||||
assertEquals(type.getValue(), "error");
|
||||
assertEquals(collection.getValue(), "logs6");
|
||||
|
||||
|
||||
assertEquals(shard.getValue(), "shard1");
|
||||
assertEquals(replica.getValue(), "core_node2");
|
||||
assertEquals(core.getValue(), "logs6_shard1_replica_n1");
|
||||
assertTrue(stack.getValue().toString().contains(root.getValue().toString()));
|
||||
|
||||
SolrInputDocument doc1 = docs.get(1);
|
||||
SolrInputField date1 = doc1.getField("date_dt");
|
||||
SolrInputField type1 = doc1.getField("type_s");
|
||||
assertEquals(date1.getValue(), "2019-12-09T15:05:01.931");
|
||||
assertEquals(type1.getValue(), "query");
|
||||
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testCommit() throws Exception{
|
||||
String record = "2019-12-16 14:20:19.708 INFO (qtp812143047-22671) [c:production_201912 s:shard128 r:core_node7 x:production_201912_shard128_replica] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1653086376121335808,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}\n";
|
||||
List<SolrInputDocument> docs = readDocs(record);
|
||||
assertEquals(docs.size(), 1);
|
||||
SolrInputDocument doc = docs.get(0);
|
||||
|
||||
SolrInputField date = doc.getField("date_dt");
|
||||
SolrInputField type = doc.getField("type_s");
|
||||
SolrInputField shard = doc.getField("shard_s");
|
||||
SolrInputField replica = doc.getField("replica_s");
|
||||
SolrInputField core = doc.getField("core_s");
|
||||
SolrInputField openSearcher = doc.getField("open_searcher_s");
|
||||
SolrInputField softCommit = doc.getField("soft_commit_s");
|
||||
SolrInputField collection = doc.getField("collection_s");
|
||||
|
||||
assertEquals(date.getValue(), "2019-12-16T14:20:19.708");
|
||||
assertEquals(type.getValue(), "commit");
|
||||
assertEquals(shard.getValue(), "shard128");
|
||||
assertEquals(replica.getValue(), "core_node7");
|
||||
assertEquals(core.getValue(), "production_201912_shard128_replica");
|
||||
assertEquals(openSearcher.getValue(), "true");
|
||||
assertEquals(softCommit.getValue(), "false");
|
||||
assertEquals(collection.getValue(), "production_201912");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testNewSearcher() throws Exception{
|
||||
String record = "2019-12-16 19:00:23.931 INFO (searcherExecutor-66-thread-1) [ ] o.a.s.c.SolrCore [production_cv_month_201912_shard35_replica_n1] Registered new searcher Searcher@16ef5fac[production_cv_month_201912_shard35_replica_n1] ...";
|
||||
List<SolrInputDocument> docs = readDocs(record);
|
||||
assertEquals(docs.size(), 1);
|
||||
SolrInputDocument doc = docs.get(0);
|
||||
SolrInputField date = doc.getField("date_dt");
|
||||
SolrInputField type = doc.getField("type_s");
|
||||
SolrInputField core = doc.getField("core_s");
|
||||
assertEquals(date.getValue(), "2019-12-16T19:00:23.931");
|
||||
assertEquals(type.getValue(), "newSearcher");
|
||||
assertEquals(core.getValue(), "production_cv_month_201912_shard35_replica_n1");
|
||||
}
|
||||
|
||||
private List<SolrInputDocument> readDocs(String records) throws Exception {
|
||||
BufferedReader bufferedReader = new BufferedReader(new StringReader(records));
|
||||
ArrayList<SolrInputDocument> list = new ArrayList<>();
|
||||
|
||||
try {
|
||||
LogRecordReader logRecordReader = new SolrLogPostTool.LogRecordReader(bufferedReader);
|
||||
SolrInputDocument doc = null;
|
||||
while ((doc = logRecordReader.readRecord()) != null) {
|
||||
list.add(doc);
|
||||
}
|
||||
} finally {
|
||||
bufferedReader.close();
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
}
|
|
@@ -38,7 +38,7 @@
|
|||
</Item>
|
||||
</Array>
|
||||
</Arg>
|
||||
<Set name="host"><Property name="jetty.host" /></Set>
|
||||
<Set name="host"><Property name="solr.jetty.host" default="127.0.0.1"/></Set>
|
||||
<Set name="port"><Property name="jetty.port" default="8983" /></Set>
|
||||
<Set name="idleTimeout"><Property name="solr.jetty.http.idleTimeout" default="120000"/></Set>
|
||||
<Set name="acceptorPriorityDelta"><Property name="solr.jetty.http.acceptorPriorityDelta" default="0"/></Set>
|
||||
|
|
|
@@ -63,7 +63,7 @@
|
|||
</Item>
|
||||
</Array>
|
||||
</Arg>
|
||||
<Set name="host"><Property name="solr.jetty.host" /></Set>
|
||||
<Set name="host"><Property name="solr.jetty.host" default="127.0.0.1"/></Set>
|
||||
<Set name="port"><Property name="solr.jetty.https.port" default="8983" /></Set>
|
||||
<Set name="idleTimeout"><Property name="solr.jetty.https.timeout" default="120000"/></Set>
|
||||
<Set name="acceptorPriorityDelta"><Property name="solr.jetty.ssl.acceptorPriorityDelta" default="0"/></Set>
|
||||
|
|
|
@@ -57,7 +57,7 @@
|
|||
</Item>
|
||||
</Array>
|
||||
</Arg>
|
||||
<Set name="host"><Property name="solr.jetty.host" /></Set>
|
||||
<Set name="host"><Property name="solr.jetty.host" default="127.0.0.1" /></Set>
|
||||
<Set name="port"><Property name="solr.jetty.https.port" default="8983" /></Set>
|
||||
<Set name="idleTimeout"><Property name="solr.jetty.https.timeout" default="120000"/></Set>
|
||||
<Set name="acceptorPriorityDelta"><Property name="solr.jetty.ssl.acceptorPriorityDelta" default="0"/></Set>
|
||||
|
|
|
@@ -1239,6 +1239,9 @@ The BACKUP command will backup Solr indexes and configurations for a specified c
|
|||
`collection`::
|
||||
The name of the collection to be backed up. This parameter is required.
|
||||
|
||||
`name`::
|
||||
The name to give the created backup. Solr checks that a backup with this name does not already exist and raises an error if it does. This parameter is required.
|
||||
|
||||
`location`::
|
||||
The location on a shared drive for the backup command to write to. Alternatively, it can be set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
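As a rough illustration, a BACKUP call combining these parameters might look like the sketch below; the collection name, backup name, and location are placeholder values:

[source,bash]
----
curl "http://localhost:8983/solr/admin/collections?action=BACKUP&collection=techproducts&name=myBackup&location=/mnt/shared/solr_backups"
----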
|
||||
|
||||
|
@@ -1268,6 +1271,9 @@ You can use the collection <<collection-aliasing.adoc#createalias,CREATEALIAS>>
|
|||
`collection`::
|
||||
The collection into which the indexes will be restored. This parameter is required.
|
||||
|
||||
`name`::
|
||||
The name of the existing backup that you want to restore. This parameter is required.
|
||||
|
||||
`location`::
|
||||
The location on a shared drive for the RESTORE command to read from. Alternatively, it can be set as a <<cluster-node-management.adoc#clusterprop,cluster property>>.
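A matching RESTORE call might look like the following sketch, again with placeholder names and path:

[source,bash]
----
curl "http://localhost:8983/solr/admin/collections?action=RESTORE&collection=restoredCollection&name=myBackup&location=/mnt/shared/solr_backups"
----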
|
||||
|
||||
|
|
|
@@ -31,6 +31,11 @@ In this section you will learn how to start a SolrCloud cluster using startup sc
|
|||
This tutorial assumes that you're already familiar with the basics of using Solr. If you need a refresher, please see the <<getting-started.adoc#getting-started,Getting Started section>> to get a grounding in Solr concepts. If you load documents as part of that exercise, you should start over with a fresh Solr installation for these SolrCloud tutorials.
|
||||
====
|
||||
|
||||
[WARNING]
|
||||
====
|
||||
For security reasons, Solr nodes only accept connections from localhost by default. Administrators setting up SolrCloud deployments with multiple nodes must override this setting. For more details see <<securing-solr.adoc#network-configuration,here>>.
|
||||
====
|
||||
|
||||
== SolrCloud Example
|
||||
|
||||
=== Interactive Startup
|
||||
|
|
|
@@ -79,6 +79,22 @@ SOLR_IP_BLACKLIST="192.168.0.3, 192.168.0.4"
|
|||
ZooKeeper is a central and important part of a SolrCloud cluster and understanding how to secure
|
||||
its content is covered in the <<zookeeper-access-control.adoc#zookeeper-access-control,ZooKeeper Access Control>> page.
|
||||
|
||||
|
||||
== Network Configuration
|
||||
|
||||
// tag::security-network-binding-1[]
|
||||
Administrators should consider their security setup carefully as an important step in moving to production. Solr provides a number of features out of the box to meet the security needs of users: authentication and authorization can be configured using a range of security plugins, privacy can be bolstered by enabling SSL/TLS, and (in SolrCloud) ZooKeeper data can be protected with ACL rules to prevent unauthorized reads and writes.
|
||||
|
||||
Even if these measures or others are taken, it is strongly recommended that Solr always be protected by a firewall. Solr is not designed to be exposed on the open internet.
|
||||
|
||||
It is also strongly recommended that Solr listen to only those network interfaces that are strictly required. To prevent administrators from unintentionally exposing Solr more broadly, Solr only listens on the loopback interface ("127.0.0.1") by default. Most deployments will need to change this value to something less restrictive so that it can be reached from other boxes. This can be done by setting a `SOLR_JETTY_HOST` value in your environment's "include script" (`solr.in.sh` or `solr.in.cmd`):
|
||||
|
||||
[source,bash]
|
||||
----
|
||||
SOLR_JETTY_HOST="0.0.0.0"
|
||||
----
|
||||
// end::security-network-binding-1[]
|
||||
|
||||
== Enable Security Manager
|
||||
|
||||
Solr can run in a Java Security Manager sandbox by setting `SOLR_SECURITY_MANAGER_ENABLED=true` via environment variable or in `solr.in.sh`/`solr.in.cmd`. This feature is incompatible with Hadoop.
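For example, a minimal entry in `solr.in.sh` could look like this:

[source,bash]
----
# Run Solr inside the Java Security Manager sandbox (not compatible with Hadoop)
SOLR_SECURITY_MANAGER_ENABLED=true
----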
|
||||
|
|
|
@@ -302,6 +302,10 @@ Check these limits every time you upgrade your kernel or operating system. These
|
|||
If these limits are exceeded, the problems reported by Solr vary depending on the specific operation responsible for exceeding the limit. Errors such as "too many open files", "connection error", and "max processes exceeded" have been reported, as well as SolrCloud recovery failures.
|
||||
====
|
||||
|
||||
== Security Considerations
|
||||
|
||||
include::securing-solr.adoc[tag=security-network-binding-1]
|
||||
|
||||
== Running Multiple Solr Nodes per Host
|
||||
|
||||
The `bin/solr` script is capable of running multiple instances on one machine, but for a *typical* installation, this is not a recommended setup. Extra CPU and memory resources are required for each additional instance. A single instance is easily capable of handling multiple indexes.
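If multiple nodes per host are nevertheless needed, each instance must get its own port and Solr home; a hypothetical invocation for a second node might look like this (the port, path, and ZooKeeper address below are placeholders):

[source,bash]
----
bin/solr start -cloud -p 8984 -s /var/solr/node2 -z localhost:9983
----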
|
||||
|
|
|
@@ -322,13 +322,13 @@ Comments may be nested.
|
|||
|
||||
Solr's standard query parser originated as a variation of Lucene's "classic" QueryParser. It diverges in the following ways:
|
||||
|
||||
* A `*` may be used for either or both endpoints to specify an open-ended range query
|
||||
* A `*` may be used for either or both endpoints to specify an open-ended range query, or by itself as an existence query.
|
||||
** `field:[* TO 100]` finds all field values less than or equal to 100
|
||||
** `field:[100 TO *]` finds all field values greater than or equal to 100
|
||||
** `field:[* TO *]` matches all documents with the field
|
||||
** `field:*` or `field:[* TO *]` finds all documents where the field exists (i.e. has a value)
|
||||
* Pure negative queries (all clauses prohibited) are allowed (only as a top-level clause)
|
||||
** `-inStock:false` finds all field values where inStock is not false
|
||||
** `-field:[* TO *]` finds all documents without a value for field
|
||||
** `-field:*` or `-field:[* TO *]` finds all documents without a value for the field
|
||||
* Support for embedded Solr queries (sub-queries) using any type of query parser as a nested clause using the local-params syntax.
|
||||
** `inStock:true OR {!dismax qf='name manu' v='ipod'}`
|
||||
+
|
||||
|
|
|
@@ -181,12 +181,30 @@ class ConnectionImpl implements Connection {
|
|||
|
||||
@Override
|
||||
public void setTransactionIsolation(int level) throws SQLException {
|
||||
throw new UnsupportedOperationException();
|
||||
if(isClosed()) {
|
||||
throw new SQLException("Connection is closed.");
|
||||
}
|
||||
if(Connection.TRANSACTION_NONE == level) {
|
||||
throw new SQLException("Connection.TRANSACTION_NONE cannot be used.");
|
||||
}
|
||||
if(
|
||||
Connection.TRANSACTION_READ_COMMITTED == level ||
|
||||
Connection.TRANSACTION_READ_UNCOMMITTED == level ||
|
||||
Connection.TRANSACTION_REPEATABLE_READ == level ||
|
||||
Connection.TRANSACTION_SERIALIZABLE == level
|
||||
) {
|
||||
throw new SQLException(new UnsupportedOperationException());
|
||||
} else {
|
||||
throw new SQLException("Unsupported transaction type specified.");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getTransactionIsolation() throws SQLException {
|
||||
throw new UnsupportedOperationException();
|
||||
if(isClosed()) {
|
||||
throw new SQLException("Connection is closed.");
|
||||
}
|
||||
return Connection.TRANSACTION_NONE;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -526,7 +526,6 @@ public class JdbcTest extends SolrCloudTestCase {
|
|||
// assertEquals(0, databaseMetaData.getDriverMajorVersion());
|
||||
// assertEquals(0, databaseMetaData.getDriverMinorVersion());
|
||||
|
||||
|
||||
List<String> tableSchemas = new ArrayList<>(Arrays.asList(zkHost, "metadata"));
|
||||
try(ResultSet rs = databaseMetaData.getSchemas()) {
|
||||
assertTrue(rs.next());
|
||||
|
@@ -551,10 +550,8 @@ public class JdbcTest extends SolrCloudTestCase {
|
|||
solrClient.connect();
|
||||
ZkStateReader zkStateReader = solrClient.getZkStateReader();
|
||||
|
||||
SortedSet<String> tables = new TreeSet<>();
|
||||
|
||||
Set<String> collectionsSet = zkStateReader.getClusterState().getCollectionsMap().keySet();
|
||||
tables.addAll(collectionsSet);
|
||||
SortedSet<String> tables = new TreeSet<>(collectionsSet);
|
||||
|
||||
Aliases aliases = zkStateReader.getAliases();
|
||||
tables.addAll(aliases.getCollectionAliasListMap().keySet());
|
||||
|
@@ -571,6 +568,15 @@ public class JdbcTest extends SolrCloudTestCase {
|
|||
assertFalse(rs.next());
|
||||
}
|
||||
|
||||
assertEquals(Connection.TRANSACTION_NONE, con.getTransactionIsolation());
|
||||
try {
|
||||
con.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
|
||||
fail("should not have been able to set transaction isolation");
|
||||
} catch (SQLException e) {
|
||||
assertEquals(UnsupportedOperationException.class, e.getCause().getClass());
|
||||
}
|
||||
assertEquals(Connection.TRANSACTION_NONE, con.getTransactionIsolation());
|
||||
|
||||
assertTrue(con.isReadOnly());
|
||||
con.setReadOnly(true);
|
||||
assertTrue(con.isReadOnly());
|
||||
|
@@ -579,7 +585,6 @@ public class JdbcTest extends SolrCloudTestCase {
|
|||
con.clearWarnings();
|
||||
assertNull(con.getWarnings());
|
||||
|
||||
|
||||
try (Statement statement = con.createStatement()) {
|
||||
checkStatement(con, statement);
|
||||
|
||||
|
|