Merge branch 'branch_6x' of https://git-wip-us.apache.org/repos/asf/lucene-solr into branch_6x

This commit is contained in:
Karl Wright 2016-05-20 13:51:06 -04:00
commit 817cbf0c69
26 changed files with 1512 additions and 589 deletions

View File

@@ -260,7 +260,7 @@ Other Changes
* SOLR-9066: Make CountMetric return long instead of double (Kevin Risden)
* SOLR-9065: Migrate SolrJ distributed tests to SolrCloudTestCase. (Alan Woodward)
* SOLR-9065, SOLR-9072, SOLR-9132: Migrate some distributed tests to SolrCloudTestCase. (Alan Woodward)
* SOLR-8184: Negative tests for JDBC Connection String (Susheel Kumar, Jason Gerlowski, Kevin Risden)
@@ -274,11 +274,11 @@ Other Changes
* SOLR-9105: Fix a bunch of typos across 103 files (Bartosz Krasiński via janhoy)
* SOLR-9072: Migrate morphline-core tests to SolrCloudTestCase. (Alan Woodward)
* SOLR-9115: Get rid of javax.xml.bind.DatatypeConverter in SimplePostTool
for Java 9 compatibility. (Uwe Schindler)
* SOLR-9131: Fix "start solr" text in cluster.vm Velocity template (janhoy)
================== 6.0.1 ==================
(No Changes)

View File

@@ -15,6 +15,13 @@
* limitations under the License.
*/
package org.apache.solr.search.mlt;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.mlt.MoreLikeThis;
import org.apache.lucene.search.BooleanClause;
@@ -39,13 +46,6 @@ import org.apache.solr.search.QParser;
import org.apache.solr.search.QueryParsing;
import org.apache.solr.util.SolrPluginUtils;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;
public class CloudMLTQParser extends QParser {
// Pattern is thread safe -- TODO? share this with general 'fl' param
private static final Pattern splitList = Pattern.compile(",| ");
@@ -112,13 +112,12 @@ public class CloudMLTQParser extends QParser {
}
}
} else {
Map<String, SchemaField> fields = req.getSchema().getFields();
for (String field : doc.getFieldNames()) {
// Only use fields that are stored and have an explicit analyzer.
// This makes sense as the query uses tf/idf/.. for query construction.
// We might want to revisit and change this in the future though.
if(fields.get(field).stored()
&& fields.get(field).getType().isExplicitAnalyzer()) {
SchemaField f = req.getSchema().getFieldOrNull(field);
if (f != null && f.stored() && f.getType().isExplicitAnalyzer()) {
fieldNames.add(field);
}
}
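A note on the hunk above: IndexSchema.getFields() covers only explicitly declared fields, so the old fields.get(field) lookup could return null for a document field matched by a dynamicField rule and then throw a NullPointerException; getFieldOrNull() also resolves dynamic fields and makes absence explicit. A minimal sketch of the guarded selection, assuming schema, doc and fieldNames are in scope as in the method above:

// Keep only fields that are stored and carry an explicit analyzer;
// getFieldOrNull() returns null instead of throwing for unresolvable names.
for (String field : doc.getFieldNames()) {
  SchemaField f = schema.getFieldOrNull(field);
  if (f != null && f.stored() && f.getType().isExplicitAnalyzer()) {
    fieldNames.add(field);
  }
}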

View File

@@ -0,0 +1,275 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- The Solr schema file, version 1.6 -->
<schema name="cloud-dynamic" version="1.6">
<!-- attribute "name" is the name of this schema and is only used for display purposes.
Applications should change this to reflect the nature of the search collection.
version="x.y" is Solr's version number for the schema syntax and semantics. It should
not normally be changed by applications.
1.0: multiValued attribute did not exist, all fields are multiValued by nature
1.1: multiValued attribute introduced, false by default
1.2: omitTermFreqAndPositions attribute introduced, true by default except for text fields.
1.3: removed optional field compress feature
1.4: default auto-phrase (QueryParser feature) to off
1.5: omitNorms defaults to true for primitive field types (int, float, boolean, string...)
1.6: useDocValuesAsStored defaults to true.
-->
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
<!-- Field type demonstrating an Analyzer failure -->
<fieldType name="failtype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- Demonstrating ignoreCaseChange -->
<fieldType name="wdf_nocase" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="wdf_preserve" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- HighlitText optimizes storage for (long) columns which will be highlit -->
<fieldType name="highlittext" class="solr.TextField"/>
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
<!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
seconds part (.999) is optional.
-->
<fieldType name="date" class="solr.TrieDateField" sortMissingLast="true"/>
<fieldType name="tdate" class="solr.TrieDateField" sortMissingLast="true" precisionStep="6"/>
<fieldType name="text" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="unstemmed" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- field type that doesn't generate phrases from unquoted multiple tokens per analysis unit -->
<fieldType name="text_np" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="nametext" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.core.WhitespaceAnalyzer"/>
</fieldType>
<fieldType name="unstored" class="solr.StrField" indexed="true" stored="false"/>
<fieldType name="textgap" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="uuid" class="solr.UUIDField"/>
<fieldType name="ignored" class="solr.StrField" indexed="false" stored="false"/>
<fieldType name="random" class="solr.RandomSortField" indexed="true"/>
<!-- Poly field -->
<fieldType name="xy" class="solr.PointType" dimension="2" subFieldType="double"/>
<fieldType name="xyd" class="solr.PointType" dimension="2" subFieldSuffix="*_d"/>
<fieldType name="geohash" class="solr.GeoHashField"/>
<fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
<!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
<fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
<fieldType class="org.apache.solr.schema.SortableBinaryField" name="sortable_binary"/>
<field name="id" type="string" indexed="true" stored="true" multiValued="false" required="true"/>
<field name="signatureField" type="string" indexed="true" stored="false"/>
<field name="uuid" type="uuid" stored="true"/>
<field name="name" type="nametext" indexed="true" stored="true"/>
<field name="text" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="subject" type="text" indexed="true" stored="true"/>
<field name="title" type="nametext" indexed="true" stored="true"/>
<field name="weight" type="float" indexed="true" stored="true"/>
<field name="bday" type="date" indexed="true" stored="true"/>
<field name="text_np" type="text_np" indexed="true" stored="false"/>
<!-- to test property inheritance and overriding -->
<field name="shouldbeunstored" type="unstored"/>
<field name="shouldbestored" type="unstored" stored="true"/>
<field name="shouldbeunindexed" type="unstored" indexed="false" stored="true"/>
<!-- test different combinations of indexed and stored -->
<field name="bind" type="boolean" indexed="true" stored="false"/>
<field name="bsto" type="boolean" indexed="false" stored="true"/>
<field name="bindsto" type="boolean" indexed="true" stored="true"/>
<field name="isto" type="int" indexed="false" stored="true"/>
<field name="iind" type="int" indexed="true" stored="false"/>
<field name="ssto" type="string" indexed="false" stored="true"/>
<field name="sind" type="string" indexed="true" stored="false"/>
<field name="sindsto" type="string" indexed="true" stored="true"/>
<!-- test combinations of term vector settings -->
<field name="test_basictv" type="text" termVectors="true"/>
<field name="test_notv" type="text" termVectors="false"/>
<field name="test_postv" type="text" termVectors="true" termPositions="true"/>
<field name="test_offtv" type="text" termVectors="true" termOffsets="true"/>
<field name="test_posofftv" type="text" termVectors="true"
termPositions="true" termOffsets="true"/>
<!-- test highlit field settings -->
<field name="test_hlt" type="highlittext" indexed="true"/>
<field name="test_hlt_off" type="highlittext" indexed="true"/>
<!--
<field name="timestamp" type="date" indexed="true" stored="true" default="NOW"/>
-->
<field name="timestamp" type="date" indexed="true" stored="true"/>
<!-- Test a point field for distances -->
<field name="point" type="xy" indexed="true" stored="true" multiValued="false"/>
<field name="pointD" type="xyd" indexed="true" stored="true" multiValued="false"/>
<field name="point_hash" type="geohash" indexed="true" stored="true" multiValued="false"/>
<field name="store" type="location" indexed="true" stored="true"/>
<!-- to test uniq fields -->
<field name="uniq" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="uniq2" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="uniq3" type="string" indexed="true" stored="true"/>
<field name="nouniq" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="copyfield_source" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="payload" type="sortable_binary" indexed="false"
stored="true" multiValued="false" docValues="true"/>
<!-- for versioning -->
<field name="_version_" type="long" indexed="true" stored="true"/>
<!-- points to the root document of a block of nested documents -->
<field name="_root_" type="string" indexed="true" stored="true"/>
<field name="multi_int_with_docvals" type="tint" multiValued="true" docValues="true" indexed="false"/>
<dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false"/>
<dynamicField name="*_sI" type="string" indexed="true" stored="false"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="t_*" type="text" indexed="true" stored="true"/>
<dynamicField name="tv_*" type="text" indexed="true" stored="true"
termVectors="true" termPositions="true" termOffsets="true"/>
<dynamicField name="tv_mv_*" type="text" indexed="true" stored="true" multiValued="true"
termVectors="true" termPositions="true" termOffsets="true"/>
<dynamicField name="*_mfacet" type="string" indexed="true" stored="false" multiValued="true"/>
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_ll" type="long" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_u" type="unstemmed" indexed="true" stored="true"/>
<dynamicField name="*_ws" type="nametext" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<!-- some trie-coded dynamic fields for faster range queries -->
<dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>
<dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/>
<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/>
<dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/>
<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/>
<dynamicField name="ignored_*" type="ignored" multiValued="true"/>
<dynamicField name="attr_*" type="text" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="random_*" type="random"/>
<defaultSearchField>text</defaultSearchField>
<uniqueKey>id</uniqueKey>
<!-- example of a custom similarity -->
<similarity class="solr.CustomSimilarityFactory">
<str name="echo">I am your default sim</str>
</similarity>
</schema>
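As the comment on the date fieldTypes above notes, values take the ISO-8601 form 1995-12-31T23:59:59.999Z, with only the fractional seconds optional. A small sketch of producing a value in that shape with the plain JDK (no Solr dependency; the class name is illustrative):

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.TimeZone;

public class SolrDateFormatExample {
  public static void main(String[] args) {
    // Solr date fields expect UTC with a trailing 'Z'; the .SSS part may be omitted on input.
    SimpleDateFormat fmt = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'", Locale.ROOT);
    fmt.setTimeZone(TimeZone.getTimeZone("UTC"));
    System.out.println(fmt.format(new Date())); // e.g. 2016-05-20T17:51:06.000Z
  }
}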

View File

@@ -0,0 +1,48 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Minimal solrconfig.xml with /select, /admin and /update only -->
<config>
<dataDir>${solr.data.dir:}</dataDir>
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
<schemaFactory class="ClassicIndexSchemaFactory"/>
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
<updateHandler class="solr.DirectUpdateHandler2">
<commitWithin>
<softCommit>${solr.commitwithin.softcommit:true}</softCommit>
</commitWithin>
<updateLog></updateLog>
</updateHandler>
<requestHandler name="/select" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
<str name="indent">true</str>
<str name="df">text</str>
</lst>
</requestHandler>
</config>
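A hedged SolrJ sketch of querying the /select handler defined above; the base URL and core name are placeholders, and the handler itself contributes the df=text, echoParams and indent defaults:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class SelectHandlerExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical core URL; df=text from the handler defaults resolves the bare query.
    try (HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1")) {
      QueryResponse rsp = client.query(new SolrQuery("*:*"));
      System.out.println("hits: " + rsp.getResults().getNumFound());
    }
  }
}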

View File

@@ -0,0 +1,271 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- The Solr schema file, version 1.6 -->
<schema name="cloud-dynamic" version="1.6">
<!-- attribute "name" is the name of this schema and is only used for display purposes.
Applications should change this to reflect the nature of the search collection.
version="x.y" is Solr's version number for the schema syntax and semantics. It should
not normally be changed by applications.
1.0: multiValued attribute did not exist, all fields are multiValued by nature
1.1: multiValued attribute introduced, false by default
1.2: omitTermFreqAndPositions attribute introduced, true by default except for text fields.
1.3: removed optional field compress feature
1.4: default auto-phrase (QueryParser feature) to off
1.5: omitNorms defaults to true for primitive field types (int, float, boolean, string...)
1.6: useDocValuesAsStored defaults to true.
-->
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0"/>
<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0"/>
<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0"/>
<!-- Field type demonstrating an Analyzer failure -->
<fieldType name="failtype1" class="solr.TextField">
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
catenateNumbers="0" catenateAll="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- Demonstrating ignoreCaseChange -->
<fieldType name="wdf_nocase" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="0" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="0"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="wdf_preserve" class="solr.TextField">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="0" preserveOriginal="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- HighlitText optimizes storage for (long) columns which will be highlit -->
<fieldType name="highlittext" class="solr.TextField"/>
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>
<fieldType name="string" class="solr.StrField" sortMissingLast="true"/>
<!-- format for date is 1995-12-31T23:59:59.999Z and only the fractional
seconds part (.999) is optional.
-->
<fieldType name="date" class="solr.TrieDateField" sortMissingLast="true"/>
<fieldType name="tdate" class="solr.TrieDateField" sortMissingLast="true" precisionStep="6"/>
<fieldType name="text" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="unstemmed" class="solr.TextField" positionIncrementGap="100" autoGeneratePhraseQueries="true">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<!-- field type that doesn't generate phrases from unquoted multiple tokens per analysis unit -->
<fieldType name="text_np" class="solr.TextField" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="1"
catenateNumbers="1" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" catenateWords="0"
catenateNumbers="0" catenateAll="0" splitOnCaseChange="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="nametext" class="solr.TextField">
<analyzer class="org.apache.lucene.analysis.core.WhitespaceAnalyzer"/>
</fieldType>
<fieldType name="unstored" class="solr.StrField" indexed="true" stored="false"/>
<fieldType name="textgap" class="solr.TextField" multiValued="true" positionIncrementGap="100">
<analyzer>
<tokenizer class="solr.MockTokenizerFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
</analyzer>
</fieldType>
<fieldType name="uuid" class="solr.UUIDField"/>
<fieldType name="ignored" class="solr.StrField" indexed="false" stored="false"/>
<fieldType name="random" class="solr.RandomSortField" indexed="true"/>
<!-- Poly field -->
<fieldType name="xy" class="solr.PointType" dimension="2" subFieldType="double"/>
<fieldType name="xyd" class="solr.PointType" dimension="2" subFieldSuffix="*_d"/>
<fieldType name="geohash" class="solr.GeoHashField"/>
<fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d"/>
<!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
<fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate"/>
<field name="id" type="string" indexed="true" stored="true" multiValued="false" required="true"/>
<field name="signatureField" type="string" indexed="true" stored="false"/>
<field name="uuid" type="uuid" stored="true"/>
<field name="name" type="nametext" indexed="true" stored="true"/>
<field name="text" type="text" indexed="true" stored="false" multiValued="true"/>
<field name="subject" type="text" indexed="true" stored="true"/>
<field name="title" type="nametext" indexed="true" stored="true"/>
<field name="weight" type="float" indexed="true" stored="true"/>
<field name="bday" type="date" indexed="true" stored="true"/>
<field name="text_np" type="text_np" indexed="true" stored="false"/>
<!-- to test property inheritance and overriding -->
<field name="shouldbeunstored" type="unstored"/>
<field name="shouldbestored" type="unstored" stored="true"/>
<field name="shouldbeunindexed" type="unstored" indexed="false" stored="true"/>
<!-- test different combinations of indexed and stored -->
<field name="bind" type="boolean" indexed="true" stored="false"/>
<field name="bsto" type="boolean" indexed="false" stored="true"/>
<field name="bindsto" type="boolean" indexed="true" stored="true"/>
<field name="isto" type="int" indexed="false" stored="true"/>
<field name="iind" type="int" indexed="true" stored="false"/>
<field name="ssto" type="string" indexed="false" stored="true"/>
<field name="sind" type="string" indexed="true" stored="false"/>
<field name="sindsto" type="string" indexed="true" stored="true"/>
<!-- test combinations of term vector settings -->
<field name="test_basictv" type="text" termVectors="true"/>
<field name="test_notv" type="text" termVectors="false"/>
<field name="test_postv" type="text" termVectors="true" termPositions="true"/>
<field name="test_offtv" type="text" termVectors="true" termOffsets="true"/>
<field name="test_posofftv" type="text" termVectors="true"
termPositions="true" termOffsets="true"/>
<!-- test highlit field settings -->
<field name="test_hlt" type="highlittext" indexed="true"/>
<field name="test_hlt_off" type="highlittext" indexed="true"/>
<!--
<field name="timestamp" type="date" indexed="true" stored="true" default="NOW"/>
-->
<field name="timestamp" type="date" indexed="true" stored="true"/>
<!-- Test a point field for distances -->
<field name="point" type="xy" indexed="true" stored="true" multiValued="false"/>
<field name="pointD" type="xyd" indexed="true" stored="true" multiValued="false"/>
<field name="point_hash" type="geohash" indexed="true" stored="true" multiValued="false"/>
<field name="store" type="location" indexed="true" stored="true"/>
<!-- to test uniq fields -->
<field name="uniq" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="uniq2" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="uniq3" type="string" indexed="true" stored="true"/>
<field name="nouniq" type="string" indexed="true" stored="true" multiValued="true"/>
<field name="copyfield_source" type="string" indexed="true" stored="true" multiValued="true"/>
<!-- for versioning -->
<field name="_version_" type="long" indexed="true" stored="true"/>
<!-- points to the root document of a block of nested documents -->
<field name="_root_" type="string" indexed="true" stored="true"/>
<field name="multi_int_with_docvals" type="tint" multiValued="true" docValues="true" indexed="false"/>
<dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false"/>
<dynamicField name="*_sI" type="string" indexed="true" stored="false"/>
<dynamicField name="*_sS" type="string" indexed="false" stored="true"/>
<dynamicField name="t_*" type="text" indexed="true" stored="true"/>
<dynamicField name="tv_*" type="text" indexed="true" stored="true"
termVectors="true" termPositions="true" termOffsets="true"/>
<dynamicField name="tv_mv_*" type="text" indexed="true" stored="true" multiValued="true"
termVectors="true" termPositions="true" termOffsets="true"/>
<dynamicField name="*_mfacet" type="string" indexed="true" stored="false" multiValued="true"/>
<dynamicField name="*_i" type="int" indexed="true" stored="true"/>
<dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_s1" type="string" indexed="true" stored="true" multiValued="false"/>
<dynamicField name="*_s" type="string" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_l" type="long" indexed="true" stored="true"/>
<dynamicField name="*_ll" type="long" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="*_t" type="text" indexed="true" stored="true"/>
<dynamicField name="*_u" type="unstemmed" indexed="true" stored="true"/>
<dynamicField name="*_ws" type="nametext" indexed="true" stored="true"/>
<dynamicField name="*_b" type="boolean" indexed="true" stored="true"/>
<dynamicField name="*_f" type="float" indexed="true" stored="true"/>
<dynamicField name="*_d" type="double" indexed="true" stored="true"/>
<dynamicField name="*_dt" type="date" indexed="true" stored="true"/>
<!-- some trie-coded dynamic fields for faster range queries -->
<dynamicField name="*_ti" type="tint" indexed="true" stored="true"/>
<dynamicField name="*_tl" type="tlong" indexed="true" stored="true"/>
<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true"/>
<dynamicField name="*_td" type="tdouble" indexed="true" stored="true"/>
<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true"/>
<dynamicField name="ignored_*" type="ignored" multiValued="true"/>
<dynamicField name="attr_*" type="text" indexed="true" stored="true" multiValued="true"/>
<dynamicField name="random_*" type="random"/>
<defaultSearchField>text</defaultSearchField>
<uniqueKey>id</uniqueKey>
<!-- example of a custom similarity -->
<similarity class="solr.CustomSimilarityFactory">
<str name="echo">I am your default sim</str>
</similarity>
</schema>

View File

@@ -0,0 +1,47 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
A solrconfig.xml snippet containing indexConfig settings for randomized testing.
-->
<indexConfig>
<!-- this sys property is not set by SolrTestCaseJ4 because we ideally want to use
the RandomMergePolicy in all tests - but some tests expect very specific
merge behavior, so those tests can set it as needed.
-->
<mergePolicy enable="${solr.tests.useMergePolicy:true}" class="${solr.tests.mergePolicy:org.apache.solr.util.RandomMergePolicy}" />
<mergePolicyFactory enable="${solr.tests.useMergePolicyFactory:true}" class="${solr.tests.mergePolicyFactory:org.apache.solr.util.RandomMergePolicyFactory}" />
<useCompoundFile>${useCompoundFile:false}</useCompoundFile>
<maxBufferedDocs>${solr.tests.maxBufferedDocs}</maxBufferedDocs>
<ramBufferSizeMB>${solr.tests.ramBufferSizeMB}</ramBufferSizeMB>
<mergeScheduler class="${solr.tests.mergeScheduler}" />
<writeLockTimeout>1000</writeLockTimeout>
<commitLockTimeout>10000</commitLockTimeout>
<!-- this sys property is not set by SolrTestCaseJ4 because almost all tests should
use the single process lockType for speed - but tests that explicitly need
to vary the lockType can set it as needed.
-->
<lockType>${solr.tests.lockType:single}</lockType>
</indexConfig>
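As the snippet's comments explain, the randomized merge policy and lock type are driven by system properties so that individual tests can pin them. A hedged sketch of a test overriding them before core startup (property names come from the snippet; the merge policy class is a known Lucene policy, chosen here only for illustration):

public class PinnedIndexConfigExample {
  public static void main(String[] args) {
    // Disable the factory variant and pin a deterministic merge policy in its place.
    System.setProperty("solr.tests.useMergePolicyFactory", "false");
    System.setProperty("solr.tests.mergePolicy", "org.apache.lucene.index.LogDocMergePolicy");
    // Tests that exercise real file locking can likewise vary the lock type.
    System.setProperty("solr.tests.lockType", "native");
  }
}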

View File

@@ -0,0 +1,96 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!--
See DocExpirationUpdateProcessorFactoryTest
and DistribDocExpirationUpdateProcessorTest
-->
<config>
<dataDir>${solr.data.dir:}</dataDir>
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
<schemaFactory class="ClassicIndexSchemaFactory"/>
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
<xi:include href="solrconfig.snippet.randomindexconfig.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
<updateHandler class="solr.DirectUpdateHandler2">
<updateLog>
<str name="dir">${solr.ulog.dir:}</str>
</updateLog>
</updateHandler>
<requestHandler name="/select" class="solr.SearchHandler" default="true" />
<updateRequestProcessorChain name="convert-ttl-defaults">
<processor class="solr.processor.DocExpirationUpdateProcessorFactory">
<str name="expirationFieldName">_expire_at_tdt</str>
</processor>
</updateRequestProcessorChain>
<updateRequestProcessorChain name="convert-ttl-field">
<processor class="solr.processor.DocExpirationUpdateProcessorFactory">
<str name="ttlFieldName">_ttl_field_</str>
<null name="ttlParamName"/>
<str name="expirationFieldName">_expire_at_tdt</str>
</processor>
<processor class="solr.IgnoreFieldUpdateProcessorFactory">
<str name="fieldName">_ttl_field_</str>
</processor>
</updateRequestProcessorChain>
<updateRequestProcessorChain name="convert-ttl-param">
<processor class="solr.processor.DocExpirationUpdateProcessorFactory">
<str name="ttlParamName">_ttl_param_</str>
<null name="ttlFieldName"/>
<str name="expirationFieldName">_expire_at_tdt</str>
</processor>
</updateRequestProcessorChain>
<updateRequestProcessorChain name="convert-ttl-field-with-param-default">
<processor class="solr.processor.DocExpirationUpdateProcessorFactory">
<str name="ttlFieldName">_ttl_field_</str>
<str name="ttlParamName">_ttl_param_</str>
<str name="expirationFieldName">_expire_at_tdt</str>
</processor>
<processor class="solr.IgnoreFieldUpdateProcessorFactory">
<str name="fieldName">_ttl_field_</str>
</processor>
</updateRequestProcessorChain>
<updateRequestProcessorChain name="scheduled-delete" default="true">
<!-- NOTE: this chain is default so we can see that
autoDeleteChainName defaults to the default chain for the SolrCore
-->
<processor class="solr.processor.DocExpirationUpdateProcessorFactory">
<!-- str name="autoDeleteChainName">scheduled-delete</str -->
<int name="autoDeletePeriodSeconds">3</int>
<str name="expirationFieldName">eXpField_tdt</str>
<str name="ttlFieldName">tTl_s</str>
<null name="ttlParamName"/>
</processor>
<processor class="solr.RecordingUpdateProcessorFactory" />
<processor class="solr.LogUpdateProcessorFactory" />
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
</config>
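A hedged SolrJ sketch of exercising the convert-ttl-field chain above: the TTL goes into _ttl_field_ as a date-math string, and the processor writes the computed absolute expiration into _expire_at_tdt (the URL and id are placeholders):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class TtlFieldExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1")) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "ttl-demo-1");
      doc.addField("_ttl_field_", "+30SECONDS"); // date-math TTL consumed by the chain
      UpdateRequest req = new UpdateRequest();
      req.add(doc);
      req.setParam("update.chain", "convert-ttl-field"); // select the chain explicitly
      req.process(client);
      client.commit();
    }
  }
}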

View File

@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<schema name="minimal" version="1.1">
<fieldType name="string" class="solr.StrField"/>
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<dynamicField name="*" type="string" indexed="true" stored="true"/>
<!-- for versioning -->
<field name="_version_" type="long" indexed="true" stored="true"/>
<field name="_root_" type="int" indexed="true" stored="true" multiValued="false" required="false"/>
<field name="id" type="string" indexed="true" stored="true"/>
<uniqueKey>id</uniqueKey>
</schema>

View File

@@ -0,0 +1,117 @@
<?xml version="1.0" ?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<config>
<jmx />
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
<directoryFactory name="DirectoryFactory" class="${solr.directoryFactory:solr.RAMDirectoryFactory}">
<!-- used to keep RAM reqs down for HdfsDirectoryFactory -->
<bool name="solr.hdfs.blockcache.enabled">${solr.hdfs.blockcache.enabled:true}</bool>
<int name="solr.hdfs.blockcache.blocksperbank">${solr.hdfs.blockcache.blocksperbank:1024}</int>
<str name="solr.hdfs.home">${solr.hdfs.home:}</str>
<str name="solr.hdfs.confdir">${solr.hdfs.confdir:}</str>
<str name="solr.hdfs.blockcache.global">${solr.hdfs.blockcache.global:true}</str>
</directoryFactory>
<schemaFactory class="ClassicIndexSchemaFactory"/>
<dataDir>${solr.data.dir:}</dataDir>
<!-- an update processor that explicitly excludes distrib to test
clean errors when people attempt atomic updates w/o it
-->
<updateRequestProcessorChain name="nodistrib" >
<processor class="solr.NoOpDistributingUpdateProcessorFactory" />
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<requestHandler name="standard" class="solr.StandardRequestHandler">
</requestHandler>
<updateHandler class="solr.DirectUpdateHandler2">
<updateLog>
<str name="dir">${solr.ulog.dir:}</str>
</updateLog>
</updateHandler>
<updateRequestProcessorChain name="dedupe">
<processor class="org.apache.solr.update.processor.SignatureUpdateProcessorFactory">
<bool name="enabled">true</bool>
<bool name="overwriteDupes">true</bool>
<str name="fields">v_t,t_field</str>
<str name="signatureClass">org.apache.solr.update.processor.TextProfileSignature</str>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="stored_sig">
<!-- this chain is valid even though the signature field is not
indexed, because we are not asking for dups to be overwritten
-->
<processor class="org.apache.solr.update.processor.SignatureUpdateProcessorFactory">
<bool name="enabled">true</bool>
<str name="signatureField">non_indexed_signature_sS</str>
<bool name="overwriteDupes">false</bool>
<str name="fields">v_t,t_field</str>
<str name="signatureClass">org.apache.solr.update.processor.TextProfileSignature</str>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="distrib-dup-test-chain-explicit">
<!-- explicit test using processors before and after distrib -->
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_A_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.DistributedUpdateProcessorFactory" />
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_B_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<updateRequestProcessorChain name="distrib-dup-test-chain-implicit">
<!-- implicit test w/o distrib declared -->
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_A_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.RegexReplaceProcessorFactory">
<str name="fieldName">regex_dup_B_s</str>
<str name="pattern">x</str>
<str name="replacement">x_x</str>
</processor>
<processor class="solr.RunUpdateProcessorFactory" />
</updateRequestProcessorChain>
<searchComponent name="delayingSearchComponent"
class="org.apache.solr.search.DelayingSearchComponent"/>
<requestHandler name="/select" class="solr.SearchHandler">
<arr name="first-components">
<str>delayingSearchComponent</str>
</arr>
</requestHandler>
</config>
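A hedged sketch of driving the dedupe chain above: with overwriteDupes=true, two documents whose v_t/t_field contents produce the same TextProfileSignature collapse into one (the URL and field values are placeholders):

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class DedupeChainExample {
  public static void main(String[] args) throws Exception {
    try (HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1")) {
      UpdateRequest req = new UpdateRequest();
      req.setParam("update.chain", "dedupe"); // route updates through the chain above
      for (String id : new String[] {"a", "b"}) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", id);
        doc.addField("v_t", "identical body text"); // same signature input for both docs
        req.add(doc);
      }
      req.process(client);
      client.commit(); // only one of the two documents should survive
    }
  }
}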

View File

@@ -40,123 +40,130 @@ import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.update.AddUpdateCommand;
import org.apache.solr.util.TestInjection;
import org.junit.BeforeClass;
import org.junit.Test;
@SuppressSSL
public class ConnectionReuseTest extends AbstractFullDistribZkTestBase {
public class ConnectionReuseTest extends SolrCloudTestCase {
private AtomicInteger id = new AtomicInteger();
private static final String COLLECTION = "collection1";
@BeforeClass
public static void beforeConnectionReuseTest() {
if (true) TestInjection.failUpdateRequests = "true:100";
}
public ConnectionReuseTest() {
fixShardCount(1);
sliceCount = 1;
stress = 0;
public static void setupCluster() throws Exception {
TestInjection.failUpdateRequests = "true:100";
configureCluster(1)
.addConfig("config", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
CollectionAdminRequest.createCollection(COLLECTION, "config", 1, 1)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 1, 1));
}
public static String getSchemaFile() {
return "schema.xml";
}
public static String getSolrConfigFile() {
// use this because it has /update and is minimal
return "solrconfig-tlog.xml";
private SolrClient buildClient(HttpClient httpClient, URL url) {
switch (random().nextInt(3)) {
case 0:
// currently only testing with 1 thread
return getConcurrentUpdateSolrClient(url.toString() + "/" + COLLECTION, httpClient, 6, 1);
case 1:
return getHttpSolrClient(url.toString() + "/" + COLLECTION, httpClient);
case 2:
CloudSolrClient client = getCloudSolrClient(cluster.getZkServer().getZkAddress(), random().nextBoolean(), httpClient);
client.setParallelUpdates(random().nextBoolean());
client.setDefaultCollection(COLLECTION);
client.getLbClient().setConnectionTimeout(30000);
client.getLbClient().setSoTimeout(60000);
return client;
}
throw new RuntimeException("impossible");
}
@Test
public void test() throws Exception {
URL url = new URL(((HttpSolrClient) clients.get(0)).getBaseURL());
SolrClient client;
public void testConnectionReuse() throws Exception {
URL url = cluster.getJettySolrRunners().get(0).getBaseUrl();
HttpClient httpClient = HttpClientUtil.createClient(null);
int rndClient = random().nextInt(3);
if (rndClient == 0) {
client = getConcurrentUpdateSolrClient(url.toString(), httpClient, 6, 1); // currently only testing with 1 thread
} else if (rndClient == 1) {
client = getHttpSolrClient(url.toString(), httpClient);
} else if (rndClient == 2) {
client = getCloudSolrClient(zkServer.getZkAddress(), random().nextBoolean(), httpClient);
((CloudSolrClient) client).setParallelUpdates(random().nextBoolean());
((CloudSolrClient) client).setDefaultCollection(DEFAULT_COLLECTION);
((CloudSolrClient) client).getLbClient().setConnectionTimeout(30000);
((CloudSolrClient) client).getLbClient().setSoTimeout(60000);
} else {
throw new RuntimeException("impossible");
}
PoolingClientConnectionManager cm = (PoolingClientConnectionManager) httpClient.getConnectionManager();
HttpHost target = new HttpHost(url.getHost(), url.getPort(), isSSLMode() ? "https" : "http");
HttpRoute route = new HttpRoute(target);
ClientConnectionRequest mConn = getClientConnectionRequest(httpClient, route);
ManagedClientConnection conn1 = getConn(mConn);
headerRequest(target, route, conn1);
conn1.releaseConnection();
cm.releaseConnection(conn1, -1, TimeUnit.MILLISECONDS);
int queueBreaks = 0;
int cnt1 = atLeast(3);
int cnt2 = atLeast(30);
for (int j = 0; j < cnt1; j++) {
for (int i = 0; i < cnt2; i++) {
try (SolrClient client = buildClient(httpClient, url)) {
HttpHost target = new HttpHost(url.getHost(), url.getPort(), isSSLMode() ? "https" : "http");
HttpRoute route = new HttpRoute(target);
ClientConnectionRequest mConn = getClientConnectionRequest(httpClient, route);
ManagedClientConnection conn1 = getConn(mConn);
headerRequest(target, route, conn1);
conn1.releaseConnection();
cm.releaseConnection(conn1, -1, TimeUnit.MILLISECONDS);
int queueBreaks = 0;
int cnt1 = atLeast(3);
int cnt2 = atLeast(30);
for (int j = 0; j < cnt1; j++) {
boolean done = false;
AddUpdateCommand c = new AddUpdateCommand(null);
c.solrDoc = sdoc("id", id.incrementAndGet());
try {
client.add(c.solrDoc);
} catch (Exception e) {
e.printStackTrace();
}
if (!done && i > 0 && i < cnt2 - 1 && client instanceof ConcurrentUpdateSolrClient && random().nextInt(10) > 8) {
queueBreaks++;
done = true;
Thread.sleep(350); // wait past streaming client poll time of 250ms
for (int i = 0; i < cnt2; i++) {
AddUpdateCommand c = new AddUpdateCommand(null);
c.solrDoc = sdoc("id", id.incrementAndGet());
try {
client.add(c.solrDoc);
} catch (Exception e) {
e.printStackTrace();
}
if (!done && i > 0 && i < cnt2 - 1 && client instanceof ConcurrentUpdateSolrClient
&& random().nextInt(10) > 8) {
queueBreaks++;
done = true;
Thread.sleep(350); // wait past streaming client poll time of 250ms
}
}
}
if (client instanceof ConcurrentUpdateSolrClient) {
((ConcurrentUpdateSolrClient) client).blockUntilFinished();
}
route = new HttpRoute(new HttpHost(url.getHost(), url.getPort(), isSSLMode() ? "https" : "http"));
mConn = cm.requestConnection(route, null);
ManagedClientConnection conn2 = getConn(mConn);
HttpConnectionMetrics metrics = conn2.getMetrics();
headerRequest(target, route, conn2);
conn2.releaseConnection();
cm.releaseConnection(conn2, -1, TimeUnit.MILLISECONDS);
assertNotNull("No connection metrics found - is the connection getting aborted? server closing the connection? " + client.getClass().getSimpleName(), metrics);
// we try and make sure the connection we get has handled all of the requests in this test
if (client instanceof ConcurrentUpdateSolrClient) {
// we can't fully control queue polling breaking up requests - allow a bit of leeway
int exp = cnt1 + queueBreaks + 2;
assertTrue(
"We expected all communication via streaming client to use one connection! expected=" + exp + " got="
+ metrics.getRequestCount(),
Math.max(exp, metrics.getRequestCount()) - Math.min(exp, metrics.getRequestCount()) < 3);
} else {
assertTrue("We expected all communication to use one connection! " + client.getClass().getSimpleName(),
cnt1 * cnt2 + 2 <= metrics.getRequestCount());
}
}
finally {
HttpClientUtil.close(httpClient);
}
route = new HttpRoute(new HttpHost(url.getHost(), url.getPort(), isSSLMode() ? "https" : "http"));
mConn = cm.requestConnection(route, null);
ManagedClientConnection conn2 = getConn(mConn);
HttpConnectionMetrics metrics = conn2.getMetrics();
headerRequest(target, route, conn2);
conn2.releaseConnection();
cm.releaseConnection(conn2, -1, TimeUnit.MILLISECONDS);
assertNotNull("No connection metrics found - is the connection getting aborted? server closing the connection? " + client.getClass().getSimpleName(), metrics);
// we try and make sure the connection we get has handled all of the requests in this test
if (client instanceof ConcurrentUpdateSolrClient) {
// we can't fully control queue polling breaking up requests - allow a bit of leeway
int exp = cnt1 + queueBreaks + 2;
assertTrue(
"We expected all communication via streaming client to use one connection! expected=" + exp + " got="
+ metrics.getRequestCount(),
Math.max(exp, metrics.getRequestCount()) - Math.min(exp, metrics.getRequestCount()) < 3);
} else {
assertTrue("We expected all communication to use one connection! " + client.getClass().getSimpleName(),
cnt1 * cnt2 + 2 <= metrics.getRequestCount());
}
client.close();
}
public ManagedClientConnection getConn(ClientConnectionRequest mConn)

View File

@@ -17,55 +17,45 @@
package org.apache.solr.cloud;
import java.io.File;
import java.io.IOException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.SnapShooter;
import org.junit.BeforeClass;
import org.junit.Test;
@Slow
public class CleanupOldIndexTest extends AbstractFullDistribZkTestBase {
@LuceneTestCase.Slow
public class CleanupOldIndexTest extends SolrCloudTestCase {
private StoppableIndexingThread indexThread;
public CleanupOldIndexTest() {
super();
sliceCount = 1;
fixShardCount(2);
schemaString = "schema15.xml";
}
public static String[] fieldNames = new String[]{"f_i", "f_f", "f_d", "f_l", "f_dt"};
public static RandVal[] randVals = new RandVal[]{rint, rfloat, rdouble, rlong, rdate};
protected String[] getFieldNames() {
return fieldNames;
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(2)
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
.configure();
}
protected RandVal[] getRandValues() {
return randVals;
}
private static final String COLLECTION = "oldindextest";
@Test
public void test() throws Exception {
handle.clear();
handle.put("timestamp", SKIPVAL);
CollectionAdminRequest.createCollection(COLLECTION, "conf1", 1, 2)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
cluster.getSolrClient().setDefaultCollection(COLLECTION); // TODO make this configurable on StoppableIndexingThread
int[] maxDocList = new int[] {300, 700, 1200};
int maxDoc = maxDocList[random().nextInt(maxDocList.length - 1)];
indexThread = new StoppableIndexingThread(controlClient, cloudClient, "1", true, maxDoc, 1, true);
StoppableIndexingThread indexThread = new StoppableIndexingThread(null, cluster.getSolrClient(), "1", true, maxDoc, 1, true);
indexThread.start();
// give some time to index...
@@ -73,10 +63,10 @@ public class CleanupOldIndexTest extends AbstractFullDistribZkTestBase {
Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
// create some "old" index directories
JettySolrRunner jetty = chaosMonkey.getShard("shard1", 1);
JettySolrRunner jetty = cluster.getRandomJetty(random());
CoreContainer coreContainer = jetty.getCoreContainer();
File dataDir = null;
try (SolrCore solrCore = coreContainer.getCore("collection1")) {
try (SolrCore solrCore = coreContainer.getCore(coreContainer.getCoreDescriptors().get(0).getName())) {
dataDir = new File(solrCore.getDataDir());
}
assertTrue(dataDir.isDirectory());
@@ -94,66 +84,27 @@ public class CleanupOldIndexTest extends AbstractFullDistribZkTestBase {
assertTrue(oldIndexDir2.isDirectory());
// bring shard replica down
JettySolrRunner replica = chaosMonkey.stopShard("shard1", 1).jetty;
jetty.stop();
// wait a moment - let's allow some docs to be indexed so replication time is non-zero
Thread.sleep(waitTimes[random().nextInt(waitTimes.length - 1)]);
// bring shard replica up
replica.start();
jetty.start();
// make sure replication can start
Thread.sleep(3000);
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
// stop indexing threads
indexThread.safeStop();
indexThread.join();
Thread.sleep(1000);
waitForThingsToLevelOut(120);
waitForRecoveriesToFinish(DEFAULT_COLLECTION, zkStateReader, false, true);
// test that leader and replica have same doc count
String fail = checkShardConsistency("shard1", false, false);
if (fail != null)
fail(fail);
SolrQuery query = new SolrQuery("*:*");
query.setParam("distrib", "false");
long client1Docs = shardToJetty.get("shard1").get(0).client.solrClient.query(query).getResults().getNumFound();
long client2Docs = shardToJetty.get("shard1").get(1).client.solrClient.query(query).getResults().getNumFound();
assertTrue(client1Docs > 0);
assertEquals(client1Docs, client2Docs);
cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 1, 2));
assertTrue(!oldIndexDir1.isDirectory());
assertTrue(!oldIndexDir2.isDirectory());
}
@Override
protected void indexDoc(SolrInputDocument doc) throws IOException, SolrServerException {
controlClient.add(doc);
cloudClient.add(doc);
}
@Override
public void distribTearDown() throws Exception {
// make sure threads have been stopped...
indexThread.safeStop();
indexThread.join();
super.distribTearDown();
}
// skip the randoms - they can deadlock...
@Override
protected void indexr(Object... fields) throws Exception {
SolrInputDocument doc = new SolrInputDocument();
addFields(doc, fields);
addFields(doc, "rnd_b", true);
indexDoc(doc);
}
}
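For context on the "old" index directories the test fabricates: stale sibling directories follow the index.<timestamp> naming that Solr's replication code leaves behind, and cleanup removes every such sibling that is not the live index. A hedged sketch of generating such a name (the "yyyyMMddHHmmssSSS" pattern mirrors SnapShooter.DATE_FMT; the data dir path is hypothetical):

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Locale;

public class OldIndexDirSketch {
  public static void main(String[] args) {
    // A sibling of "index" named like this looks abandoned to the cleanup logic.
    String stamp = new SimpleDateFormat("yyyyMMddHHmmssSSS", Locale.ROOT).format(new Date());
    File oldIndexDir = new File("/path/to/data", "index." + stamp);
    System.out.println(oldIndexDir.getName()); // e.g. index.20160520135106042
  }
}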

View File

@@ -16,55 +16,62 @@
*/
package org.apache.solr.cloud;
import org.apache.lucene.util.LuceneTestCase.Slow;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.response.SolrQueryResponse;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Distributed test for {@link org.apache.lucene.index.ExitableDirectoryReader}
*/
@Slow
public class CloudExitableDirectoryReaderTest extends AbstractFullDistribZkTestBase {
public class CloudExitableDirectoryReaderTest extends SolrCloudTestCase {
private static final int NUM_DOCS_PER_TYPE = 20;
private static final String sleep = "2";
public CloudExitableDirectoryReaderTest() {
configString = "solrconfig-tlog-with-delayingcomponent.xml";
schemaString = "schema.xml";
}
@Override
protected String getCloudSolrConfig() {
return configString;
private static final String COLLECTION = "exitable";
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(2)
.addConfig("conf", TEST_PATH().resolve("configsets").resolve("exitable-directory").resolve("conf"))
.configure();
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
}
@Test
public void test() throws Exception {
handle.clear();
handle.put("timestamp", SKIPVAL);
waitForRecoveriesToFinish(false);
indexDocs();
doTimeoutTests();
}
public void indexDocs() throws Exception {
int counter = 1;
UpdateRequest req = new UpdateRequest();
for(; (counter % NUM_DOCS_PER_TYPE) != 0; counter++ )
indexDoc(sdoc("id", Integer.toString(counter), "name", "a" + counter));
req.add(sdoc("id", Integer.toString(counter), "name", "a" + counter));
counter++;
for(; (counter % NUM_DOCS_PER_TYPE) != 0; counter++ )
indexDoc(sdoc("id", Integer.toString(counter), "name", "b" + counter));
req.add(sdoc("id", Integer.toString(counter), "name", "b" + counter));
counter++;
for(; counter % NUM_DOCS_PER_TYPE != 0; counter++ )
indexDoc(sdoc("id", Integer.toString(counter), "name", "dummy term doc" + counter));
req.add(sdoc("id", Integer.toString(counter), "name", "dummy term doc" + counter));
commit();
req.commit(cluster.getSolrClient(), COLLECTION);
}
public void doTimeoutTests() throws Exception {
@ -96,13 +103,13 @@ public class CloudExitableDirectoryReaderTest extends AbstractFullDistribZkTestB
* execute a request, verify that we get an expected error
*/
public void assertPartialResults(ModifiableSolrParams p) throws Exception {
QueryResponse rsp = queryServer(p);
QueryResponse rsp = cluster.getSolrClient().query(COLLECTION, p);
assertEquals(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY+" were expected",
true, rsp.getHeader().get(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY));
}
public void assertSuccess(ModifiableSolrParams p) throws Exception {
QueryResponse response = queryServer(p);
QueryResponse response = cluster.getSolrClient().query(COLLECTION, p);
assertEquals("Wrong #docs in response", NUM_DOCS_PER_TYPE - 1, response.getResults().getNumFound());
}
}
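
The same @BeforeClass shape recurs in every test migrated by this commit: start a small cluster, upload a named configset, create a collection, and wait until it is fully active. A condensed sketch with illustrative names:

import java.util.concurrent.TimeUnit;

import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.junit.BeforeClass;

public class ExampleCloudTest extends SolrCloudTestCase {

  private static final String COLLECTION = "example";

  @BeforeClass
  public static void setupCluster() throws Exception {
    // two Jetty nodes, one uploaded configset
    configureCluster(2)
        .addConfig("conf", configset("cloud-minimal"))
        .configure();
    // two shards, one replica each; then block until every replica is active
    CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
        .processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
    cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
        (n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
  }
}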

View File

@ -16,41 +16,34 @@
*/
package org.apache.solr.cloud;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
import org.apache.solr.common.SolrException;
import org.junit.BeforeClass;
import org.junit.Test;
@LuceneTestCase.Slow
public class ConfigSetsAPITest extends AbstractFullDistribZkTestBase {
public ConfigSetsAPITest() {
super();
sliceCount = 1;
public class ConfigSetsAPITest extends SolrCloudTestCase {
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(1)
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.configure();
}
@Test
public void testConfigSetDeleteWhenInUse() throws Exception {
CollectionAdminRequest.Create create = new CollectionAdminRequest.Create();
create.setConfigName("conf1");
create.setCollectionName("test_configset_delete");
create.setNumShards(1);
create.process(cloudClient);
waitForCollection(cloudClient.getZkStateReader(), "test_configset_delete", 1);
CollectionAdminRequest.createCollection("test_configset_delete", "conf1", 1, 1)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
// TODO - check exception response!
ConfigSetAdminRequest.Delete deleteConfigRequest = new ConfigSetAdminRequest.Delete();
deleteConfigRequest.setConfigSetName("conf1");
try {
deleteConfigRequest.process(cloudClient);
fail("The config deletion should cause an exception as it's currently being used by a collection.");
} catch (SolrException e) {
// Do nothing
}
expectThrows(SolrException.class, () -> {
deleteConfigRequest.process(cluster.getSolrClient());
});
// Clean up the collection
CollectionAdminRequest.Delete deleteCollectionRequest = new CollectionAdminRequest.Delete();
deleteCollectionRequest.setCollectionName("test_configset_delete");
deleteCollectionRequest.process(cloudClient);
}
}
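
expectThrows, inherited from LuceneTestCase, replaces the try/fail/catch idiom and returns the caught exception so follow-up assertions remain possible. A sketch, assuming a SolrCloudTestCase subclass in which configset "conf1" is still referenced by a live collection:

import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
import org.apache.solr.common.SolrException;
import org.junit.Test;

@Test
public void testDeleteInUseConfigSetFails() {
  ConfigSetAdminRequest.Delete delete = new ConfigSetAdminRequest.Delete();
  delete.setConfigSetName("conf1");
  // the returned exception could additionally be inspected for its code or message
  SolrException e = expectThrows(SolrException.class,
      () -> delete.process(cluster.getSolrClient()));
}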

View File

@ -16,62 +16,62 @@
*/
package org.apache.solr.cloud;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.handler.ReplicationHandler;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.update.processor.DocExpirationUpdateProcessorFactory; // jdoc
import org.apache.solr.update.processor.DocExpirationUpdateProcessorFactoryTest;
import org.apache.solr.update.processor.DocExpirationUpdateProcessorFactory;
import org.apache.solr.util.TimeOut;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.HashSet;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;
/** Test of {@link DocExpirationUpdateProcessorFactory} in a cloud setup */
@Slow // Has to do some sleeping to wait for a future expiration
public class DistribDocExpirationUpdateProcessorTest extends AbstractFullDistribZkTestBase {
public class DistribDocExpirationUpdateProcessorTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public DistribDocExpirationUpdateProcessorTest() {
configString = DocExpirationUpdateProcessorFactoryTest.CONFIG_XML;
schemaString = DocExpirationUpdateProcessorFactoryTest.SCHEMA_XML;
}
private static final String COLLECTION = "expiry";
@Override
protected String getCloudSolrConfig() {
return configString;
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(2)
.addConfig("conf", TEST_PATH().resolve("configsets").resolve("doc-expiry").resolve("conf"))
.configure();
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
}
@Test
public void test() throws Exception {
assertTrue("only one shard?!?!?!", 1 < shardToJetty.keySet().size());
log.info("number of shards: {}", shardToJetty.keySet().size());
handle.clear();
handle.put("maxScore", SKIPVAL);
handle.put("timestamp", SKIPVAL);
// some docs with no expiration
UpdateRequest req1 = new UpdateRequest();
for (int i = 1; i <= 100; i++) {
indexDoc(sdoc("id", i));
req1.add(sdoc("id", i));
}
commit();
waitForRecoveriesToFinish(false, 45);
req1.commit(cluster.getSolrClient(), COLLECTION);
// this doc better not already exist
waitForNoResults(0, params("q","id:999","rows","0","_trace","sanity_check"));
@ -81,10 +81,8 @@ public class DistribDocExpirationUpdateProcessorTest extends AbstractFullDistrib
final Map<String,Long> initIndexVersions = getIndexVersionOfAllReplicas();
assertTrue("WTF? no versions?", 0 < initIndexVersions.size());
// add a doc with a short TTL
indexDoc(sdoc("id", "999", "tTl_s","+30SECONDS"));
commit();
new UpdateRequest().add(sdoc("id", "999", "tTl_s","+30SECONDS")).commit(cluster.getSolrClient(), COLLECTION);
// wait for one doc to be deleted
waitForNoResults(180, params("q","id:999","rows","0","_trace","did_it_expire_yet"));
@ -99,20 +97,19 @@ public class DistribDocExpirationUpdateProcessorTest extends AbstractFullDistrib
final Set<String> shardsThatChange = new HashSet<String>();
int coresCompared = 0;
for (String shard : shardToJetty.keySet()) {
for (CloudJettyRunner replicaRunner : shardToJetty.get(shard)) {
coresCompared++;
DocCollection collectionState = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
for (Replica replica : collectionState.getReplicas()) {
coresCompared++;
String name = replica.getName();
String core = replica.getCoreName();
Long initVersion = initIndexVersions.get(core);
Long finalVersion = finalIndexVersions.get(core);
assertNotNull(name + ": no init version for core: " + core, initVersion);
assertNotNull(name + ": no final version for core: " + core, finalVersion);
String core = replicaRunner.coreNodeName;
Long initVersion = initIndexVersions.get(core);
Long finalVersion = finalIndexVersions.get(core);
assertNotNull(shard + ": no init version for core: " + core, initVersion);
assertNotNull(shard + ": no final version for core: " + core, finalVersion);
if (!initVersion.equals(finalVersion)) {
nodesThatChange.add(core + "("+shard+")");
shardsThatChange.add(shard);
}
if (!initVersion.equals(finalVersion)) {
nodesThatChange.add(core + "("+name+")");
shardsThatChange.add(name);
}
}
@ -136,32 +133,36 @@ public class DistribDocExpirationUpdateProcessorTest extends AbstractFullDistrib
private Map<String,Long> getIndexVersionOfAllReplicas() throws IOException, SolrServerException {
Map<String,Long> results = new HashMap<String,Long>();
for (List<CloudJettyRunner> listOfReplicas : shardToJetty.values()) {
for (CloudJettyRunner replicaRunner : listOfReplicas) {
DocCollection collectionState = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
for (Replica replica : collectionState.getReplicas()) {
String coreName = replica.getCoreName();
try (HttpSolrClient client = getHttpSolrClient(replica.getCoreUrl())) {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("command","indexversion");
params.set("_trace","getIndexVersion");
params.set("qt",ReplicationHandler.PATH);
params.set("command", "indexversion");
params.set("_trace", "getIndexVersion");
params.set("qt", ReplicationHandler.PATH);
QueryRequest req = new QueryRequest(params);
NamedList<Object> res = replicaRunner.client.solrClient.request(req);
assertNotNull("null response from server: " + replicaRunner.coreNodeName, res);
NamedList<Object> res = client.request(req);
assertNotNull("null response from server: " + coreName, res);
Object version = res.get("indexversion");
assertNotNull("null version from server: " + replicaRunner.coreNodeName, version);
assertTrue("version isn't a long: "+replicaRunner.coreNodeName,
version instanceof Long);
results.put(replicaRunner.coreNodeName, (Long)version);
assertNotNull("null version from server: " + coreName, version);
assertTrue("version isn't a long: " + coreName, version instanceof Long);
results.put(coreName, (Long) version);
long numDocs = replicaRunner.client.solrClient.query
(params("q","*:*","distrib","false","rows","0","_trace","counting_docs"))
.getResults().getNumFound();
log.info("core=" + replicaRunner.coreNodeName + "; ver=" + version +
"; numDocs=" + numDocs);
long numDocs = client.query(params("q", "*:*", "distrib", "false", "rows", "0", "_trace", "counting_docs"))
.getResults().getNumFound();
log.info("core=" + coreName + "; ver=" + version +
"; numDocs=" + numDocs);
}
}
return results;
}
@ -175,10 +176,10 @@ public class DistribDocExpirationUpdateProcessorTest extends AbstractFullDistrib
throws SolrServerException, InterruptedException, IOException {
final TimeOut timeout = new TimeOut(maxTimeLimitSeconds, TimeUnit.SECONDS);
long numFound = cloudClient.query(params).getResults().getNumFound();
long numFound = cluster.getSolrClient().query(COLLECTION, params).getResults().getNumFound();
while (0L < numFound && ! timeout.hasTimedOut()) {
Thread.sleep(Math.max(1, Math.min(5000, timeout.timeLeft(TimeUnit.MILLISECONDS))));
numFound = cloudClient.query(params).getResults().getNumFound();
numFound = cluster.getSolrClient().query(COLLECTION, params).getResults().getNumFound();
}
assertEquals("Give up waiting for no results: " + params,
0L, numFound);
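
waitForNoResults is an instance of the TimeOut polling idiom: re-query, sleep for at most five seconds and never past the deadline, repeat. A standalone sketch, assuming the params(...) helper from SolrTestCaseJ4; the query parameters are illustrative:

import java.util.concurrent.TimeUnit;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.util.TimeOut;

static long pollUntilNoHits(SolrClient client, String collection, int maxSeconds) throws Exception {
  TimeOut timeout = new TimeOut(maxSeconds, TimeUnit.SECONDS);
  long numFound = client.query(collection, params("q", "id:999", "rows", "0")).getResults().getNumFound();
  while (0L < numFound && !timeout.hasTimedOut()) {
    // sleep at least 1ms, at most 5s, and no longer than the time remaining
    Thread.sleep(Math.max(1, Math.min(5000, timeout.timeLeft(TimeUnit.MILLISECONDS))));
    numFound = client.query(collection, params("q", "id:999", "rows", "0")).getResults().getNumFound();
  }
  return numFound; // zero on success
}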

View File

@ -26,6 +26,7 @@ import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.JSONTestUtil;
@ -38,65 +39,67 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.CoreAdminResponse;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.update.processor.DistributedUpdateProcessor;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
import static org.apache.solr.update.processor.DistributedUpdateProcessor.DISTRIB_FROM;
import static org.apache.solr.update.processor.DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM;
@Slow
@SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
public class DistributedVersionInfoTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected static final int maxWaitSecsToSeeAllActive = 30;
@Test
public void test() throws Exception {
waitForThingsToLevelOut(30000);
log.info("DistributedVersionInfoTest RUNNING");
testReplicaVersionHandling();
log.info("DistributedVersionInfoTest succeeded ... shutting down now!");
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(3)
.addConfig("conf", configset("cloud-minimal"))
.configure();
}
protected void testReplicaVersionHandling() throws Exception {
final String testCollectionName = "c8n_vers_1x3";
String shardId = "shard1";
int rf = 3;
createCollectionRetry(testCollectionName, 1, rf, 1);
cloudClient.setDefaultCollection(testCollectionName);
private static final String COLLECTION = "c8n_vers_1x3";
final Replica leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, shardId);
List<Replica> notLeaders =
ensureAllReplicasAreActive(testCollectionName, shardId, 1, rf, maxWaitSecsToSeeAllActive);
@Test
public void testReplicaVersionHandling() throws Exception {
final String shardId = "shard1";
CollectionAdminRequest.createCollection(COLLECTION, "conf", 1, 3)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
final ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
stateReader.waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 1, 3));
final Replica leader = stateReader.getLeaderRetry(COLLECTION, shardId);
// start by reloading the empty collection so we try to calculate the max from an empty index
reloadCollection(leader, testCollectionName);
notLeaders =
ensureAllReplicasAreActive(testCollectionName, shardId, 1, rf, maxWaitSecsToSeeAllActive);
reloadCollection(leader, COLLECTION);
sendDoc(1);
cloudClient.commit();
cluster.getSolrClient().commit(COLLECTION);
// verify doc is on the leader and replica
assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 1, null);
final List<Replica> notLeaders = stateReader.getClusterState().getCollection(COLLECTION).getReplicas()
.stream()
.filter(r -> r.getCoreName().equals(leader.getCoreName()) == false)
.collect(Collectors.toList());
assertDocsExistInAllReplicas(leader, notLeaders, COLLECTION, 1, 1, null);
// get max version from the leader and replica
Replica replica = notLeaders.get(0);
@ -108,7 +111,7 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
try (SolrClient client = getHttpSolrClient(replica.getCoreUrl())) {
String docId = String.valueOf(1);
SolrInputDocument doc = new SolrInputDocument();
doc.setField(id, docId);
doc.setField("id", docId);
doc.setField("_version_", maxOnReplica - 1); // bad version!!!
// simulate what the leader does when sending a doc to a replica
@ -129,7 +132,7 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
assertEquals("older version should have been thrown away", maxOnReplica, docVersion);
}
reloadCollection(leader, testCollectionName);
reloadCollection(leader, COLLECTION);
maxOnLeader = getMaxVersionFromIndex(leader);
maxOnReplica = getMaxVersionFromIndex(replica);
@ -175,7 +178,7 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
for (int i=0; i < 3; i++) {
try {
reloadCollection(leader, testCollectionName);
reloadCollection(leader, COLLECTION);
} catch (Exception e) {}
try {
@ -215,7 +218,7 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
for (int i=0; i < 20; i++) {
try {
cloudClient.commit();
cluster.getSolrClient().commit(COLLECTION);
} catch (Exception e) {}
try {
@ -236,7 +239,7 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
committerThread.join();
deleteThread.join();
cloudClient.commit();
cluster.getSolrClient().commit(COLLECTION);
log.info("Total of "+deletedDocs.size()+" docs deleted");
@ -244,23 +247,14 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
maxOnReplica = getMaxVersionFromIndex(replica);
assertEquals("leader and replica should have same max version before reload", maxOnLeader, maxOnReplica);
reloadCollection(leader, testCollectionName);
reloadCollection(leader, COLLECTION);
maxOnLeader = getMaxVersionFromIndex(leader);
maxOnReplica = getMaxVersionFromIndex(replica);
assertEquals("leader and replica should have same max version after reload", maxOnLeader, maxOnReplica);
assertDocsExistInAllReplicas(notLeaders, testCollectionName, 1, 1000, deletedDocs);
assertDocsExistInAllReplicas(leader, notLeaders, COLLECTION, 1, 1000, deletedDocs);
// try to clean up
try {
CollectionAdminRequest.Delete req = new CollectionAdminRequest.Delete()
.setCollectionName(testCollectionName);
req.process(cloudClient);
} catch (Exception e) {
// don't fail the test
log.warn("Could not delete collection {} after test completed", testCollectionName);
}
}
protected long getMaxVersionFromIndex(Replica replica) throws IOException, SolrServerException {
@ -291,15 +285,12 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
return vers.longValue();
}
protected void assertDocsExistInAllReplicas(List<Replica> notLeaders,
protected void assertDocsExistInAllReplicas(Replica leader, List<Replica> notLeaders,
String testCollectionName,
int firstDocId,
int lastDocId,
Set<Integer> deletedDocs)
throws Exception
{
Replica leader =
cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
throws Exception {
HttpSolrClient leaderSolr = getHttpSolrClient(leader);
List<HttpSolrClient> replicas = new ArrayList<HttpSolrClient>(notLeaders.size());
for (Replica r : notLeaders)
@ -332,9 +323,9 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
protected void sendDoc(int docId) throws Exception {
SolrInputDocument doc = new SolrInputDocument();
doc.addField(id, String.valueOf(docId));
doc.addField("id", String.valueOf(docId));
doc.addField("a_t", "hello" + docId);
sendDocsWithRetry(Collections.singletonList(doc), 2, 3, 100);
AbstractFullDistribZkTestBase.sendDocsWithRetry(cluster.getSolrClient(), COLLECTION, Collections.singletonList(doc), 2, 3, 100);
}
/**
@ -346,7 +337,7 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
QueryRequest qr = new QueryRequest(params("qt", "/get", "id", docId, "distrib", "false", "fl", "id,_version_"));
NamedList rsp = solr.request(qr);
SolrDocument doc = (SolrDocument)rsp.get("doc");
String match = JSONTestUtil.matchObj("/id", doc, new Integer(docId));
String match = JSONTestUtil.matchObj("/id", doc, docId);
assertTrue("Doc with id=" + docId + " not found in " + solr.getBaseURL() +
" due to: " + match + "; rsp=" + rsp, match == null);
@ -370,15 +361,8 @@ public class DistributedVersionInfoTest extends AbstractFullDistribZkTestBase {
// send reload command for the collection
log.info("Sending RELOAD command for " + testCollectionName);
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionParams.CollectionAction.RELOAD.toString());
params.set("name", testCollectionName);
QueryRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
log.info("Sending reload command to " + testCollectionName);
client.request(request);
CollectionAdminRequest.reloadCollection(testCollectionName)
.process(client);
Thread.sleep(2000); // reload can take a short while
// verify reload is done, waiting up to 30 seconds for slow test environments
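
The hand-assembled /admin/collections RELOAD request above gives way to the CollectionAdminRequest builder; a one-line sketch:

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

static void reload(SolrClient client, String collection) throws Exception {
  // issues action=RELOAD&name=<collection> against /admin/collections
  CollectionAdminRequest.reloadCollection(collection).process(client);
}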

View File

@ -16,19 +16,27 @@
*/
package org.apache.solr.handler.component;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.junit.Test;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.StrUtils;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test for QueryComponent's distributed querying optimization.
@ -38,103 +46,122 @@ import java.util.Set;
*
* @see QueryComponent
*/
public class DistributedQueryComponentOptimizationTest extends AbstractFullDistribZkTestBase {
public class DistributedQueryComponentOptimizationTest extends SolrCloudTestCase {
private static final String COLLECTION = "optimize";
private static final String SHARD1 = "shard1";
private static final String SHARD2 = "shard2";
private static final int sliceCount = 3;
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(3)
.withSolrXml(TEST_PATH().resolve("solr-trackingshardhandler.xml"))
.addConfig("conf", configset("cloud-dynamic"))
.configure();
CollectionAdminRequest.createCollection(COLLECTION, "conf", 3, 1)
.setMaxShardsPerNode(1)
.processAndWait(cluster.getSolrClient(), DEFAULT_TIMEOUT);
cluster.getSolrClient().waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, sliceCount, 1));
new UpdateRequest()
.add(sdoc(id, "1", "text", "a", "test_sS", "21", "payload", ByteBuffer.wrap(new byte[]{0x12, 0x62, 0x15})))
.add(sdoc(id, "2", "text", "b", "test_sS", "22", "payload", ByteBuffer.wrap(new byte[]{0x25, 0x21, 0x16}))) // 5
.add(sdoc(id, "3", "text", "a", "test_sS", "23", "payload", ByteBuffer.wrap(new byte[]{0x35, 0x32, 0x58}))) // 8
.add(sdoc(id, "4", "text", "b", "test_sS", "24", "payload", ByteBuffer.wrap(new byte[]{0x25, 0x21, 0x15}))) // 4
.add(sdoc(id, "5", "text", "a", "test_sS", "25", "payload", ByteBuffer.wrap(new byte[]{0x35, 0x35, 0x10, 0x00}))) // 9
.add(sdoc(id, "6", "text", "c", "test_sS", "26", "payload", ByteBuffer.wrap(new byte[]{0x1a, 0x2b, 0x3c, 0x00, 0x00, 0x03}))) // 3
.add(sdoc(id, "7", "text", "c", "test_sS", "27", "payload", ByteBuffer.wrap(new byte[]{0x00, 0x3c, 0x73}))) // 1
.add(sdoc(id, "8", "text", "c", "test_sS", "28", "payload", ByteBuffer.wrap(new byte[]{0x59, 0x2d, 0x4d}))) // 11
.add(sdoc(id, "9", "text", "a", "test_sS", "29", "payload", ByteBuffer.wrap(new byte[]{0x39, 0x79, 0x7a}))) // 10
.add(sdoc(id, "10", "text", "b", "test_sS", "30", "payload", ByteBuffer.wrap(new byte[]{0x31, 0x39, 0x7c}))) // 6
.add(sdoc(id, "11", "text", "d", "test_sS", "31", "payload", ByteBuffer.wrap(new byte[]{(byte) 0xff, (byte) 0xaf, (byte) 0x9c}))) // 13
.add(sdoc(id, "12", "text", "d", "test_sS", "32", "payload", ByteBuffer.wrap(new byte[]{0x34, (byte) 0xdd, 0x4d}))) // 7
.add(sdoc(id, "13", "text", "d", "test_sS", "33", "payload", ByteBuffer.wrap(new byte[]{(byte) 0x80, 0x11, 0x33}))) // 12
// SOLR-6545, wild card field list
.add(sdoc(id, "19", "text", "d", "cat_a_sS", "1", "dynamic_s", "2", "payload", ByteBuffer.wrap(new byte[]{(byte) 0x80, 0x11, 0x34})))
.commit(cluster.getSolrClient(), COLLECTION);
public DistributedQueryComponentOptimizationTest() {
stress = 0;
schemaString = "schema-custom-field.xml";
}
@Override
protected String getSolrXml() {
return "solr-trackingshardhandler.xml";
private static final String id = "id";
@Test
public void testBasics() throws Exception {
QueryResponse rsp;
rsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20"));
assertFieldValues(rsp.getResults(), id, "7", "1", "6", "4", "2", "10", "12", "3", "5", "9", "8", "13", "19", "11");
assertFieldValues(rsp.getResults(), "test_sS", "27", "21", "26", "24", "22", "30", "32", "23", "25", "29", "28", "33", null, "31");
rsp = cluster.getSolrClient().query(COLLECTION, new SolrQuery("q", "*:*", "fl", "id,score", "sort", "payload desc", "rows", "20"));
assertFieldValues(rsp.getResults(), id, "11", "19", "13", "8", "9", "5", "3", "12", "10", "2", "4", "6", "1", "7");
}
@Test
@ShardsFixed(num = 3)
public void test() throws Exception {
waitForThingsToLevelOut(30);
del("*:*");
public void testFieldList() throws Exception {
index(id, "1", "text", "a", "test_sS", "21", "payload", ByteBuffer.wrap(new byte[]{0x12, 0x62, 0x15}), // 2
// quick check to prove "*" dynamicField hasn't been broken by somebody mucking with schema
"asdfasdf_field_should_match_catchall_dynamic_field_adsfasdf", "value");
index(id, "2", "text", "b", "test_sS", "22", "payload", ByteBuffer.wrap(new byte[]{0x25, 0x21, 0x16})); // 5
index(id, "3", "text", "a", "test_sS", "23", "payload", ByteBuffer.wrap(new byte[]{0x35, 0x32, 0x58})); // 8
index(id, "4", "text", "b", "test_sS", "24", "payload", ByteBuffer.wrap(new byte[]{0x25, 0x21, 0x15})); // 4
index(id, "5", "text", "a", "test_sS", "25", "payload", ByteBuffer.wrap(new byte[]{0x35, 0x35, 0x10, 0x00})); // 9
index(id, "6", "text", "c", "test_sS", "26", "payload", ByteBuffer.wrap(new byte[]{0x1a, 0x2b, 0x3c, 0x00, 0x00, 0x03})); // 3
index(id, "7", "text", "c", "test_sS", "27", "payload", ByteBuffer.wrap(new byte[]{0x00, 0x3c, 0x73})); // 1
index(id, "8", "text", "c", "test_sS", "28", "payload", ByteBuffer.wrap(new byte[]{0x59, 0x2d, 0x4d})); // 11
index(id, "9", "text", "a", "test_sS", "29", "payload", ByteBuffer.wrap(new byte[]{0x39, 0x79, 0x7a})); // 10
index(id, "10", "text", "b", "test_sS", "30", "payload", ByteBuffer.wrap(new byte[]{0x31, 0x39, 0x7c})); // 6
index(id, "11", "text", "d", "test_sS", "31", "payload", ByteBuffer.wrap(new byte[]{(byte) 0xff, (byte) 0xaf, (byte) 0x9c})); // 13
index(id, "12", "text", "d", "test_sS", "32", "payload", ByteBuffer.wrap(new byte[]{0x34, (byte) 0xdd, 0x4d})); // 7
index(id, "13", "text", "d", "test_sS", "33", "payload", ByteBuffer.wrap(new byte[]{(byte) 0x80, 0x11, 0x33})); // 12
commit();
QueryResponse rsp;
rsp = query("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
assertFieldValues(rsp.getResults(), "test_sS", "27", "21", "26", "24", "22", "30", "32", "23", "25", "29", "28", "33", "31");
rsp = query("q", "*:*", "fl", "id,score", "sort", "payload desc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 11, 13, 8, 9, 5, 3, 12, 10, 2, 4, 6, 1, 7);
// works with just fl=id as well
rsp = query("q", "*:*", "fl", "id", "sort", "payload desc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 11, 13, 8, 9, 5, 3, 12, 10, 2, 4, 6, 1, 7);
QueryResponse rsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "id", "sort", "payload desc", "rows", "20"));
assertFieldValues(rsp.getResults(), id, "11", "19", "13", "8", "9", "5", "3", "12", "10", "2", "4", "6", "1", "7");
rsp = query("q", "*:*", "fl", "id,score", "sort", "payload asc", "rows", "20");
assertFieldValues(rsp.getResults(), id, 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
rsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "id,score", "sort", "payload asc", "rows", "20"));
assertFieldValues(rsp.getResults(), id, "7", "1", "6", "4", "2", "10", "12", "3", "5", "9", "8", "13", "19", "11");
}
rsp = query("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20", "distrib.singlePass", "true");
assertFieldValues(rsp.getResults(), id, 7, 1, 6, 4, 2, 10, 12, 3, 5, 9, 8, 13, 11);
assertFieldValues(rsp.getResults(), "test_sS", "27", "21", "26", "24", "22", "30", "32", "23", "25", "29", "28", "33", "31");
@Test
public void testDistribSinglePass() throws Exception {
QueryResponse nonDistribRsp = query("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20");
QueryResponse rsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20", "distrib.singlePass", "true"));
assertFieldValues(rsp.getResults(), id, "7", "1", "6", "4", "2", "10", "12", "3", "5", "9", "8", "13", "19", "11");
assertFieldValues(rsp.getResults(), "test_sS", "27", "21", "26", "24", "22", "30", "32", "23", "25", "29", "28", "33", null, "31");
QueryResponse nonDistribRsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20"));
compareResponses(rsp, nonDistribRsp); // make sure distrib and distrib.singlePass return the same thing
nonDistribRsp = query("q", "*:*", "fl", "score", "sort", "payload asc", "rows", "20");
rsp = query("q", "*:*", "fl", "score", "sort", "payload asc", "rows", "20", "distrib.singlePass", "true");
nonDistribRsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "score", "sort", "payload asc", "rows", "20"));
rsp = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("q", "*:*", "fl", "score", "sort", "payload asc", "rows", "20", "distrib.singlePass", "true"));
compareResponses(rsp, nonDistribRsp); // make sure distrib and distrib.singlePass return the same thing
}
@Test
public void testOptimizations() throws Exception {
// verify that the optimization actually works
queryWithAsserts("q", "*:*", "fl", "id", "sort", "payload desc", "rows", "20"); // id only is optimized by default
queryWithAsserts("q", "*:*", "fl", "id,score", "sort", "payload desc", "rows", "20"); // id,score only is optimized by default
queryWithAsserts("q", "*:*", "fl", "score", "sort", "payload asc", "rows", "20", "distrib.singlePass", "true");
// SOLR-6545, wild card field list
index(id, "19", "text", "d", "cat_a_sS", "1", "dynamic", "2", "payload", ByteBuffer.wrap(new byte[]{(byte) 0x80, 0x11, 0x34}));
commit();
}
nonDistribRsp = queryWithAsserts("q", "id:19", "fl", "id,*a_sS", "sort", "payload asc");
rsp = queryWithAsserts("q", "id:19", "fl", "id,*a_sS", "sort", "payload asc", "distrib.singlePass", "true");
@Test
public void testWildcardFieldList() throws Exception {
assertFieldValues(nonDistribRsp.getResults(), "id", 19);
assertFieldValues(rsp.getResults(), "id", 19);
QueryResponse nonDistribRsp = queryWithAsserts("q", "id:19", "fl", "id,*a_sS", "sort", "payload asc");
QueryResponse rsp = queryWithAsserts("q", "id:19", "fl", "id,*a_sS", "sort", "payload asc", "distrib.singlePass", "true");
nonDistribRsp = queryWithAsserts("q", "id:19", "fl", "id,dynamic,cat*", "sort", "payload asc");
rsp = queryWithAsserts("q", "id:19", "fl", "id,dynamic,cat*", "sort", "payload asc", "distrib.singlePass", "true");
assertFieldValues(nonDistribRsp.getResults(), "id", 19);
assertFieldValues(rsp.getResults(), "id", 19);
assertFieldValues(nonDistribRsp.getResults(), "id", "19");
assertFieldValues(rsp.getResults(), "id", "19");
nonDistribRsp = queryWithAsserts("q", "id:19", "fl", "id,dynamic_s,cat*", "sort", "payload asc");
rsp = queryWithAsserts("q", "id:19", "fl", "id,dynamic_s,cat*", "sort", "payload asc", "distrib.singlePass", "true");
assertFieldValues(nonDistribRsp.getResults(), "id", "19");
assertFieldValues(rsp.getResults(), "id", "19");
queryWithAsserts("q", "id:19", "fl", "id,*a_sS", "sort", "payload asc", "distrib.singlePass", "true");
queryWithAsserts("q", "id:19", "fl", "id,dynamic,cat*", "sort", "payload asc", "distrib.singlePass", "true");
// see SOLR-6795, distrib.singlePass=true would return score even when not asked for
handle.clear();
handle.put("timestamp", SKIPVAL);
handle.put("_version_", SKIPVAL);
// we don't need to compare maxScore because most distributed requests return it anyway (just because they have score already)
handle.put("maxScore", SKIPVAL);
// this trips the queryWithAsserts function because it uses a custom parser, so just query directly
query("q", "{!func}id", ShardParams.DISTRIB_SINGLE_PASS, "true");
// fix for a bug where not all fields are returned if using multiple fl parameters, see SOLR-6796
queryWithAsserts("q", "*:*", "fl", "id", "fl", "dynamic", "sort", "payload desc", ShardParams.DISTRIB_SINGLE_PASS, "true");
// missing fl with sort
queryWithAsserts("q", "*:*", "sort", "payload desc", ShardParams.DISTRIB_SINGLE_PASS, "true");
queryWithAsserts("q", "*:*", "sort", "payload desc");
queryWithAsserts("q", "id:19", "fl", "id,dynamic_s,cat*", "sort", "payload asc", "distrib.singlePass", "true");
// fl=*
queryWithAsserts("q", "*:*", "fl", "*", "sort", "payload desc", ShardParams.DISTRIB_SINGLE_PASS, "true");
@ -145,6 +172,33 @@ public class DistributedQueryComponentOptimizationTest extends AbstractFullDistr
queryWithAsserts("q", "*:*", "fl", "*,score", "sort", "payload desc");
}
@Test
public void testScoreAlwaysReturned() throws Exception {
// see SOLR-6795, distrib.singlePass=true would return score even when not asked for
queryWithAsserts("q", "id:19", ShardParams.DISTRIB_SINGLE_PASS, "true");
}
@Test
public void testMultipleFlParams() throws Exception {
// fix for a bug where not all fields are returned if using multiple fl parameters, see SOLR-6796
queryWithAsserts("q", "*:*", "fl", "id", "fl", "dynamic_s", "sort", "payload desc", ShardParams.DISTRIB_SINGLE_PASS, "true");
}
@Test
public void testMissingFieldListWithSort() throws Exception {
// missing fl with sort
queryWithAsserts("q", "*:*", "sort", "payload desc", ShardParams.DISTRIB_SINGLE_PASS, "true");
queryWithAsserts("q", "*:*", "sort", "payload desc");
}
private static void compareResponses(QueryResponse rsp1, QueryResponse rsp2) {
Map<String, Integer> skipValues = new HashMap<>();
skipValues.put("timestamp", BaseDistributedSearchTestCase.SKIPVAL);
skipValues.put("_version_", BaseDistributedSearchTestCase.SKIPVAL);
skipValues.put("maxScore", BaseDistributedSearchTestCase.SKIPVAL);
BaseDistributedSearchTestCase.compare(rsp1.getResponse(), rsp2.getResponse(), 0, skipValues);
}
/**
* This test now asserts that every distrib.singlePass query:
* <ol>
@ -166,18 +220,13 @@ public class DistributedQueryComponentOptimizationTest extends AbstractFullDistr
* <p>
* and also asserts that each query which requests id or score or both behaves exactly like a single pass query
*/
private QueryResponse queryWithAsserts(Object... q) throws Exception {
private QueryResponse queryWithAsserts(String... q) throws Exception {
TrackingShardHandlerFactory.RequestTrackingQueue trackingQueue = new TrackingShardHandlerFactory.RequestTrackingQueue();
// the jettys list doesn't include the control jetty, which is exactly what we need here
TrackingShardHandlerFactory.setTrackingQueue(jettys, trackingQueue);
TrackingShardHandlerFactory.setTrackingQueue(cluster, trackingQueue);
// let's add debug=track to such requests so we can use DebugComponent responses for assertions
Object[] qq = new Object[q.length + 2];
System.arraycopy(q, 0, qq, 0, q.length);
qq[qq.length - 2] = "debug";
qq[qq.length - 1] = "track";
handle.put("debug", SKIPVAL);
QueryResponse response = query(qq);
QueryResponse response = cluster.getSolrClient().query(COLLECTION, new SolrQuery("debug", "track", q));
Map<String, List<TrackingShardHandlerFactory.ShardRequestAndParams>> requests = trackingQueue.getAllRequests();
int numRequests = getNumRequests(requests);
@ -223,9 +272,9 @@ public class DistributedQueryComponentOptimizationTest extends AbstractFullDistr
// it must still be fetched in this phase to merge correctly
Set<String> reqAndIdScoreFields = new HashSet<>(fls);
reqAndIdScoreFields.addAll(idScoreFields);
assertParamsEquals(trackingQueue, DEFAULT_COLLECTION, SHARD1,
assertParamsEquals(trackingQueue, COLLECTION, SHARD1,
CommonParams.FL, ShardRequest.PURPOSE_GET_TOP_IDS, reqAndIdScoreFields.toArray(new String[reqAndIdScoreFields.size()]));
assertParamsEquals(trackingQueue, DEFAULT_COLLECTION, SHARD2,
assertParamsEquals(trackingQueue, COLLECTION, SHARD2,
CommonParams.FL, ShardRequest.PURPOSE_GET_TOP_IDS, reqAndIdScoreFields.toArray(new String[reqAndIdScoreFields.size()]));
} else {
// we are assuming there are facet refinement or distributed idf requests here
@ -234,15 +283,15 @@ public class DistributedQueryComponentOptimizationTest extends AbstractFullDistr
numRequests <= sliceCount * 2);
// only id and/or score should be requested
assertParamsEquals(trackingQueue, DEFAULT_COLLECTION, SHARD1,
assertParamsEquals(trackingQueue, COLLECTION, SHARD1,
CommonParams.FL, ShardRequest.PURPOSE_GET_TOP_IDS, idScoreFields.toArray(new String[idScoreFields.size()]));
assertParamsEquals(trackingQueue, DEFAULT_COLLECTION, SHARD2,
assertParamsEquals(trackingQueue, COLLECTION, SHARD2,
CommonParams.FL, ShardRequest.PURPOSE_GET_TOP_IDS, idScoreFields.toArray(new String[idScoreFields.size()]));
// only originally requested fields must be requested in GET_FIELDS request
assertParamsEquals(trackingQueue, DEFAULT_COLLECTION, SHARD1,
assertParamsEquals(trackingQueue, COLLECTION, SHARD1,
CommonParams.FL, ShardRequest.PURPOSE_GET_FIELDS, fls.toArray(new String[fls.size()]));
assertParamsEquals(trackingQueue, DEFAULT_COLLECTION, SHARD2,
assertParamsEquals(trackingQueue, COLLECTION, SHARD2,
CommonParams.FL, ShardRequest.PURPOSE_GET_FIELDS, fls.toArray(new String[fls.size()]));
}
@ -258,7 +307,8 @@ public class DistributedQueryComponentOptimizationTest extends AbstractFullDistr
}
private void assertParamsEquals(TrackingShardHandlerFactory.RequestTrackingQueue trackingQueue, String collection, String shard, String paramName, int purpose, String... values) {
TrackingShardHandlerFactory.ShardRequestAndParams getByIdRequest = trackingQueue.getShardRequestByPurpose(cloudClient.getZkStateReader(), collection, shard, purpose);
TrackingShardHandlerFactory.ShardRequestAndParams getByIdRequest
= trackingQueue.getShardRequestByPurpose(cluster.getSolrClient().getZkStateReader(), collection, shard, purpose);
assertParamsEquals(getByIdRequest, paramName, values);
}
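
The core of testDistribSinglePass is comparing the default two-pass fetch against distrib.singlePass=true; a sketch of that comparison, reusing this test's COLLECTION constant and compareResponses helper:

QueryResponse twoPass = cluster.getSolrClient().query(COLLECTION,
    new SolrQuery("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20"));
QueryResponse singlePass = cluster.getSolrClient().query(COLLECTION,
    new SolrQuery("q", "*:*", "fl", "id,test_sS,score", "sort", "payload asc", "rows", "20",
        ShardParams.DISTRIB_SINGLE_PASS, "true"));
compareResponses(singlePass, twoPass); // both fetch strategies must return the same documents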

View File

@ -19,129 +19,130 @@ package org.apache.solr.search.mlt;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.cloud.DocCollection;
import org.junit.BeforeClass;
import org.junit.Test;
public class CloudMLTQParserTest extends AbstractFullDistribZkTestBase {
public class CloudMLTQParserTest extends SolrCloudTestCase {
public CloudMLTQParserTest() {
sliceCount = 2;
configString = "solrconfig.xml";
schemaString = "schema.xml";
}
@BeforeClass
public static void setupCluster() throws Exception {
configureCluster(2)
.addConfig("conf", configset("cloud-dynamic"))
.configure();
@Override
protected String getCloudSolrConfig() {
return configString;
}
final CloudSolrClient client = cluster.getSolrClient();
@Test
@ShardsFixed(num = 2)
public void test() throws Exception {
waitForRecoveriesToFinish(false);
CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1)
.processAndWait(client, DEFAULT_TIMEOUT);
client.waitForState(COLLECTION, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
(n, c) -> DocCollection.isFullyActive(n, c, 2, 1));
String id = "id";
delQ("*:*");
String FIELD1 = "lowerfilt" ;
String FIELD2 = "lowerfilt1" ;
indexDoc(sdoc(id, "1", FIELD1, "toyota"));
indexDoc(sdoc(id, "2", FIELD1, "chevrolet"));
indexDoc(sdoc(id, "3", FIELD1, "bmw usa"));
indexDoc(sdoc(id, "4", FIELD1, "ford"));
indexDoc(sdoc(id, "5", FIELD1, "ferrari"));
indexDoc(sdoc(id, "6", FIELD1, "jaguar"));
indexDoc(sdoc(id, "7", FIELD1, "mclaren moon or the moon and moon moon shine and the moon but moon was good foxes too"));
indexDoc(sdoc(id, "8", FIELD1, "sonata"));
indexDoc(sdoc(id, "9", FIELD1, "The quick red fox jumped over the lazy big and large brown dogs."));
indexDoc(sdoc(id, "10", FIELD1, "blue"));
indexDoc(sdoc(id, "12", FIELD1, "glue"));
indexDoc(sdoc(id, "13", FIELD1, "The quote red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "14", FIELD1, "The quote red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "15", FIELD1, "The fat red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "16", FIELD1, "The slim red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "17", FIELD1, "The quote red fox jumped moon over the lazy brown dogs moon. Of course moon. Foxes and moon come back to the foxes and moon"));
indexDoc(sdoc(id, "18", FIELD1, "The quote red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "19", FIELD1, "The hose red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "20", FIELD1, "The quote red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "21", FIELD1, "The court red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "22", FIELD1, "The quote red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "23", FIELD1, "The quote red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "24", FIELD1, "The file red fox jumped over the lazy brown dogs."));
indexDoc(sdoc(id, "25", FIELD1, "rod fix"));
indexDoc(sdoc(id, "26", FIELD1, "bmw usa 328i"));
indexDoc(sdoc(id, "27", FIELD1, "bmw usa 535i"));
indexDoc(sdoc(id, "28", FIELD1, "bmw 750Li"));
indexDoc(sdoc(id, "29", FIELD1, "bmw usa",
FIELD2, "red green blue"));
indexDoc(sdoc(id, "30", FIELD1, "The quote red fox jumped over the lazy brown dogs.",
FIELD2, "red green yellow"));
indexDoc(sdoc(id, "31", FIELD1, "The fat red fox jumped over the lazy brown dogs.",
FIELD2, "green blue yellow"));
indexDoc(sdoc(id, "32", FIELD1, "The slim red fox jumped over the lazy brown dogs.",
FIELD2, "yellow white black"));
String FIELD1 = "lowerfilt_u" ;
String FIELD2 = "lowerfilt1_u" ;
commit();
new UpdateRequest()
.add(sdoc(id, "1", FIELD1, "toyota"))
.add(sdoc(id, "2", FIELD1, "chevrolet"))
.add(sdoc(id, "3", FIELD1, "bmw usa"))
.add(sdoc(id, "4", FIELD1, "ford"))
.add(sdoc(id, "5", FIELD1, "ferrari"))
.add(sdoc(id, "6", FIELD1, "jaguar"))
.add(sdoc(id, "7", FIELD1, "mclaren moon or the moon and moon moon shine and the moon but moon was good foxes too"))
.add(sdoc(id, "8", FIELD1, "sonata"))
.add(sdoc(id, "9", FIELD1, "The quick red fox jumped over the lazy big and large brown dogs."))
.add(sdoc(id, "10", FIELD1, "blue"))
.add(sdoc(id, "12", FIELD1, "glue"))
.add(sdoc(id, "13", FIELD1, "The quote red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "14", FIELD1, "The quote red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "15", FIELD1, "The fat red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "16", FIELD1, "The slim red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "17", FIELD1,
"The quote red fox jumped moon over the lazy brown dogs moon. Of course moon. Foxes and moon come back to the foxes and moon"))
.add(sdoc(id, "18", FIELD1, "The quote red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "19", FIELD1, "The hose red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "20", FIELD1, "The quote red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "21", FIELD1, "The court red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "22", FIELD1, "The quote red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "23", FIELD1, "The quote red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "24", FIELD1, "The file red fox jumped over the lazy brown dogs."))
.add(sdoc(id, "25", FIELD1, "rod fix"))
.add(sdoc(id, "26", FIELD1, "bmw usa 328i"))
.add(sdoc(id, "27", FIELD1, "bmw usa 535i"))
.add(sdoc(id, "28", FIELD1, "bmw 750Li"))
.add(sdoc(id, "29", FIELD1, "bmw usa", FIELD2, "red green blue"))
.add(sdoc(id, "30", FIELD1, "The quote red fox jumped over the lazy brown dogs.", FIELD2, "red green yellow"))
.add(sdoc(id, "31", FIELD1, "The fat red fox jumped over the lazy brown dogs.", FIELD2, "green blue yellow"))
.add(sdoc(id, "32", FIELD1, "The slim red fox jumped over the lazy brown dogs.", FIELD2, "yellow white black"))
.commit(client, COLLECTION);
}
handle.clear();
handle.put("QTime", SKIPVAL);
handle.put("timestamp", SKIPVAL);
handle.put("maxScore", SKIPVAL);
public static final String COLLECTION = "mlt-collection";
ModifiableSolrParams params = new ModifiableSolrParams();
@Test
public void testMLTQParser() throws Exception {
params.set(CommonParams.Q, "{!mlt qf=lowerfilt}17");
QueryResponse queryResponse = cloudClient.query(params);
QueryResponse queryResponse = cluster.getSolrClient()
.query(COLLECTION, new SolrQuery("{!mlt qf=lowerfilt_u}17").setShowDebugInfo(true));
SolrDocumentList solrDocuments = queryResponse.getResults();
int[] expectedIds = new int[]{7, 13, 14, 15, 16, 20, 22, 24, 32, 9};
int[] actualIds = new int[10];
int i = 0;
for (SolrDocument solrDocument : solrDocuments) {
actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
}
assertArrayEquals(expectedIds, actualIds);
params = new ModifiableSolrParams();
params.set(CommonParams.Q, "{!mlt qf=lowerfilt boost=true}17");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
expectedIds = new int[]{7, 13, 14, 15, 16, 20, 22, 24, 32, 9};
actualIds = new int[solrDocuments.size()];
i = 0;
}
@Test
public void testBoost() throws Exception {
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION, new SolrQuery("{!mlt qf=lowerfilt_u boost=true}17"));
SolrDocumentList solrDocuments = queryResponse.getResults();
int[] expectedIds = new int[]{7, 13, 14, 15, 16, 20, 22, 24, 32, 9};
int[] actualIds = new int[solrDocuments.size()];
int i = 0;
for (SolrDocument solrDocument : solrDocuments) {
actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
}
assertArrayEquals(expectedIds, actualIds);
params = new ModifiableSolrParams();
params.set(CommonParams.Q, "{!mlt qf=lowerfilt mindf=0 mintf=1}3");
params.set(CommonParams.DEBUG, "true");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
expectedIds = new int[]{29, 27, 26, 28};
actualIds = new int[solrDocuments.size()];
i = 0;
}
@Test
public void testMinDF() throws Exception {
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("{!mlt qf=lowerfilt_u mindf=0 mintf=1}3").setShowDebugInfo(true));
SolrDocumentList solrDocuments = queryResponse.getResults();
int[] expectedIds = new int[]{29, 27, 26, 28};
int[] actualIds = new int[solrDocuments.size()];
int i = 0;
for (SolrDocument solrDocument : solrDocuments) {
actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
}
assertArrayEquals(expectedIds, actualIds);
String[] expectedQueryStrings = new String[]{
"(+(lowerfilt:bmw lowerfilt:usa) -id:3)/no_coord",
"(+(lowerfilt:usa lowerfilt:bmw) -id:3)/no_coord"};
"(+(lowerfilt_u:bmw lowerfilt_u:usa) -id:3)/no_coord",
"(+(lowerfilt_u:usa lowerfilt_u:bmw) -id:3)/no_coord"};
String[] actualParsedQueries;
if(queryResponse.getDebugMap().get("parsedquery") instanceof String) {
if (queryResponse.getDebugMap().get("parsedquery") instanceof String) {
String parsedQueryString = (String) queryResponse.getDebugMap().get("parsedquery");
assertTrue(parsedQueryString.equals(expectedQueryStrings[0]) || parsedQueryString.equals(expectedQueryStrings[1]));
} else {
@ -150,56 +151,68 @@ public class CloudMLTQParserTest extends AbstractFullDistribZkTestBase {
Arrays.sort(actualParsedQueries);
assertArrayEquals(expectedQueryStrings, actualParsedQueries);
}
}
@Test
public void testMultipleFields() throws Exception {
params = new ModifiableSolrParams();
params.set(CommonParams.Q, "{!mlt qf=lowerfilt,lowerfilt1 mindf=0 mintf=1}26");
params.set(CommonParams.DEBUG, "true");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
expectedIds = new int[]{27, 3, 29, 28};
actualIds = new int[solrDocuments.size()];
i = 0;
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("{!mlt qf=lowerfilt_u,lowerfilt1_u mindf=0 mintf=1}26"));
SolrDocumentList solrDocuments = queryResponse.getResults();
int[] expectedIds = new int[]{27, 3, 29, 28};
int[] actualIds = new int[solrDocuments.size()];
int i = 0;
for (SolrDocument solrDocument : solrDocuments) {
actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
}
assertArrayEquals(expectedIds, actualIds);
params = new ModifiableSolrParams();
}
@Test
public void testHighDFValue() throws Exception {
// Test out a high value of df and make sure nothing matches.
params.set(CommonParams.Q, "{!mlt qf=lowerfilt mindf=20 mintf=1}3");
params.set(CommonParams.DEBUG, "true");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("{!mlt qf=lowerfilt_u mindf=20 mintf=1}3"));
SolrDocumentList solrDocuments = queryResponse.getResults();
assertEquals("Expected to match 0 documents with a mindf of 20 but found more", solrDocuments.size(), 0);
params = new ModifiableSolrParams();
}
@Test
public void testHighWLValue() throws Exception {
// Test out a high value of wl and make sure nothing matches.
params.set(CommonParams.Q, "{!mlt qf=lowerfilt minwl=4 mintf=1}3");
params.set(CommonParams.DEBUG, "true");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("{!mlt qf=lowerfilt_u minwl=4 mintf=1}3"));
SolrDocumentList solrDocuments = queryResponse.getResults();
assertEquals("Expected to match 0 documents with a minwl of 4 but found more", solrDocuments.size(), 0);
params = new ModifiableSolrParams();
}
@Test
public void testLowMinWLValue() throws Exception {
// Test out a low enough value of minwl and make sure we get the expected matches.
params.set(CommonParams.Q, "{!mlt qf=lowerfilt minwl=3 mintf=1}3");
params.set(CommonParams.DEBUG, "true");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION,
new SolrQuery("{!mlt qf=lowerfilt_u minwl=3 mintf=1}3"));
SolrDocumentList solrDocuments = queryResponse.getResults();
assertEquals("Expected to match 4 documents with a minwl of 3 but found more", 4, solrDocuments.size());
}
@Test
public void testUnstoredAndUnanalyzedFieldsAreIgnored() throws Exception {
// Assert that {!mlt}id does not throw an exception, i.e. that implicitly only fields which are stored and have an
// explicit analyzer are used for MLT query construction.
params = new ModifiableSolrParams();
params.set(CommonParams.Q, "{!mlt}20");
queryResponse = queryServer(params);
solrDocuments = queryResponse.getResults();
actualIds = new int[solrDocuments.size()];
expectedIds = new int[]{13, 14, 15, 16, 22, 24, 32, 18, 19, 21};
i = 0;
QueryResponse queryResponse = cluster.getSolrClient().query(COLLECTION, new SolrQuery("{!mlt}20"));
SolrDocumentList solrDocuments = queryResponse.getResults();
int[] actualIds = new int[solrDocuments.size()];
int[] expectedIds = new int[]{13, 14, 15, 16, 22, 24, 32, 18, 19, 21};
int i = 0;
StringBuilder sb = new StringBuilder();
for (SolrDocument solrDocument : solrDocuments) {
actualIds[i++] = Integer.valueOf(String.valueOf(solrDocument.getFieldValue("id")));
@ -208,15 +221,9 @@ public class CloudMLTQParserTest extends AbstractFullDistribZkTestBase {
assertArrayEquals(expectedIds, actualIds);
}
@Test(expected=SolrException.class)
public void testInvalidDocument() throws IOException {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CommonParams.Q, "{!mlt qf=lowerfilt}999999");
try {
cloudClient.query(params);
fail("The above query is supposed to throw an exception.");
} catch (SolrServerException e) {
// Do nothing.
}
public void testInvalidSourceDocument() throws IOException {
SolrException e = expectThrows(SolrException.class, () -> {
cluster.getSolrClient().query(COLLECTION, new SolrQuery("{!mlt qf=lowerfilt_u}999999"));
});
}
}
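
Issuing an MLT query through SolrJ now needs no ModifiableSolrParams plumbing. A sketch, with an illustrative source document id of 17:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.response.QueryResponse;

// find documents similar to doc 17, drawing interesting terms from lowerfilt_u;
// setShowDebugInfo(true) exposes the parsed MLT query in the response's debug map
QueryResponse rsp = cluster.getSolrClient().query(COLLECTION,
    new SolrQuery("{!mlt qf=lowerfilt_u mintf=1 mindf=0}17").setShowDebugInfo(true));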

View File

@ -9,7 +9,7 @@
## Div tag has placeholder text by default
<div id="clusters">
Run Solr with java -Dsolr.clustering.enabled=true -jar start.jar to see clustered search results.
Run Solr with option -Dsolr.clustering.enabled=true to see clustered search results.
</div>
## Replace the div content *if* Carrot^2 is available

View File

@ -64,6 +64,14 @@ public class SolrQuery extends ModifiableSolrParams
this.set(CommonParams.Q, q);
}
public SolrQuery(String k, String v, String... params) {
assert params.length % 2 == 0;
this.set(k, v);
for (int i = 0; i < params.length; i += 2) {
this.set(params[i], params[i + 1]);
}
}
/** enable/disable terms.
*
* @param b flag to indicate terms should be enabled. <br> if b==false, removes all other terms parameters
@ -858,8 +866,9 @@ public class SolrQuery extends ModifiableSolrParams
return this.getInt(CommonParams.ROWS);
}
public void setShowDebugInfo(boolean showDebugInfo) {
public SolrQuery setShowDebugInfo(boolean showDebugInfo) {
this.set(CommonParams.DEBUG_QUERY, String.valueOf(showDebugInfo));
return this;
}
public void setDistrib(boolean val) {
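
Taken together, the new varargs constructor and the now-fluent setShowDebugInfo allow one-expression query construction; a sketch:

// "q" and its value come first, followed by an even number of key/value strings
SolrQuery q = new SolrQuery("q", "*:*", "fl", "id,score", "sort", "payload asc", "rows", "20")
    .setShowDebugInfo(true);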

View File

@ -16,6 +16,7 @@
*/
package org.apache.solr.common.cloud;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
@ -250,4 +251,12 @@ public class DocCollection extends ZkNodeProps implements Iterable<Slice> {
public Iterator<Slice> iterator() {
return slices.values().iterator();
}
public List<Replica> getReplicas() {
List<Replica> replicas = new ArrayList<>();
for (Slice slice : this) {
replicas.addAll(slice.getReplicas());
}
return replicas;
}
}
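A sketch of the new accessor, which flattens replicas across all slices; the collection name and reader variable are assumptions:

// List every replica of a collection, regardless of which slice owns it.
DocCollection coll = zkStateReader.getClusterState().getCollection("mycollection");
for (Replica replica : coll.getReplicas()) {
  System.out.println(replica.getName() + " on " + replica.getNodeName());
}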

View File

@@ -104,10 +104,15 @@ public class Replica extends ZkNodeProps {
public String getName() {
return name;
}
public String getCoreUrl() {
return ZkCoreNodeProps.getCoreUrl(getStr(BASE_URL_PROP), getStr(CORE_NAME_PROP));
}
public String getCoreName() {
return getStr(CORE_NAME_PROP);
}
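getCoreUrl combines the replica's base URL with its core name, which is convenient for addressing one core directly; a sketch against the SolrJ 6.x client (the replica variable is an assumption):

// Query a single core directly, bypassing cloud-aware routing.
try (HttpSolrClient direct = new HttpSolrClient(replica.getCoreUrl())) {
  direct.query(new SolrQuery("*:*"));
}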
/** The name of the node this replica resides on */
public String getNodeName() {
return nodeName;

View File

@@ -1326,12 +1326,11 @@ public class ZkStateReader implements Closeable {
if (v == null)
return null;
watchers.addAll(v.stateWatchers);
v.stateWatchers.clear();
return v;
});
for (CollectionStateWatcher watcher : watchers) {
if (watcher.onStateChanged(liveNodes, collectionState) == false) {
registerCollectionStateWatcher(collection, watcher);
if (watcher.onStateChanged(liveNodes, collectionState)) {
removeCollectionStateWatcher(collection, watcher);
}
}
}
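After this change the return value reads: true means the watcher is finished and is removed, false keeps it registered. A one-shot watcher sketch; the collection name and the predicate are illustrative:

// Keeps firing until the collection reports at least two slices, then de-registers itself.
zkStateReader.registerCollectionStateWatcher("mycollection", (liveNodes, state) ->
    state != null && state.getSlices().size() >= 2);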

View File

@@ -113,8 +113,9 @@ public class TestCollectionStateWatchers extends SolrCloudTestCase {
cluster.stopJettySolrRunner(random().nextInt(cluster.getJettySolrRunners().size()));
assertTrue("CollectionStateWatcher was never notified of cluster change", latch.await(MAX_WAIT_TIMEOUT, TimeUnit.SECONDS));
assertEquals("CollectionStateWatcher wasn't cleared after completion",
0, client.getZkStateReader().getStateWatchers("testcollection").size());
Set<CollectionStateWatcher> watchers = client.getZkStateReader().getStateWatchers("testcollection");
assertTrue("CollectionStateWatcher wasn't cleared after completion",
watchers == null || watchers.size() == 0);
}

View File

@@ -737,8 +737,12 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
}
}
@SuppressWarnings("rawtypes")
protected int sendDocsWithRetry(List<SolrInputDocument> batch, int minRf, int maxRetries, int waitBeforeRetry) throws Exception {
return sendDocsWithRetry(cloudClient, cloudClient.getDefaultCollection(), batch, minRf, maxRetries, waitBeforeRetry);
}
@SuppressWarnings("rawtypes")
protected static int sendDocsWithRetry(CloudSolrClient cloudClient, String collection, List<SolrInputDocument> batch, int minRf, int maxRetries, int waitBeforeRetry) throws Exception {
UpdateRequest up = new UpdateRequest();
up.setParam(UpdateRequest.MIN_REPFACT, String.valueOf(minRf));
up.add(batch);
@@ -746,7 +750,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
int numRetries = 0;
while(true) {
try {
resp = cloudClient.request(up);
resp = cloudClient.request(up, collection);
return cloudClient.getMinAchievedReplicationFactor(collection, resp);
} catch (Exception exc) {
Throwable rootCause = SolrException.getRootCause(exc);
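The new static overload lets a test that manages its own client target an explicit collection instead of the client's default; a sketch (collection name and batch contents are assumptions):

// Send one batch with minRf=2, retrying up to 3 times with a wait between attempts.
List<SolrInputDocument> batch = new ArrayList<>();
batch.add(sdoc("id", "1", "lowerfilt_u", "some text"));
int achievedRf = sendDocsWithRetry(cloudClient, "mycollection", batch, 2, 3, 5);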

View File

@@ -28,6 +28,7 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Random;
import java.util.SortedMap;
import java.util.concurrent.Callable;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -282,6 +283,14 @@ public class MiniSolrCloudCluster {
return Collections.unmodifiableList(jettys);
}
/**
* @return a randomly-selected Jetty
*/
public JettySolrRunner getRandomJetty(Random random) {
int index = random.nextInt(jettys.size());
return jettys.get(index);
}
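A sketch of the helper in a test, passing the framework's seeded source so a failing pick is reproducible:

// Choose an arbitrary but reproducible node to bounce.
JettySolrRunner node = cluster.getRandomJetty(random());
node.stop();
node.start();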
/**
* Start a new Solr instance
*

View File

@@ -50,6 +50,12 @@ import org.junit.Before;
*/
public class SolrCloudTestCase extends SolrTestCaseJ4 {
public static final int DEFAULT_TIMEOUT = 30;
public static Path configset(String name) {
return TEST_PATH().resolve("configsets").resolve(name).resolve("conf");
}
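The helper resolves a named configset under the test resources; it is typically handed to the cluster builder. A sketch, assuming a configset named "cloud-dynamic" exists on the test path:

// Stand up a two-node cluster backed by a test configset.
configureCluster(2)
    .addConfig("conf", configset("cloud-dynamic"))
    .configure();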
private static class Config {
final String name;
final Path path;