SOLR-7990: don't swallow ExitingReaderException

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1702015 13f79535-47bb-0310-9956-ffa450edef68
Yonik Seeley 2015-09-09 14:00:44 +00:00
parent 3cffe74393
commit 5db80b0b90
7 changed files with 150 additions and 46 deletions

solr/CHANGES.txt

@@ -268,6 +268,9 @@ Bug Fixes
 * SOLR-6188: Fixed error loading ICU classes with solr. prefix. Now jars in
   the default sharedLib directory are only loaded once. (Shawn Heisey)
 
+* SOLR-7990: Use of timeAllowed can cause incomplete filters to be cached and incorrect
+  results to be returned on subsequent requests. (Erick Erickson, yonik)
+
 ==================  5.3.0 ==================
 
 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release
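
Note: the bug in this entry is easy to reproduce in miniature. If the timeout
exception is caught and swallowed while a filter is being built, the partially
filled bit set looks like a normal result and can be cached; every later request
that hits that cache entry silently reuses the truncated filter. A minimal sketch
of the anti-pattern (BitSetCollector and filterCache are hypothetical stand-ins,
not Solr's actual classes; FixedBitSet and ExitingReaderException are real):

    FixedBitSet bits = new FixedBitSet(maxDoc);
    try {
      searcher.search(query, new BitSetCollector(bits)); // BitSetCollector: hypothetical
    } catch (ExitableDirectoryReader.ExitingReaderException e) {
      log.warn("timeAllowed exceeded for " + query);     // swallowed: the caller can't tell
    }
    filterCache.put(query, bits); // BUG: 'bits' may cover only part of the index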

solr/core/src/java/org/apache/solr/search/DocSetUtil.java

@@ -131,11 +131,9 @@ public class DocSetUtil {
     int maxDoc = searcher.getIndexReader().maxDoc();
     DocSetCollector collector = new DocSetCollector((maxDoc >> 6) + 5, maxDoc);
 
-    try {
-      searcher.search(query, collector);
-    } catch ( ExitableDirectoryReader.ExitingReaderException e) {
-      searcher.log.warn("Query: " + query + "; " + e.getMessage());
-    }
+    // This may throw an ExitableDirectoryReader.ExitingReaderException
+    // but we should not catch it here, as we don't know how this DocSet will be used
+    // (it could be negated before use) or cached.
+    searcher.search(query, collector);
 
     return collector.getDocSet();
   }
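
The "(it could be negated before use)" caveat is the subtle part: a timed-out
filter is merely missing matches, but its complement asserts matches that never
existed. A self-contained illustration using plain java.util.BitSet (not Solr's
DocSet classes):

    import java.util.BitSet;

    public class PartialFilterDemo {
      public static void main(String[] args) {
        int maxDoc = 10;

        BitSet partial = new BitSet(maxDoc);
        partial.set(0, 4); // collection stopped after doc 3, say by a timeout;
                           // docs 4..9 actually match too but were never visited

        BitSet negated = (BitSet) partial.clone();
        negated.flip(0, maxDoc); // "NOT filter" computed from the partial set

        // The true filter matches all 10 docs, so its true negation is empty.
        // The negation of the partial set instead "matches" 6 documents.
        System.out.println("spurious matches: " + negated.cardinality()); // prints 6
      }
    }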

solrconfig-delaying-component.xml

@@ -37,4 +37,24 @@
   <httpCaching never304="true" />
   </requestDispatcher>
 
+  <query>
+    <filterCache
+        class="solr.search.FastLRUCache"
+        size="512"
+        initialSize="512"
+        autowarmCount="0"/>
+
+    <queryResultCache
+        class="solr.search.LRUCache"
+        size="512"
+        initialSize="512"
+        autowarmCount="0"/>
+
+    <documentCache
+        class="solr.search.LRUCache"
+        size="512"
+        initialSize="512"
+        autowarmCount="0"/>
+  </query>
+
 </config>
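
These caches are what the new tests below interrogate; with autowarmCount="0"
no entries are copied into a new searcher's caches, so the insert and hit
counters move only in response to the test's own queries. The reading pattern,
using the same SolrInfoMBean calls the tests use (inside a SolrTestCaseJ4
subclass, where h is the test harness):

    SolrCore core = h.getCore();
    SolrInfoMBean filterCacheStats = core.getInfoRegistry().get("filterCache");
    NamedList stats = filterCacheStats.getStatistics();
    long inserts = (long) stats.get("inserts"); // totals for the cache behind the current searcher
    long hits = (long) stats.get("hits");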

solr/core/src/test/org/apache/solr/TestDistributedSearch.java

@@ -1137,7 +1137,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
   protected void comparePartialResponses(QueryResponse rsp, QueryResponse controlRsp, List<String> upShards)
   {
     NamedList<?> sinfo = (NamedList<?>) rsp.getResponse().get(ShardParams.SHARDS_INFO);
     assertNotNull("missing shard info", sinfo);
+    assertEquals("should have an entry for each shard ["+sinfo+"] "+shards, shardsArr.length, sinfo.size());
     // identify each one

@@ -1151,7 +1151,8 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
         found = true;
         // make sure that it responded if it's up
         if (upShards.contains(s)) {
-          assertTrue("Expected to find numFound in the up shard info",info.get("numFound") != null);
+          // this is no longer true if there was a query timeout on an up shard
+          // assertTrue("Expected to find numFound in the up shard info",info.get("numFound") != null);
           assertTrue("Expected to find shardAddress in the up shard info",info.get("shardAddress") != null);
         }
         else {

solr/core/src/test/org/apache/solr/cloud/CloudExitableDirectoryReaderTest.java

@@ -33,6 +33,7 @@ import org.slf4j.LoggerFactory;
 public class CloudExitableDirectoryReaderTest extends AbstractFullDistribZkTestBase {
   public static Logger log = LoggerFactory.getLogger(CloudExitableDirectoryReaderTest.class);
   private static final int NUM_DOCS_PER_TYPE = 20;
+  private static final String sleep = "2";
 
   public CloudExitableDirectoryReaderTest() {
     configString = "solrconfig-tlog-with-delayingcomponent.xml";

@@ -71,7 +72,7 @@ public class CloudExitableDirectoryReaderTest extends AbstractFullDistribZkTestBase {
   }
 
   public void doTimeoutTests() throws Exception {
-    assertPartialResults(params("q", "name:a*", "timeAllowed", "1"));
+    assertPartialResults(params("q", "name:a*", "timeAllowed", "1", "sleep", sleep));
 
     /*
     query rewriting for NUM_DOCS_PER_TYPE terms should take less

@@ -81,9 +82,9 @@ public class CloudExitableDirectoryReaderTest extends AbstractFullDistribZkTestBase {
     int fiveSeconds = 5000;
     Integer timeAllowed = TestUtil.nextInt(random(), fiveSeconds, Integer.MAX_VALUE);
-    assertSuccess(params("q", "name:a*", "timeAllowed",timeAllowed.toString()));
-    assertPartialResults(params("q", "name:a*", "timeAllowed", "1"));
+    assertSuccess(params("q", "name:a*", "timeAllowed", timeAllowed.toString()));
+    assertPartialResults(params("q", "name:a*", "timeAllowed", "1", "sleep", sleep));
 
     timeAllowed = TestUtil.nextInt(random(), fiveSeconds, Integer.MAX_VALUE);
     assertSuccess(params("q", "name:b*", "timeAllowed",timeAllowed.toString()));

solr/core/src/test/org/apache/solr/core/ExitableDirectoryReaderTest.java

@@ -18,12 +18,13 @@ package org.apache.solr.core;
  *
  */
 
+import java.util.Map;
+
 import org.apache.solr.SolrTestCaseJ4;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.request.SolrQueryRequest;
-import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.common.util.NamedList;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.noggit.ObjectBuilder;
 
@@ -31,62 +32,139 @@ import org.junit.Test;
 /**
  * Test that checks that long running queries are exited by Solr using the
  * SolrQueryTimeoutImpl implementation.
  */
 public class ExitableDirectoryReaderTest extends SolrTestCaseJ4 {
 
-  static int NUM_DOCS_PER_TYPE = 100;
-  static final String assertionString = "//result[@numFound='"+ (NUM_DOCS_PER_TYPE - 1) + "']";
+  static int NUM_DOCS = 100;
+  static final String assertionString = "/response/numFound==" + NUM_DOCS;
   static final String failureAssertionString = "/responseHeader/partialResults==true";
+  static final String longTimeout = "10000";
+  static final String sleep = "2";
 
   @BeforeClass
   public static void beforeClass() throws Exception {
-    System.setProperty("enable.update.log", "false"); // schema12 doesn't support _version_
-    initCore("solrconfig-nocache-with-delaying-searchcomponent.xml", "schema12.xml");
+    initCore("solrconfig-delaying-component.xml", "schema_latest.xml");
     createIndex();
   }
 
   public static void createIndex() {
-    int counter = 1;
-    for (; (counter % NUM_DOCS_PER_TYPE) != 0; counter++)
-      assertU(adoc("id", Integer.toString(counter), "name", "a" + counter));
-    counter++;
-    for (; (counter % NUM_DOCS_PER_TYPE) != 0; counter++)
-      assertU(adoc("id", Integer.toString(counter), "name", "b" + counter));
-    counter++;
-    for (; counter % NUM_DOCS_PER_TYPE != 0; counter++)
-      assertU(adoc("id", Integer.toString(counter), "name", "dummy term doc" + counter));
+    for (int i = 0; i < NUM_DOCS; i++) {
+      assertU(adoc("id", Integer.toString(i), "name", "a" + i + " b" + i + " c" + i + " d" + i + " e" + i));
+      if (random().nextInt(NUM_DOCS) == 0) {
+        assertU(commit()); // sometimes make multiple segments
+      }
+    }
     assertU(commit());
   }
 
   @Test
   public void testPrefixQuery() throws Exception {
-    assertJQ(req("q", "name:a*", "indent", "true", "timeAllowed", "1"), failureAssertionString);
-    assertQ(req("q", "name:a*", "indent", "true", "timeAllowed", "10000"), assertionString);
-    assertJQ(req("q", "name:a*", "indent", "true", "timeAllowed", "1"), failureAssertionString);
-    assertQ(req("q", "name:b*", "indent", "true", "timeAllowed", "10000"), assertionString);
-    assertQ(req("q", "name:b*", "indent", "true", "timeAllowed", Long.toString(Long.MAX_VALUE)), assertionString);
-    assertQ(req("q", "name:b*", "indent", "true", "timeAllowed", "-7")); // negative timeAllowed should disable timeouts
-    assertQ(req("q", "name:b*", "indent", "true"));
+    String q = "name:a*";
+    assertJQ(req("q", q, "timeAllowed", "1", "sleep", sleep), failureAssertionString);
+
+    // do the same query and test for both success, and that the number of documents matched
+    // (i.e. make sure no partial results were cached)
+    assertJQ(req("q", q, "timeAllowed", longTimeout), assertionString);
+
+    // this time we should get a query cache hit and hopefully no exception? this may change
+    // in the future if time checks are put into other places.
+    assertJQ(req("q", q, "timeAllowed", "1", "sleep", sleep), assertionString);
+
+    // now do the same for the filter cache
+    assertJQ(req("q", "*:*", "fq", q, "timeAllowed", "1", "sleep", sleep), failureAssertionString);
+
+    // make sure that the result succeeds this time, and that a bad filter wasn't cached
+    assertJQ(req("q", "*:*", "fq", q, "timeAllowed", longTimeout), assertionString);
+
+    // test that Long.MAX_VALUE works
+    assertJQ(req("q", "name:b*", "timeAllowed", Long.toString(Long.MAX_VALUE)), assertionString);
+
+    // negative timeAllowed should disable timeouts.
+    assertJQ(req("q", "name:c*", "timeAllowed", "-7"), assertionString);
   }
 
+  // There are lots of assumptions about how/when cache entries should be changed in this method. The
+  // simple case above shows the root problem without the confusion. testFilterSimpleCase should be
+  // removed once it is running and this test should be un-ignored and the assumptions verified.
+  // With all the weirdness, I'm not going to vouch for this test. Feel free to change it.
   @Test
-  public void testQueriesOnDocsWithMultipleTerms() throws Exception {
-    assertQ(req("q", "name:dummy", "indent", "true", "timeAllowed", "10000"), assertionString);
-    // This should pass even though this may take more than the 'timeAllowed' time, it doesn't take long
-    // to iterate over 1 term (dummy).
-    assertQ(req("q", "name:dummy", "indent", "true", "timeAllowed", "10000"), assertionString);
-    assertJQ(req("q", "name:doc*", "indent", "true", "timeAllowed", "1"), failureAssertionString);
+  public void testCacheAssumptions() throws Exception {
+    String fq = "name:d*";
+    SolrCore core = h.getCore();
+    SolrInfoMBean filterCacheStats = core.getInfoRegistry().get("filterCache");
+    long fqInserts = (long) filterCacheStats.getStatistics().get("inserts");
+
+    SolrInfoMBean queryCacheStats = core.getInfoRegistry().get("queryResultCache");
+    long qrInserts = (long) queryCacheStats.getStatistics().get("inserts");
+
+    // This gets 0 docs back. Use 10000 instead of 1 for timeAllowed and it gets 100 back and the
+    // checks below succeed.
+    String response = JQ(req("q", "*:*", "fq", fq, "indent", "true", "timeAllowed", "1", "sleep", sleep));
+    Map res = (Map) ObjectBuilder.fromJSON(response);
+    Map body = (Map) (res.get("response"));
+    assertTrue("Should have fewer docs than " + NUM_DOCS, (long) (body.get("numFound")) < NUM_DOCS);
+
+    Map header = (Map) (res.get("responseHeader"));
+    assertTrue("Should have partial results", (Boolean) (header.get("partialResults")));
+
+    assertEquals("Should NOT have inserted partial results in the cache!",
+        (long) queryCacheStats.getStatistics().get("inserts"), qrInserts);
+    assertEquals("Should NOT have another insert", fqInserts, (long) filterCacheStats.getStatistics().get("inserts"));
+
+    // At the end of all this, we should have no hits in the queryResultCache.
+    response = JQ(req("q", "*:*", "fq", fq, "indent", "true", "timeAllowed", longTimeout));
+
+    // Check that we did insert this one.
+    assertEquals("Hits should still be 0", (long) filterCacheStats.getStatistics().get("hits"), 0L);
+    assertEquals("Inserts should be bumped", (long) filterCacheStats.getStatistics().get("inserts"), fqInserts + 1);
+
+    res = (Map) ObjectBuilder.fromJSON(response);
+    body = (Map) (res.get("response"));
+    assertEquals("Should have exactly " + NUM_DOCS, (long) (body.get("numFound")), NUM_DOCS);
+    header = (Map) (res.get("responseHeader"));
+    assertTrue("Should NOT have partial results", header.get("partialResults") == null);
   }
 
+  // When looking at a problem raised on the user's list I ran across this anomaly with timeAllowed.
+  // This tests for the second query NOT returning partial results, along with some other checks.
+  @Test
+  public void testQueryResults() throws Exception {
+    String q = "name:e*";
+    SolrCore core = h.getCore();
+    SolrInfoMBean queryCacheStats = core.getInfoRegistry().get("queryResultCache");
+    NamedList nl = queryCacheStats.getStatistics();
+    long inserts = (long) nl.get("inserts");
+
+    String response = JQ(req("q", q, "indent", "true", "timeAllowed", "1", "sleep", sleep));
+
+    // The queryResultCache should NOT get an entry here.
+    nl = queryCacheStats.getStatistics();
+    assertEquals("Should NOT have inserted partial results!", inserts, (long) nl.get("inserts"));
+
+    Map res = (Map) ObjectBuilder.fromJSON(response);
+    Map body = (Map) (res.get("response"));
+    Map header = (Map) (res.get("responseHeader"));
+
+    assertTrue("Should have fewer docs than " + NUM_DOCS, (long) (body.get("numFound")) < NUM_DOCS);
+    assertTrue("Should have partial results", (Boolean) (header.get("partialResults")));
+
+    response = JQ(req("q", q, "indent", "true", "timeAllowed", longTimeout));
+
+    // Check that we did insert this one.
+    NamedList nl2 = queryCacheStats.getStatistics();
+    assertEquals("Hits should still be 0", (long) nl.get("hits"), (long) nl2.get("hits"));
+    assertTrue("Inserts should be bumped", inserts < (long) nl2.get("inserts"));
+
+    res = (Map) ObjectBuilder.fromJSON(response);
+    body = (Map) (res.get("response"));
+    header = (Map) (res.get("responseHeader"));
+
+    assertEquals("Should have exactly " + NUM_DOCS, NUM_DOCS, (long) (body.get("numFound")));
+    Boolean test = (Boolean) (header.get("partialResults"));
+    if (test != null) {
+      assertFalse("Should NOT have partial results", test);
+    }
+  }
 }
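
One contract these tests encode is easy to miss: a complete response simply
omits partialResults from the header rather than reporting false, so null must
be treated as success. Reduced to its essentials, using the same noggit
ObjectBuilder parsing the tests use:

    Map res = (Map) ObjectBuilder.fromJSON(response);
    Map header = (Map) res.get("responseHeader");
    Boolean partial = (Boolean) header.get("partialResults");
    // absent (null) means the query ran to completion; only an explicit true marks truncation
    assertTrue("unexpected partial results", partial == null || !partial);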

DelayingSearchComponent.java

@@ -35,8 +35,11 @@ public class DelayingSearchComponent extends SearchComponent {
   @Override
   public void process(ResponseBuilder rb) throws IOException {
+    int sleep = rb.req.getParams().getInt("sleep", 0);
     try {
-      Thread.sleep(1000);
+      if (sleep > 0) {
+        Thread.sleep(sleep);
+      }
     } catch (InterruptedException e) {
       // Do nothing?
     }
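
The effect of this last change: the component used to impose a fixed one-second
delay on every request, while now the delay is opt-in and tunable per request via
the sleep parameter (milliseconds, passed straight to Thread.sleep). That is what
lets the tests above pair a tiny timeAllowed budget with a slightly larger
injected delay, e.g.:

    // ~2 ms of injected delay per component invocation vs. a 1 ms time budget
    assertPartialResults(params("q", "name:a*", "timeAllowed", "1", "sleep", "2"));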