Added trace logging in TransportSearchTypeAction and a delayed verification of test failures in MinDocCountTests.testMinDocCountOnTerms

Boaz Leskes 2014-03-31 23:23:46 +02:00
parent cc4cae3ba0
commit 350fd8e30b
2 changed files with 35 additions and 12 deletions

TransportSearchTypeAction.java

@@ -277,6 +277,8 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
} else {
logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
}
} else if (logger.isTraceEnabled()) {
logger.trace("{}: Failed to execute [{}]", t, shard, request);
}
}
if (successulOps.get() == 0) {
@@ -296,7 +298,7 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
final ShardRouting nextShard = shardIt.nextOrNull();
final boolean lastShard = nextShard == null;
// trace log this exception
if (logger.isTraceEnabled() && t != null) {
if (logger.isTraceEnabled()) {
logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
}
if (!lastShard) {
@@ -410,6 +412,10 @@ public abstract class TransportSearchTypeAction extends TransportAction<SearchRe
protected final void processFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result) {
firstResults.set(shardIndex, result);
if (logger.isTraceEnabled()) {
logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
}
// clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
// so its ok concurrency wise to miss potentially the shard failures being created because of another failure
// in the #addShardFailure, because by definition, it will happen on *another* shardIndex
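
The trace statements added above follow the usual guard-then-log pattern: check logger.isTraceEnabled() before emitting, and hand the Throwable to the logger instead of dropping it (that is why t appears before shard and request in the first hunk, matching ESLogger's throwable-before-arguments trace() call). The following is a minimal stand-alone sketch of the same pattern, not the class's actual code; it uses an SLF4J-style logger as a stand-in for Elasticsearch's ESLogger, and logShardFailure() is a hypothetical helper. Note that SLF4J expects the Throwable as the last argument, the opposite of the call shown in the hunk.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ShardFailureLoggingSketch {

        private static final Logger logger = LoggerFactory.getLogger(ShardFailureLoggingSketch.class);

        // Hypothetical helper: log an "interesting" shard failure at DEBUG,
        // everything else only at TRACE, always attaching the Throwable.
        static void logShardFailure(String shardSummary, String request, Throwable t, boolean interesting) {
            if (interesting) {
                if (logger.isDebugEnabled()) {
                    logger.debug("{}: Failed to execute [{}]", shardSummary, request, t);
                }
            } else if (logger.isTraceEnabled()) {
                // the guard avoids any message construction cost when TRACE is disabled
                logger.trace("{}: Failed to execute [{}]", shardSummary, request, t);
            }
        }
    }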

MinDocCountTests.java

@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.LongOpenHashSet;
import com.carrotsearch.hppc.LongSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.index.query.QueryBuilder;
@@ -183,19 +184,19 @@ public class MinDocCountTests extends ElasticsearchIntegrationTest {
}
public void testStringCountAscWithInclude() throws Exception {
testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*");
testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(true), ".*a.*", true);
}
public void testStringScriptCountAscWithInclude() throws Exception {
testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*");
testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(true), ".*a.*", true);
}
public void testStringCountDescWithInclude() throws Exception {
testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*");
testMinDocCountOnTerms("s", Script.NO, Terms.Order.count(false), ".*a.*", true);
}
public void testStringScriptCountDescWithInclude() throws Exception {
testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*");
testMinDocCountOnTerms("s", Script.YES, Terms.Order.count(false), ".*a.*", true);
}
public void testLongTermAsc() throws Exception {
@@ -263,10 +264,10 @@ public class MinDocCountTests extends ElasticsearchIntegrationTest {
}
private void testMinDocCountOnTerms(String field, Script script, Terms.Order order) throws Exception {
testMinDocCountOnTerms(field, script, order, null);
testMinDocCountOnTerms(field, script, order, null, true);
}
private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include) throws Exception {
private void testMinDocCountOnTerms(String field, Script script, Terms.Order order, String include, boolean retryOnFailure) throws Exception {
// all terms
final SearchResponse allTermsResponse = client().prepareSearch("idx").setTypes("type")
.setSearchType(SearchType.COUNT)
@@ -284,7 +285,7 @@ public class MinDocCountTests extends ElasticsearchIntegrationTest {
for (long minDocCount = 0; minDocCount < 20; ++minDocCount) {
final int size = randomIntBetween(1, cardinality + 2);
final SearchResponse response = client().prepareSearch("idx").setTypes("type")
final SearchRequest request = client().prepareSearch("idx").setTypes("type")
.setSearchType(SearchType.COUNT)
.setQuery(QUERY)
.addAggregation(script.apply(terms("terms"), field)
@@ -293,10 +294,26 @@ public class MinDocCountTests extends ElasticsearchIntegrationTest {
.size(size)
.include(include)
.shardSize(cardinality + randomInt(10))
.minDocCount(minDocCount))
.execute().actionGet();
assertAllSuccessful(response);
assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
.minDocCount(minDocCount)).request();
final SearchResponse response = client().search(request).get();
try {
assertAllSuccessful(response);
assertSubset(allTerms, (Terms) response.getAggregations().get("terms"), minDocCount, size, include);
} catch (AssertionError ae) {
if (!retryOnFailure) {
throw ae;
}
logger.info("test failed. trying to see if it recovers after 1m.", ae);
try {
Thread.sleep(60000);
logger.debug("1m passed. retrying.");
testMinDocCountOnTerms(field, script, order, include, false);
} catch (Throwable secondFailure) {
logger.error("exception on retry (will re-throw the original in a sec)", secondFailure);
} finally {
throw ae;
}
}
}
}
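
The retry logic added to the test amounts to: run the assertions, and on an AssertionError wait a minute, re-run the same verification once with retries disabled (so a second failure can be logged and compared against the first), then re-throw the original error regardless of how the retry went. Below is a stripped-down sketch of that control flow only, with a hypothetical Check callback standing in for the search-and-assert body of testMinDocCountOnTerms; it is not the test's actual code.

    public class DelayedVerificationSketch {

        /** Stand-in for the search + assertAllSuccessful + assertSubset block of the test. */
        interface Check {
            void run() throws Exception;
        }

        static void runWithDelayedVerification(Check check, boolean retryOnFailure) throws Exception {
            try {
                check.run();
            } catch (AssertionError ae) {
                if (!retryOnFailure) {
                    throw ae;
                }
                System.out.println("check failed; retrying in 1m to see if it recovers: " + ae);
                try {
                    Thread.sleep(60000);
                    // single retry with retries disabled, mirroring the recursive call in the diff
                    runWithDelayedVerification(check, false);
                } catch (Throwable secondFailure) {
                    // only logged; the original failure is what gets reported
                    secondFailure.printStackTrace();
                } finally {
                    throw ae;
                }
            }
        }
    }

Whether or not the delayed retry passes, the test still fails with the original AssertionError (note the throw in the finally block); the retry only adds log output indicating whether the discrepancy resolved itself after the one-minute delay.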