Add trace log of the request for the query and fetch phases (#34479)

This change adds a logger to the query and fetch phases that prints every request,
at the trace level, before it is executed. This helps with debugging cases where a problem
occurs mid-execution, since the slow logs only record queries that have completed.
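By way of illustration (not part of this commit): the two loggers added below are named after their classes, so they could be flipped to trace programmatically with Log4j 2's Configurator, as in the sketch below. On a running cluster one would normally do this through log4j2.properties or the `logger.*` keys of the cluster settings API instead.

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

// Hypothetical helper, requires log4j-core on the classpath: raises the two
// new loggers to TRACE so that every query/fetch request is printed before
// it executes.
public final class EnableSearchTraceLogging {
    public static void main(String[] args) {
        Configurator.setLevel("org.elasticsearch.search.query.QueryPhase", Level.TRACE);
        Configurator.setLevel("org.elasticsearch.search.fetch.FetchPhase", Level.TRACE);
    }
}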
Jim Ferenczi authored on 2018-11-09 09:41:51 +01:00 (committed via GitHub)
commit 7054e289fa, parent a9641960e1
4 changed files with 49 additions and 1 deletion

server/src/main/java/org/elasticsearch/search/SearchPhase.java

@@ -20,6 +20,7 @@
 package org.elasticsearch.search;
 
 import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.tasks.Task;
 
 /**
  * Represents a phase of a search request e.g. query, fetch etc.
@@ -35,4 +36,32 @@ public interface SearchPhase {
      * Executes the search phase
      */
     void execute(SearchContext context);
+
+    class SearchContextSourcePrinter {
+        private final SearchContext searchContext;
+
+        public SearchContextSourcePrinter(SearchContext searchContext) {
+            this.searchContext = searchContext;
+        }
+
+        @Override
+        public String toString() {
+            StringBuilder builder = new StringBuilder();
+            builder.append(searchContext.indexShard().shardId());
+            builder.append(" ");
+            if (searchContext.request() != null &&
+                    searchContext.request().source() != null) {
+                builder.append("source[").append(searchContext.request().source().toString()).append("], ");
+            } else {
+                builder.append("source[], ");
+            }
+            if (searchContext.getTask() != null &&
+                    searchContext.getTask().getHeader(Task.X_OPAQUE_ID) != null) {
+                builder.append("id[").append(searchContext.getTask().getHeader(Task.X_OPAQUE_ID)).append("], ");
+            } else {
+                builder.append("id[], ");
+            }
+            return builder.toString();
+        }
+    }
 }
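To make the log format concrete, here is a minimal, self-contained sketch with made-up shard, source, and id values; it mirrors the append sequence of SearchContextSourcePrinter.toString() above:

// Standalone sketch with hypothetical values (not an Elasticsearch class).
public final class SourcePrinterFormatDemo {
    public static void main(String[] args) {
        StringBuilder builder = new StringBuilder();
        builder.append("[test][0]");   // what searchContext.indexShard().shardId() renders as
        builder.append(" ");
        builder.append("source[").append("{\"query\":{\"match_all\":{}}}").append("], ");
        builder.append("id[").append("my-request-id").append("], ");
        // prints: [test][0] source[{"query":{"match_all":{}}}], id[my-request-id],
        System.out.println(builder.toString());
    }
}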

server/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java

@@ -19,6 +19,8 @@
 package org.elasticsearch.search.fetch;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.search.DocIdSetIterator;
@@ -73,6 +75,7 @@ import java.util.Set;
  * after reducing all of the matches returned by the query phase
  */
 public class FetchPhase implements SearchPhase {
+    private static final Logger LOGGER = LogManager.getLogger(FetchPhase.class);
 
     private final FetchSubPhase[] fetchSubPhases;
@@ -87,6 +90,11 @@ public class FetchPhase implements SearchPhase {
     @Override
     public void execute(SearchContext context) {
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("{}", new SearchContextSourcePrinter(context));
+        }
+
         final FieldsVisitor fieldsVisitor;
         Map<String, Set<String>> storedToRequestedFields = new HashMap<>();
         StoredFieldsContext storedFieldsContext = context.storedFieldsContext();
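A design note on the guard above: because the printer is passed as a `{}` parameter, Log4j only invokes its toString() (which serializes the whole search source) when the message is actually logged, and the isTraceEnabled() check additionally avoids allocating the printer at all at the default level. A minimal sketch of the same pattern, assuming only Log4j 2 on the classpath:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Standalone sketch of the pattern used in FetchPhase.execute() above:
// the costly toString() runs only when TRACE is actually enabled.
public final class TraceGuardDemo {
    private static final Logger LOGGER = LogManager.getLogger(TraceGuardDemo.class);

    public static void main(String[] args) {
        Object expensive = new Object() {
            @Override
            public String toString() {
                return "pretend this serializes a large search request";
            }
        };
        if (LOGGER.isTraceEnabled()) {        // skip the allocation when trace is off
            LOGGER.trace("{}", expensive);    // toString() deferred to Log4j
        }
    }
}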

server/src/main/java/org/elasticsearch/search/query/QueryPhase.java

@@ -19,6 +19,8 @@
 package org.elasticsearch.search.query;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.queries.MinDocQuery;
@@ -73,6 +75,7 @@ import static org.elasticsearch.search.query.TopDocsCollectorContext.createTopDocsCollectorContext;
  * (document ids and score or sort criteria) so that matches can be reduced on the coordinating node
  */
 public class QueryPhase implements SearchPhase {
+    private static final Logger LOGGER = LogManager.getLogger(QueryPhase.class);
 
     private final AggregationPhase aggregationPhase;
     private final SuggestPhase suggestPhase;
@@ -99,6 +102,11 @@ public class QueryPhase implements SearchPhase {
                 new DocValueFormat[0]);
             return;
         }
+
+        if (LOGGER.isTraceEnabled()) {
+            LOGGER.trace("{}", new SearchContextSourcePrinter(searchContext));
+        }
+
         // Pre-process aggregations as late as possible. In the case of a DFS_Q_T_F
         // request, preProcess is called on the DFS phase, this is why we pre-process them
         // here to make sure it happens during the QUERY phase

test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java

@@ -58,6 +58,7 @@ import org.elasticsearch.index.mapper.ObjectMapper;
 import org.elasticsearch.index.mapper.ObjectMapper.Nested;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.support.NestedScope;
+import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
@@ -130,6 +131,9 @@ public abstract class AggregatorTestCase extends ESTestCase {
                                                        MappedFieldType... fieldTypes) throws IOException {
         SearchContext searchContext = createSearchContext(indexSearcher, indexSettings);
         CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
+        IndexShard indexShard = mock(IndexShard.class);
+        when(indexShard.shardId()).thenReturn(new ShardId("test", "test", 0));
+        when(searchContext.indexShard()).thenReturn(indexShard);
         when(searchContext.aggregations())
             .thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer));
         when(searchContext.query()).thenReturn(query);
@@ -155,7 +159,6 @@ public abstract class AggregatorTestCase extends ESTestCase {
         QueryShardContext queryShardContext = queryShardContextMock(mapperService);
         when(queryShardContext.getIndexSettings()).thenReturn(indexSettings);
         when(searchContext.getQueryShardContext()).thenReturn(queryShardContext);
-
         Map<String, MappedFieldType> fieldNameToType = new HashMap<>();
         fieldNameToType.putAll(Arrays.stream(fieldTypes)
             .collect(Collectors.toMap(MappedFieldType::name, Function.identity())));
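A note on the test change above: SearchContextSourcePrinter begins every trace line with searchContext.indexShard().shardId(), so a SearchContext assembled from mocks now needs a stubbed IndexShard. The added when(searchContext.indexShard()) stubbing, with a fixed ShardId of index "test" / shard 0, presumably keeps that new code path from dereferencing a null when the phases run under these tests.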