percolator: Removed percolator cache

Before 5.0 it was required that the percolator queries were cached on the JVM heap as Lucene queries, for two reasons:
1) Performance. The percolator evaluated all percolator queries for every percolated document; there was no pre-selection of queries that are likely to match, as we have today.
2) Updates made to percolator queries were visible in real time. Today these changes are visible in near real time, so updating no longer requires the percolator to keep the queries on the JVM heap.

Keeping the percolator queries on the JVM heap via the percolator cache is therefore much less attractive: when there are many percolator queries, the cache can consume many GBs of heap.
Removing the percolator cache does make the percolate query slower compared to 5.0.0-alpha1 and alpha2, but it is still faster than 2.x and before.
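To picture the change, here is a minimal, self-contained sketch (simplified stand-in types, not the actual Elasticsearch classes) of the pattern this commit moves to: instead of a long-lived per-segment heap cache, percolator queries are looked up lazily through the functional QueryStore/Leaf interfaces that PercolateQuery now exposes (see the diff below).

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

public class QueryStoreSketch {

    // Mirrors the shape of PercolateQuery.QueryStore in this commit; String
    // stands in for Lucene's Query and the map for a segment's doc values.
    @FunctionalInterface
    interface QueryStore {
        Leaf getQueries(Map<Integer, String> segmentDocValues) throws IOException;

        @FunctionalInterface
        interface Leaf {
            String getQuery(int docId) throws IOException;
        }
    }

    public static void main(String[] args) throws IOException {
        // Each request materializes the queries it needs on the fly instead of
        // relying on a heap-resident cache that is warmed up front.
        QueryStore store = segmentDocValues -> docId -> segmentDocValues.get(docId);
        Map<Integer, String> segment = Collections.singletonMap(0, "{\"term\":{\"field\":\"value\"}}");
        System.out.println(store.getQueries(segment).getQuery(0));
    }
}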
Martijn van Groningen 2016-04-29 16:23:01 +02:00
parent dafa78ec63
commit 80fee8666f
44 changed files with 198 additions and 1031 deletions
core/src
docs/reference
migration/migrate_5_0
query-dsl
modules/lang-mustache/src/test/java/org/elasticsearch/messy/tests
rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards
test/framework/src
main/java/org/elasticsearch/test
test/java/org/elasticsearch/search

@@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
-import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.search.suggest.completion.CompletionStats;
@@ -45,7 +44,6 @@ public class ClusterStatsIndices implements ToXContent {
     private QueryCacheStats queryCache;
     private CompletionStats completion;
     private SegmentsStats segments;
-    private PercolatorQueryCacheStats percolatorCache;

     public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) {
         ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>();
@@ -56,7 +54,6 @@ public class ClusterStatsIndices implements ToXContent {
         this.queryCache = new QueryCacheStats();
         this.completion = new CompletionStats();
         this.segments = new SegmentsStats();
-        this.percolatorCache = new PercolatorQueryCacheStats();

         for (ClusterStatsNodeResponse r : nodeResponses) {
             for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
@@ -79,7 +76,6 @@ public class ClusterStatsIndices implements ToXContent {
                 queryCache.add(shardCommonStats.queryCache);
                 completion.add(shardCommonStats.completion);
                 segments.add(shardCommonStats.segments);
-                percolatorCache.add(shardCommonStats.percolatorCache);
             }
         }
@@ -122,10 +118,6 @@ public class ClusterStatsIndices implements ToXContent {
         return segments;
     }

-    public PercolatorQueryCacheStats getPercolatorCache() {
-        return percolatorCache;
-    }
-
     static final class Fields {
         static final String COUNT = "count";
     }
@@ -140,7 +132,6 @@ public class ClusterStatsIndices implements ToXContent {
         queryCache.toXContent(builder, params);
         completion.toXContent(builder, params);
         segments.toXContent(builder, params);
-        percolatorCache.toXContent(builder, params);
         return builder;
     }

@@ -55,8 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
        TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {

    private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
-            CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
-            CommonStatsFlags.Flag.PercolatorCache);
+            CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments);

    private final NodeService nodeService;
    private final IndicesService indicesService;
@@ -100,7 +99,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
            for (IndexShard indexShard : indexService) {
                if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                    // only report on fully started shards
-                    shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
+                    shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
                }
            }
        }

@@ -32,10 +32,8 @@ import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
 import org.elasticsearch.index.shard.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
-import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
 import org.elasticsearch.index.recovery.RecoveryStats;
 import org.elasticsearch.index.refresh.RefreshStats;
 import org.elasticsearch.index.search.stats.SearchStats;
@@ -101,9 +99,6 @@ public class CommonStats implements Streamable, ToXContent {
                case Segments:
                    segments = new SegmentsStats();
                    break;
-                case PercolatorCache:
-                    percolatorCache = new PercolatorQueryCacheStats();
-                    break;
                case Translog:
                    translog = new TranslogStats();
                    break;
@@ -123,8 +118,7 @@ public class CommonStats implements Streamable, ToXContent {
    }

-    public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache,
-                       IndexShard indexShard, CommonStatsFlags flags) {
+    public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {
        CommonStatsFlags.Flag[] setFlags = flags.getFlags();
@@ -169,9 +163,6 @@ public class CommonStats implements Streamable, ToXContent {
                case Segments:
                    segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
                    break;
-                case PercolatorCache:
-                    percolatorCache = percolatorQueryCache.getStats(indexShard.shardId());
-                    break;
                case Translog:
                    translog = indexShard.translogStats();
                    break;
@@ -223,9 +214,6 @@ public class CommonStats implements Streamable, ToXContent {
    @Nullable
    public FieldDataStats fieldData;

-    @Nullable
-    public PercolatorQueryCacheStats percolatorCache;
-
    @Nullable
    public CompletionStats completion;
@@ -331,14 +319,6 @@ public class CommonStats implements Streamable, ToXContent {
        } else {
            fieldData.add(stats.getFieldData());
        }
-        if (percolatorCache == null) {
-            if (stats.getPercolatorCache() != null) {
-                percolatorCache = new PercolatorQueryCacheStats();
-                percolatorCache.add(stats.getPercolatorCache());
-            }
-        } else {
-            percolatorCache.add(stats.getPercolatorCache());
-        }
        if (completion == null) {
            if (stats.getCompletion() != null) {
                completion = new CompletionStats();
@@ -436,11 +416,6 @@ public class CommonStats implements Streamable, ToXContent {
        return this.fieldData;
    }

-    @Nullable
-    public PercolatorQueryCacheStats getPercolatorCache() {
-        return percolatorCache;
-    }
-
    @Nullable
    public CompletionStats getCompletion() {
        return completion;
@@ -528,9 +503,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (in.readBoolean()) {
            fieldData = FieldDataStats.readFieldDataStats(in);
        }
-        if (in.readBoolean()) {
-            percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in);
-        }
        if (in.readBoolean()) {
            completion = CompletionStats.readCompletionStats(in);
        }
@@ -610,12 +582,6 @@ public class CommonStats implements Streamable, ToXContent {
            out.writeBoolean(true);
            fieldData.writeTo(out);
        }
-        if (percolatorCache == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            percolatorCache.writeTo(out);
-        }
        if (completion == null) {
            out.writeBoolean(false);
        } else {
@@ -669,9 +635,6 @@ public class CommonStats implements Streamable, ToXContent {
        if (fieldData != null) {
            fieldData.toXContent(builder, params);
        }
-        if (percolatorCache != null) {
-            percolatorCache.toXContent(builder, params);
-        }
        if (completion != null) {
            completion.toXContent(builder, params);
        }

@@ -240,7 +240,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
        FieldData("fielddata"),
        Docs("docs"),
        Warmer("warmer"),
-        PercolatorCache("percolator_cache"),
        Completion("completion"),
        Segments("segments"),
        Translog("translog"),

@@ -184,15 +184,6 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
        return flags.isSet(Flag.FieldData);
    }

-    public IndicesStatsRequest percolate(boolean percolate) {
-        flags.set(Flag.PercolatorCache, percolate);
-        return this;
-    }
-
-    public boolean percolate() {
-        return flags.isSet(Flag.PercolatorCache);
-    }
-
    public IndicesStatsRequest segments(boolean segments) {
        flags.set(Flag.Segments, segments);
        return this;

@@ -127,11 +127,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
        return this;
    }

-    public IndicesStatsRequestBuilder setPercolate(boolean percolate) {
-        request.percolate(percolate);
-        return this;
-    }
-
    public IndicesStatsRequestBuilder setSegments(boolean segments) {
        request.segments(segments);
        return this;

@@ -139,9 +139,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.FieldData);
            flags.fieldDataFields(request.fieldDataFields());
        }
-        if (request.percolate()) {
-            flags.set(CommonStatsFlags.Flag.PercolatorCache);
-        }
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
            flags.includeSegmentFileSizes(request.includeSegmentFileSizes());
@@ -163,6 +160,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
            flags.set(CommonStatsFlags.Flag.Recovery);
        }

-        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats());
+        return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats());
    }
}

@@ -35,7 +35,7 @@ import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
+import org.elasticsearch.index.percolator.PercolatorFieldMapper;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.index.store.IndexStore;
@@ -126,7 +126,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        FieldMapper.IGNORE_MALFORMED_SETTING,
        FieldMapper.COERCE_SETTING,
        Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
-        PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
+        PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING,
        MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
        MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
        MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,

@@ -50,7 +50,6 @@ import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.IndexEventListener;
@@ -151,11 +150,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        this.indexStore = indexStore;
        indexFieldData.setListener(new FieldDataCacheListener(this));
        this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
-        PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext);
        this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool,
-            bitsetFilterCache.createListener(threadPool),
-            percolatorQueryCache.createListener(threadPool));
-        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache);
+            bitsetFilterCache.createListener(threadPool));
+        this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache);
        this.engineFactory = engineFactory;
        // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
        this.searcherWrapper = wrapperFactory.newWrapper(this);
@@ -239,8 +236,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                }
            }
        } finally {
-            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask,
-                cache().getPercolatorQueryCache());
+            IOUtils.close(bitsetFilterCache, indexCache, indexFieldData, analysisService, refreshTask, fsyncTask);
        }
    }
@@ -443,7 +439,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
        return new QueryShardContext(
            indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(),
            similarityService(), nodeServicesProvider.getScriptService(), nodeServicesProvider.getIndicesQueriesRegistry(),
-            nodeServicesProvider.getClient(), indexCache.getPercolatorQueryCache(), indexReader,
+            nodeServicesProvider.getClient(), indexReader,
            nodeServicesProvider.getClusterService().state()
        );
    }

@@ -24,7 +24,6 @@ import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.cache.query.QueryCache;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;

 import java.io.Closeable;
 import java.io.IOException;
@@ -36,14 +35,11 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {

    private final QueryCache queryCache;
    private final BitsetFilterCache bitsetFilterCache;
-    private final PercolatorQueryCache percolatorQueryCache;

-    public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache,
-                      PercolatorQueryCache percolatorQueryCache) {
+    public IndexCache(IndexSettings indexSettings, QueryCache queryCache, BitsetFilterCache bitsetFilterCache) {
        super(indexSettings);
        this.queryCache = queryCache;
        this.bitsetFilterCache = bitsetFilterCache;
-        this.percolatorQueryCache = percolatorQueryCache;
    }

    public QueryCache query() {
@@ -57,13 +53,9 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {
        return bitsetFilterCache;
    }

-    public PercolatorQueryCache getPercolatorQueryCache() {
-        return percolatorQueryCache;
-    }
-
    @Override
    public void close() throws IOException {
-        IOUtils.close(queryCache, bitsetFilterCache, percolatorQueryCache);
+        IOUtils.close(queryCache, bitsetFilterCache);
    }

    public void clear(String reason) {

@@ -23,11 +23,13 @@ import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.ParsingException;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentLocation;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -36,6 +38,7 @@ import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.BinaryFieldMapper;
 import org.elasticsearch.index.mapper.core.KeywordFieldMapper;
+import org.elasticsearch.index.query.PercolateQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.query.QueryShardContext;
@@ -50,14 +53,17 @@ import java.util.Map;

 public class PercolatorFieldMapper extends FieldMapper {

+    public final static XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE;
+    public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
+        Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope);
    @Deprecated
    public static final String LEGACY_TYPE_NAME = ".percolator";
    public static final String CONTENT_TYPE = "percolator";
    private static final PercolatorFieldType FIELD_TYPE = new PercolatorFieldType();

-    private static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
-    private static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query";
-    static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";
+    public static final String EXTRACTED_TERMS_FIELD_NAME = "extracted_terms";
+    public static final String UNKNOWN_QUERY_FIELD_NAME = "unknown_query";
+    public static final String QUERY_BUILDER_FIELD_NAME = "query_builder_field";

    public static class Builder extends FieldMapper.Builder<Builder, PercolatorFieldMapper> {
@@ -172,7 +178,7 @@ public class PercolatorFieldMapper extends FieldMapper {
        this.queryTermsField = queryTermsField;
        this.unknownQueryField = unknownQueryField;
        this.queryBuilderField = queryBuilderField;
-        this.mapUnmappedFieldAsString = PercolatorQueryCache.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
+        this.mapUnmappedFieldAsString = INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING.get(indexSettings);
    }

    @Override
@@ -196,7 +202,7 @@ public class PercolatorFieldMapper extends FieldMapper {
        // Fetching of terms, shapes and indexed scripts happen during this rewrite:
        queryBuilder = queryBuilder.rewrite(queryShardContext);
-        try (XContentBuilder builder = XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE)) {
+        try (XContentBuilder builder = XContentFactory.contentBuilder(QUERY_BUILDER_CONTENT_TYPE)) {
            queryBuilder.toXContent(builder, new MapParams(Collections.emptyMap()));
            builder.flush();
            byte[] queryBuilderAsBytes = builder.bytes().toBytes();

@@ -40,6 +40,7 @@ import org.elasticsearch.search.internal.InternalSearchHit;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.SubSearchContext;

+import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
@@ -69,8 +70,8 @@ public class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
        }

        List<LeafReaderContext> ctxs = context.searcher().getIndexReader().leaves();
-        PercolatorQueryCache queriesRegistry = context.percolatorQueryCache();
        IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
+        PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore();

        LeafReaderContext percolatorLeafReaderContext = percolatorIndexSearcher.getIndexReader().leaves().get(0);
        FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
@@ -78,9 +79,14 @@ public class PercolatorHighlightSubFetchPhase implements FetchSubPhase {
            createSubSearchContext(context, percolatorLeafReaderContext, percolateQuery.getDocumentSource());

        for (InternalSearchHit hit : hits) {
+            final Query query;
+            try {
                LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
                int segmentDocId = hit.docId() - ctx.docBase;
-                Query query = queriesRegistry.getQueries(ctx).getQuery(segmentDocId);
+                query = queryStore.getQueries(ctx).getQuery(segmentDocId);
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
            if (query != null) {
                subSearchContext.parsedQuery(new ParsedQuery(query));
                hitContext.reset(

@@ -1,294 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import com.carrotsearch.hppc.IntObjectHashMap;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.index.IndexWarmer.TerminationHandle;
import org.elasticsearch.index.engine.Engine.Searcher;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.query.PercolateQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.function.Supplier;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.LEGACY_TYPE_NAME;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.PercolatorFieldType;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.parseQuery;
public final class PercolatorQueryCache extends AbstractIndexComponent
implements Closeable, LeafReader.CoreClosedListener, PercolateQuery.QueryRegistry {
public final static Setting<Boolean> INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING =
Setting.boolSetting("index.percolator.map_unmapped_fields_as_string", false, Setting.Property.IndexScope);
public final static XContentType QUERY_BUILDER_CONTENT_TYPE = XContentType.SMILE;
private final Supplier<QueryShardContext> queryShardContextSupplier;
private final Cache<Object, QueriesLeaf> cache;
private final boolean mapUnmappedFieldsAsString;
public PercolatorQueryCache(IndexSettings indexSettings, Supplier<QueryShardContext> queryShardContextSupplier) {
super(indexSettings);
this.queryShardContextSupplier = queryShardContextSupplier;
cache = CacheBuilder.<Object, QueriesLeaf>builder().build();
this.mapUnmappedFieldsAsString = indexSettings.getValue(INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
}
@Override
public Leaf getQueries(LeafReaderContext ctx) {
QueriesLeaf percolatorQueries = cache.get(ctx.reader().getCoreCacheKey());
if (percolatorQueries == null) {
throw new IllegalStateException("queries not loaded, queries should be have been preloaded during index warming...");
}
return percolatorQueries;
}
public IndexWarmer.Listener createListener(ThreadPool threadPool) {
return new IndexWarmer.Listener() {
final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
@Override
public TerminationHandle warmReader(IndexShard indexShard, Searcher searcher) {
final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size());
for (final LeafReaderContext ctx : searcher.reader().leaves()) {
if (cache.get(ctx.reader().getCoreCacheKey()) != null) {
latch.countDown();
continue;
}
executor.execute(() -> {
try {
final long start = System.nanoTime();
QueriesLeaf queries = loadQueries(ctx, indexShard);
cache.put(ctx.reader().getCoreCacheKey(), queries);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace(
"loading percolator queries took [{}]",
TimeValue.timeValueNanos(System.nanoTime() - start)
);
}
} catch (Throwable t) {
indexShard.warmerService().logger().warn("failed to load percolator queries", t);
} finally {
latch.countDown();
}
});
}
return () -> latch.await();
}
};
}
QueriesLeaf loadQueries(LeafReaderContext context, IndexShard indexShard) throws IOException {
Version indexVersionCreated = indexShard.indexSettings().getIndexVersionCreated();
MapperService mapperService = indexShard.mapperService();
LeafReader leafReader = context.reader();
ShardId shardId = ShardUtils.extractShardId(leafReader);
if (shardId == null) {
throw new IllegalStateException("can't resolve shard id");
}
if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
// percolator cache insanity
String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index " +
indexSettings.getIndex();
throw new IllegalStateException(message);
}
IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
if (legacyLoading) {
PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, LEGACY_TYPE_NAME), PostingsEnum.NONE);
if (postings != null) {
LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) {
leafReader.document(docId, visitor);
queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source));
visitor.source = null; // reset
}
}
} else {
// Each type can have one percolator field mapper,
// So for each type we check if there is a percolator field mapper
// and parse all the queries for the documents of that type.
IndexSearcher indexSearcher = new IndexSearcher(leafReader);
for (DocumentMapper documentMapper : mapperService.docMappers(false)) {
Weight queryWeight = indexSearcher.createNormalizedWeight(documentMapper.typeFilter(), false);
for (FieldMapper fieldMapper : documentMapper.mappers()) {
if (fieldMapper instanceof PercolatorFieldMapper) {
PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(fieldType.getQueryBuilderFieldName());
if (binaryDocValues != null) {
// use the same leaf reader context the indexSearcher is using too:
Scorer scorer = queryWeight.scorer(leafReader.getContext());
if (scorer != null) {
DocIdSetIterator iterator = scorer.iterator();
for (int docId = iterator.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
BytesRef qbSource = binaryDocValues.get(docId);
if (qbSource.length > 0) {
queries.put(docId, parseQueryBuilder(docId, qbSource));
}
}
}
}
break;
}
}
}
}
leafReader.addCoreClosedListener(this);
return new QueriesLeaf(shardId, queries);
}
private Query parseQueryBuilder(int docId, BytesRef qbSource) {
XContent xContent = QUERY_BUILDER_CONTENT_TYPE.xContent();
try (XContentParser sourceParser = xContent.createParser(qbSource.bytes, qbSource.offset, qbSource.length)) {
QueryShardContext context = queryShardContextSupplier.get();
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
} catch (IOException e) {
throw new PercolatorException(index(), "failed to parse query builder for document [" + docId + "]", e);
}
}
private Query parseLegacyPercolatorDocument(int docId, BytesReference source) {
try (XContentParser sourceParser = XContentHelper.createParser(source)) {
String currentFieldName = null;
XContentParser.Token token = sourceParser.nextToken(); // move the START_OBJECT
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchException("failed to parse query [" + docId + "], not starting with OBJECT");
}
while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = sourceParser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
QueryShardContext context = queryShardContextSupplier.get();
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
} else {
sourceParser.skipChildren();
}
} else if (token == XContentParser.Token.START_ARRAY) {
sourceParser.skipChildren();
}
}
} catch (Exception e) {
throw new PercolatorException(index(), "failed to parse query [" + docId + "]", e);
}
return null;
}
public PercolatorQueryCacheStats getStats(ShardId shardId) {
int numberOfQueries = 0;
for (QueriesLeaf queries : cache.values()) {
if (shardId.equals(queries.shardId)) {
numberOfQueries += queries.queries.size();
}
}
return new PercolatorQueryCacheStats(numberOfQueries);
}
@Override
public void onClose(Object cacheKey) throws IOException {
cache.invalidate(cacheKey);
}
@Override
public void close() throws IOException {
cache.invalidateAll();
}
final static class LegacyQueryFieldVisitor extends StoredFieldVisitor {
private BytesArray source;
@Override
public void binaryField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
source = new BytesArray(bytes);
}
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
if (source != null) {
return Status.STOP;
}
if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
return Status.YES;
} else {
return Status.NO;
}
}
}
final static class QueriesLeaf implements Leaf {
final ShardId shardId;
final IntObjectHashMap<Query> queries;
QueriesLeaf(ShardId shardId, IntObjectHashMap<Query> queries) {
this.shardId = shardId;
this.queries = queries;
}
@Override
public Query getQuery(int docId) {
return queries.get(docId);
}
}
}

@@ -1,89 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Exposes percolator query cache statistics.
*/
public class PercolatorQueryCacheStats implements Streamable, ToXContent {
private long numQueries;
/**
* Noop constructor for serialization purposes.
*/
public PercolatorQueryCacheStats() {
}
PercolatorQueryCacheStats(long numQueries) {
this.numQueries = numQueries;
}
/**
* @return The total number of loaded percolate queries.
*/
public long getNumQueries() {
return numQueries;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.PERCOLATOR);
builder.field(Fields.QUERIES, getNumQueries());
builder.endObject();
return builder;
}
public void add(PercolatorQueryCacheStats percolate) {
if (percolate == null) {
return;
}
numQueries += percolate.getNumQueries();
}
static final class Fields {
static final String PERCOLATOR = "percolator";
static final String QUERIES = "num_queries";
}
public static PercolatorQueryCacheStats readPercolateStats(StreamInput in) throws IOException {
PercolatorQueryCacheStats stats = new PercolatorQueryCacheStats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
numQueries = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(numQueries);
}
}

@@ -50,7 +50,7 @@ public final class PercolateQuery extends Query implements Accountable {
    public static class Builder {

        private final String docType;
-        private final QueryRegistry queryRegistry;
+        private final QueryStore queryStore;
        private final BytesReference documentSource;
        private final IndexSearcher percolatorIndexSearcher;
@@ -59,15 +59,15 @@ public final class PercolateQuery extends Query implements Accountable {

        /**
         * @param docType The type of the document being percolated
-         * @param queryRegistry The registry holding all the percolator queries as Lucene queries.
+         * @param queryStore The lookup holding all the percolator queries as Lucene queries.
         * @param documentSource The source of the document being percolated
         * @param percolatorIndexSearcher The index searcher on top of the in-memory index that holds the document being percolated
         */
-        public Builder(String docType, QueryRegistry queryRegistry, BytesReference documentSource, IndexSearcher percolatorIndexSearcher) {
+        public Builder(String docType, QueryStore queryStore, BytesReference documentSource, IndexSearcher percolatorIndexSearcher) {
            this.docType = Objects.requireNonNull(docType);
            this.documentSource = Objects.requireNonNull(documentSource);
            this.percolatorIndexSearcher = Objects.requireNonNull(percolatorIndexSearcher);
-            this.queryRegistry = Objects.requireNonNull(queryRegistry);
+            this.queryStore = Objects.requireNonNull(queryStore);
        }

        /**
@@ -94,7 +94,6 @@ public final class PercolateQuery extends Query implements Accountable {
            if (percolateTypeQuery != null && queriesMetaDataQuery != null) {
                throw new IllegalStateException("Either filter by deprecated percolator type or by query metadata");
            }
-
            // The query that selects which percolator queries will be evaluated by MemoryIndex:
            BooleanQuery.Builder builder = new BooleanQuery.Builder();
            if (percolateTypeQuery != null) {
@@ -103,24 +102,23 @@ public final class PercolateQuery extends Query implements Accountable {
            if (queriesMetaDataQuery != null) {
                builder.add(queriesMetaDataQuery, FILTER);
            }
-            return new PercolateQuery(docType, queryRegistry, documentSource, builder.build(), percolatorIndexSearcher);
+            return new PercolateQuery(docType, queryStore, documentSource, builder.build(), percolatorIndexSearcher);
        }
    }

    private final String documentType;
-    private final QueryRegistry queryRegistry;
+    private final QueryStore queryStore;
    private final BytesReference documentSource;
    private final Query percolatorQueriesQuery;
    private final IndexSearcher percolatorIndexSearcher;

-    private PercolateQuery(String documentType, QueryRegistry queryRegistry, BytesReference documentSource,
+    private PercolateQuery(String documentType, QueryStore queryStore, BytesReference documentSource,
                           Query percolatorQueriesQuery, IndexSearcher percolatorIndexSearcher) {
        this.documentType = documentType;
        this.documentSource = documentSource;
        this.percolatorQueriesQuery = percolatorQueriesQuery;
-        this.queryRegistry = queryRegistry;
+        this.queryStore = queryStore;
        this.percolatorIndexSearcher = percolatorIndexSearcher;
    }
@@ -128,7 +126,7 @@ public final class PercolateQuery extends Query implements Accountable {
    public Query rewrite(IndexReader reader) throws IOException {
        Query rewritten = percolatorQueriesQuery.rewrite(reader);
        if (rewritten != percolatorQueriesQuery) {
-            return new PercolateQuery(documentType, queryRegistry, documentSource, rewritten, percolatorIndexSearcher);
+            return new PercolateQuery(documentType, queryStore, documentSource, rewritten, percolatorIndexSearcher);
        } else {
            return this;
        }
@@ -151,7 +149,7 @@ public final class PercolateQuery extends Query implements Accountable {
                    if (result == docId) {
                        if (twoPhaseIterator.matches()) {
                            if (needsScores) {
-                                QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext);
+                                QueryStore.Leaf percolatorQueries = queryStore.getQueries(leafReaderContext);
                                Query query = percolatorQueries.getQuery(docId);
                                Explanation detail = percolatorIndexSearcher.explain(query, 0);
                                return Explanation.match(scorer.score(), "PercolateQuery", detail);
@@ -181,9 +179,9 @@ public final class PercolateQuery extends Query implements Accountable {
                    return null;
                }

-                final QueryRegistry.Leaf percolatorQueries = queryRegistry.getQueries(leafReaderContext);
+                final QueryStore.Leaf queries = queryStore.getQueries(leafReaderContext);
                if (needsScores) {
-                    return new BaseScorer(this, approximation, percolatorQueries, percolatorIndexSearcher) {
+                    return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {

                        float score;

@@ -209,7 +207,7 @@ public final class PercolateQuery extends Query implements Accountable {
                        }
                    };
                } else {
-                    return new BaseScorer(this, approximation, percolatorQueries, percolatorIndexSearcher) {
+                    return new BaseScorer(this, approximation, queries, percolatorIndexSearcher) {

                        @Override
                        public float score() throws IOException {
@@ -238,6 +236,10 @@ public final class PercolateQuery extends Query implements Accountable {
        return documentSource;
    }

+    public QueryStore getQueryStore() {
+        return queryStore;
+    }
+
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
@@ -276,13 +278,15 @@ public final class PercolateQuery extends Query implements Accountable {
        return sizeInBytes;
    }

-    public interface QueryRegistry {
+    @FunctionalInterface
+    public interface QueryStore {

-        Leaf getQueries(LeafReaderContext ctx);
+        Leaf getQueries(LeafReaderContext ctx) throws IOException;

+        @FunctionalInterface
        interface Leaf {

-            Query getQuery(int docId);
+            Query getQuery(int docId) throws IOException;
        }
@@ -291,10 +295,10 @@ public final class PercolateQuery extends Query implements Accountable {
    static abstract class BaseScorer extends Scorer {

        final Scorer approximation;
-        final QueryRegistry.Leaf percolatorQueries;
+        final QueryStore.Leaf percolatorQueries;
        final IndexSearcher percolatorIndexSearcher;

-        BaseScorer(Weight weight, Scorer approximation, QueryRegistry.Leaf percolatorQueries, IndexSearcher percolatorIndexSearcher) {
+        BaseScorer(Weight weight, Scorer approximation, QueryStore.Leaf percolatorQueries, IndexSearcher percolatorIndexSearcher) {
            super(weight);
            this.approximation = approximation;
            this.percolatorQueries = percolatorQueries;

@ -21,10 +21,13 @@ package org.elasticsearch.index.query;
import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.index.BinaryDocValues;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.MultiReader; import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.SlowCompositeReaderWrapper; import org.apache.lucene.index.SlowCompositeReaderWrapper;
import org.apache.lucene.index.StoredFieldVisitor;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.index.memory.MemoryIndex; import org.apache.lucene.index.memory.MemoryIndex;
import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause;
@ -33,23 +36,27 @@ import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.Weight; import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.FieldNameAnalyzer; import org.elasticsearch.index.analysis.FieldNameAnalyzer;
import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.DocumentMapperForType;
@ -57,15 +64,16 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.percolator.PercolatorFieldMapper; import org.elasticsearch.index.percolator.PercolatorFieldMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import static org.elasticsearch.index.mapper.SourceToParse.source; import static org.elasticsearch.index.mapper.SourceToParse.source;
import static org.elasticsearch.index.percolator.PercolatorFieldMapper.parseQuery;
public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBuilder> { public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBuilder> {
@ -388,16 +396,14 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
docSearcher.setQueryCache(null); docSearcher.setQueryCache(null);
} }
PercolatorQueryCache registry = context.getPercolatorQueryCache(); IndexSettings indexSettings = context.getIndexSettings();
if (registry == null) { boolean mapUnmappedFieldsAsString = indexSettings.getValue(PercolatorFieldMapper.INDEX_MAP_UNMAPPED_FIELDS_AS_STRING_SETTING);
throw new QueryShardException(context, "no percolator query registry"); return buildQuery(indexSettings.getIndexVersionCreated(), context, docSearcher, mapUnmappedFieldsAsString);
} }
PercolateQuery.Builder builder = new PercolateQuery.Builder( Query buildQuery(Version indexVersionCreated, QueryShardContext context, IndexSearcher docSearcher,
documentType, registry, document, docSearcher boolean mapUnmappedFieldsAsString) throws IOException {
); if (indexVersionCreated.onOrAfter(Version.V_5_0_0_alpha1)) {
Settings indexSettings = registry.getIndexSettings().getSettings();
if (indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null).onOrAfter(Version.V_5_0_0_alpha1)) {
MappedFieldType fieldType = context.fieldMapper(field); MappedFieldType fieldType = context.fieldMapper(field);
if (fieldType == null) { if (fieldType == null) {
throw new QueryShardException(context, "field [" + field + "] does not exist"); throw new QueryShardException(context, "field [" + field + "] does not exist");
@ -408,13 +414,20 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
"] to be of type [percolator], but is of type [" + fieldType.typeName() + "]"); "] to be of type [percolator], but is of type [" + fieldType.typeName() + "]");
} }
PercolatorFieldMapper.PercolatorFieldType pft = (PercolatorFieldMapper.PercolatorFieldType) fieldType; PercolatorFieldMapper.PercolatorFieldType pft = (PercolatorFieldMapper.PercolatorFieldType) fieldType;
PercolateQuery.Builder builder = new PercolateQuery.Builder(
documentType, createStore(pft, context, mapUnmappedFieldsAsString), document, docSearcher
);
builder.extractQueryTermsQuery(pft.getExtractedTermsField(), pft.getUnknownQueryFieldName()); builder.extractQueryTermsQuery(pft.getExtractedTermsField(), pft.getUnknownQueryFieldName());
return builder.build();
} else { } else {
Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME)); Query percolateTypeQuery = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME));
PercolateQuery.Builder builder = new PercolateQuery.Builder(
documentType, createLegacyStore(context, mapUnmappedFieldsAsString), document, docSearcher
);
builder.setPercolateTypeQuery(percolateTypeQuery); builder.setPercolateTypeQuery(percolateTypeQuery);
}
return builder.build(); return builder.build();
} }
}
public String getField() { public String getField() {
return field; return field;
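The hunks above split the old monolithic doToQuery into a thin wrapper that reads the index settings and a version-aware buildQuery, which makes each code path easier to exercise in isolation. A hypothetical direct call (receiver name and arguments are illustrative, not part of this commit):

    // Sketch: drive the pre-5.0 legacy (percolate-type based) path explicitly.
    Query legacy = percolateQueryBuilder.buildQuery(Version.V_2_0_0, context, docSearcher, false);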
@ -459,4 +472,91 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
} }
} }
private static PercolateQuery.QueryStore createStore(PercolatorFieldMapper.PercolatorFieldType fieldType,
QueryShardContext context,
boolean mapUnmappedFieldsAsString) {
return ctx -> {
LeafReader leafReader = ctx.reader();
BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(fieldType.getQueryBuilderFieldName());
if (binaryDocValues == null) {
return docId -> null;
}
Bits bits = leafReader.getDocsWithField(fieldType.getQueryBuilderFieldName());
return docId -> {
if (bits.get(docId)) {
BytesRef qbSource = binaryDocValues.get(docId);
if (qbSource.length > 0) {
XContent xContent = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent();
try (XContentParser sourceParser = xContent.createParser(qbSource.bytes, qbSource.offset, qbSource.length)) {
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
}
} else {
return null;
}
} else {
return null;
}
};
};
}
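The store above resolves each candidate's query lazily: one BinaryDocValues lookup per document, parsed back into a Lucene query only when asked for. Since PercolateQuery.QueryStore is consumed as a two-level lambda (LeafReaderContext -> docId -> Query), a fixed map-backed store is all an experiment or test needs; a minimal sketch with illustrative contents:

    // Map-backed stand-in for the doc-values lookup above (hypothetical queries).
    Map<Integer, Query> queries = new HashMap<>();
    queries.put(0, new TermQuery(new Term("field1", "value1")));
    PercolateQuery.QueryStore queryStore = ctx -> docId -> queries.get(docId);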
private static PercolateQuery.QueryStore createLegacyStore(QueryShardContext context, boolean mapUnmappedFieldsAsString) {
return ctx -> {
LeafReader leafReader = ctx.reader();
return docId -> {
LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
leafReader.document(docId, visitor);
if (visitor.source == null) {
throw new IllegalStateException("No source found for document with docid [" + docId + "]");
}
try (XContentParser sourceParser = XContentHelper.createParser(visitor.source)) {
String currentFieldName = null;
XContentParser.Token token = sourceParser.nextToken(); // move to the START_OBJECT
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchException("failed to parse query [" + docId + "], not starting with OBJECT");
}
while ((token = sourceParser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = sourceParser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("query".equals(currentFieldName)) {
return parseQuery(context, mapUnmappedFieldsAsString, sourceParser);
} else {
sourceParser.skipChildren();
}
} else if (token == XContentParser.Token.START_ARRAY) {
sourceParser.skipChildren();
}
}
}
return null;
};
};
}
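The legacy store instead re-reads each candidate's stored _source and walks its tokens until the top-level query object, skipping everything else. A legacy percolator source of the shape this walk expects can be built like this (the same pattern the removed test below uses; field values are illustrative):

    // Legacy percolator _source: the query object sits under a top-level "query"
    // key, possibly next to arbitrary sibling fields that the token walk skips.
    BytesReference legacySource = XContentFactory.jsonBuilder().startObject()
            .field("query", termQuery("field1", "value1"))
            .endObject().bytes();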
private final static class LegacyQueryFieldVisitor extends StoredFieldVisitor {
private BytesArray source;
@Override
public void binaryField(FieldInfo fieldInfo, byte[] bytes) throws IOException {
source = new BytesArray(bytes);
}
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
if (source != null) {
// the _source field has already been collected; skip all remaining stored fields
return Status.STOP;
}
if (SourceFieldMapper.NAME.equals(fieldInfo.name)) {
return Status.YES;
} else {
return Status.NO;
}
}
}
} }

@ -51,7 +51,6 @@ import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.core.TextFieldMapper; import org.elasticsearch.index.mapper.core.TextFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.query.support.NestedScope;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry; import org.elasticsearch.indices.query.IndicesQueriesRegistry;
@ -82,7 +81,6 @@ public class QueryShardContext extends QueryRewriteContext {
private final Map<String, Query> namedQueries = new HashMap<>(); private final Map<String, Query> namedQueries = new HashMap<>();
private final MapperQueryParser queryParser = new MapperQueryParser(this); private final MapperQueryParser queryParser = new MapperQueryParser(this);
private final IndicesQueriesRegistry indicesQueriesRegistry; private final IndicesQueriesRegistry indicesQueriesRegistry;
private final PercolatorQueryCache percolatorQueryCache;
private boolean allowUnmappedFields; private boolean allowUnmappedFields;
private boolean mapUnmappedFieldAsString; private boolean mapUnmappedFieldAsString;
private NestedScope nestedScope; private NestedScope nestedScope;
@ -90,7 +88,7 @@ public class QueryShardContext extends QueryRewriteContext {
public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService, public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService,
MapperService mapperService, SimilarityService similarityService, ScriptService scriptService, MapperService mapperService, SimilarityService similarityService, ScriptService scriptService,
-   final IndicesQueriesRegistry indicesQueriesRegistry, Client client, PercolatorQueryCache percolatorQueryCache,
+   final IndicesQueriesRegistry indicesQueriesRegistry, Client client,
IndexReader reader, ClusterState clusterState) { IndexReader reader, ClusterState clusterState) {
super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState); super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState);
this.indexSettings = indexSettings; this.indexSettings = indexSettings;
@ -100,14 +98,13 @@ public class QueryShardContext extends QueryRewriteContext {
this.indexFieldDataService = indexFieldDataService; this.indexFieldDataService = indexFieldDataService;
this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields(); this.allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields();
this.indicesQueriesRegistry = indicesQueriesRegistry; this.indicesQueriesRegistry = indicesQueriesRegistry;
this.percolatorQueryCache = percolatorQueryCache;
this.nestedScope = new NestedScope(); this.nestedScope = new NestedScope();
} }
public QueryShardContext(QueryShardContext source) { public QueryShardContext(QueryShardContext source) {
this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService, this(source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService,
source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.client, source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.client,
-   source.percolatorQueryCache, source.reader, source.clusterState);
+   source.reader, source.clusterState);
this.types = source.getTypes(); this.types = source.getTypes();
} }
@ -123,10 +120,6 @@ public class QueryShardContext extends QueryRewriteContext {
return mapperService.analysisService(); return mapperService.analysisService();
} }
public PercolatorQueryCache getPercolatorQueryCache() {
return percolatorQueryCache;
}
public Similarity getSearchSimilarity() { public Similarity getSearchSimilarity() {
return similarityService != null ? similarityService.similarity(mapperService) : null; return similarityService != null ? similarityService.similarity(mapperService) : null;
} }

@ -280,7 +280,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
if (indexShard.routingEntry() == null) { if (indexShard.routingEntry() == null) {
continue; continue;
} }
-   IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats()) });
+   IndexShardStats indexShardStats = new IndexShardStats(indexShard.shardId(), new ShardStats[] { new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesQueryCache, indexShard, flags), indexShard.commitStats()) });
if (!statsByShard.containsKey(indexService.index())) { if (!statsByShard.containsKey(indexService.index())) {
statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats)); statsByShard.put(indexService.index(), arrayAsArrayList(indexShardStats));
} else { } else {

@ -37,7 +37,6 @@ import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.SearchStats;
@ -102,11 +101,6 @@ public class NodeIndicesStats implements Streamable, ToXContent {
return stats.getSearch(); return stats.getSearch();
} }
@Nullable
public PercolatorQueryCacheStats getPercolate() {
return stats.getPercolatorCache();
}
@Nullable @Nullable
public MergeStats getMerge() { public MergeStats getMerge() {
return stats.getMerge(); return stats.getMerge();

@ -78,7 +78,6 @@ public class RestIndicesStatsAction extends BaseRestHandler {
indicesStatsRequest.flush(metrics.contains("flush")); indicesStatsRequest.flush(metrics.contains("flush"));
indicesStatsRequest.warmer(metrics.contains("warmer")); indicesStatsRequest.warmer(metrics.contains("warmer"));
indicesStatsRequest.queryCache(metrics.contains("query_cache")); indicesStatsRequest.queryCache(metrics.contains("query_cache"));
indicesStatsRequest.percolate(metrics.contains("percolator_cache"));
indicesStatsRequest.segments(metrics.contains("segments")); indicesStatsRequest.segments(metrics.contains("segments"));
indicesStatsRequest.fieldData(metrics.contains("fielddata")); indicesStatsRequest.fieldData(metrics.contains("fielddata"));
indicesStatsRequest.completion(metrics.contains("completion")); indicesStatsRequest.completion(metrics.contains("completion"));

@ -222,9 +222,6 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell("merges.total_time", "sibling:pri;alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); table.addCell("merges.total_time", "sibling:pri;alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
table.addCell("pri.merges.total_time", "default:false;text-align:right;desc:time spent in merges"); table.addCell("pri.merges.total_time", "default:false;text-align:right;desc:time spent in merges");
table.addCell("percolate.queries", "sibling:pri;alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
table.addCell("pri.percolate.queries", "default:false;text-align:right;desc:number of registered percolation queries");
table.addCell("refresh.total", "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.total", "sibling:pri;alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
table.addCell("pri.refresh.total", "default:false;text-align:right;desc:total refreshes"); table.addCell("pri.refresh.total", "default:false;text-align:right;desc:total refreshes");
@ -424,9 +421,6 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalTime()); table.addCell(indexStats == null ? null : indexStats.getTotal().getMerge().getTotalTime());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalTime()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getMerge().getTotalTime());
table.addCell(indexStats == null ? null : indexStats.getTotal().getPercolatorCache().getNumQueries());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getPercolatorCache().getNumQueries());
table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotal()); table.addCell(indexStats == null ? null : indexStats.getTotal().getRefresh().getTotal());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotal()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRefresh().getTotal());

@ -45,7 +45,6 @@ import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.IndexingStats; import org.elasticsearch.index.shard.IndexingStats;
@ -185,8 +184,6 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
@ -338,9 +335,6 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell(mergeStats == null ? null : mergeStats.getTotalSize()); table.addCell(mergeStats == null ? null : mergeStats.getTotalSize());
table.addCell(mergeStats == null ? null : mergeStats.getTotalTime()); table.addCell(mergeStats == null ? null : mergeStats.getTotalTime());
PercolatorQueryCacheStats percolatorQueryCacheStats = indicesStats == null ? null : indicesStats.getPercolate();
table.addCell(percolatorQueryCacheStats == null ? null : percolatorQueryCacheStats.getNumQueries());
RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh(); RefreshStats refreshStats = indicesStats == null ? null : indicesStats.getRefresh();
table.addCell(refreshStats == null ? null : refreshStats.getTotal()); table.addCell(refreshStats == null ? null : refreshStats.getTotal());
table.addCell(refreshStats == null ? null : refreshStats.getTotalTime()); table.addCell(refreshStats == null ? null : refreshStats.getTotalTime());

@ -139,8 +139,6 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged"); table.addCell("merges.total_size", "alias:mts,mergesTotalSize;default:false;text-align:right;desc:size merged");
table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges"); table.addCell("merges.total_time", "alias:mtt,mergesTotalTime;default:false;text-align:right;desc:time spent in merges");
table.addCell("percolate.queries", "alias:pq,percolateQueries;default:false;text-align:right;desc:number of registered percolation queries");
table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes"); table.addCell("refresh.total", "alias:rto,refreshTotal;default:false;text-align:right;desc:total refreshes");
table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes"); table.addCell("refresh.time", "alias:rti,refreshTime;default:false;text-align:right;desc:time spent in refreshes");
@ -278,8 +276,6 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalSize()); table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalSize());
table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalTime()); table.addCell(commonStats == null ? null : commonStats.getMerge().getTotalTime());
table.addCell(commonStats == null ? null : commonStats.getPercolatorCache().getNumQueries());
table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotal()); table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotal());
table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotalTime()); table.addCell(commonStats == null ? null : commonStats.getRefresh().getTotalTime());

@ -48,7 +48,6 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
@ -495,11 +494,6 @@ public class DefaultSearchContext extends SearchContext {
return indexService.fieldData(); return indexService.fieldData();
} }
@Override
public PercolatorQueryCache percolatorQueryCache() {
return indexService.cache().getPercolatorQueryCache();
}
@Override @Override
public long timeoutInMillis() { public long timeoutInMillis() {
return timeoutInMillis; return timeoutInMillis;

@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
@ -270,11 +269,6 @@ public abstract class FilteredSearchContext extends SearchContext {
return in.fieldData(); return in.fieldData();
} }
@Override
public PercolatorQueryCache percolatorQueryCache() {
return in.percolatorQueryCache();
}
@Override @Override
public long timeoutInMillis() { public long timeoutInMillis() {
return in.timeoutInMillis(); return in.timeoutInMillis();

@ -38,7 +38,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
@ -231,8 +230,6 @@ public abstract class SearchContext implements Releasable {
public abstract IndexFieldDataService fieldData(); public abstract IndexFieldDataService fieldData();
public abstract PercolatorQueryCache percolatorQueryCache();
public abstract long timeoutInMillis(); public abstract long timeoutInMillis();
public abstract void timeoutInMillis(long timeoutInMillis); public abstract void timeoutInMillis(long timeoutInMillis);

@ -38,7 +38,6 @@ import org.junit.Before;
import java.io.IOException; import java.io.IOException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.percolator.PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
@ -251,7 +250,8 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
} }
private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException { private void assertQueryBuilder(BytesRef actual, QueryBuilder expected) throws IOException {
-   XContentParser sourceParser = QUERY_BUILDER_CONTENT_TYPE.xContent().createParser(actual.bytes, actual.offset, actual.length);
+   XContentParser sourceParser = PercolatorFieldMapper.QUERY_BUILDER_CONTENT_TYPE.xContent()
+       .createParser(actual.bytes, actual.offset, actual.length);
QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser); QueryParseContext qsc = indexService.newQueryShardContext().newParseContext(sourceParser);
assertThat(qsc.parseInnerQueryBuilder(), equalTo(expected)); assertThat(qsc.parseInnerQueryBuilder(), equalTo(expected));
} }

@ -1,390 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.WildcardQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexWarmer;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.mapper.DocumentFieldMappers;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.PercolateQuery;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParser;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.query.WildcardQueryBuilder;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.warmer.ShardIndexWarmerService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Collections;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class PercolatorQueryCacheTests extends ESTestCase {
private QueryShardContext queryShardContext;
private PercolatorQueryCache cache;
void initialize(Object... fields) throws IOException {
Settings settings = Settings.builder()
.put("node.name", PercolatorQueryCacheTests.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry();
QueryParser<TermQueryBuilder> termParser = TermQueryBuilder::fromXContent;
indicesQueriesRegistry.register(termParser, TermQueryBuilder.QUERY_NAME_FIELD);
QueryParser<WildcardQueryBuilder> wildcardParser = WildcardQueryBuilder::fromXContent;
indicesQueriesRegistry.register(wildcardParser, WildcardQueryBuilder.QUERY_NAME_FIELD);
QueryParser<BoolQueryBuilder> boolQueryParser = BoolQueryBuilder::fromXContent;
indicesQueriesRegistry.register(boolQueryParser, BoolQueryBuilder.QUERY_NAME_FIELD);
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(new Index("_index", ClusterState.UNKNOWN_UUID), indexSettings);
SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
AnalysisService analysisService = new AnalysisRegistry(null, new Environment(settings)).build(idxSettings);
MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry();
MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry,
() -> queryShardContext);
mapperService.merge("type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("type", fields).string()),
MapperService.MergeReason.MAPPING_UPDATE, false);
cache = new PercolatorQueryCache(idxSettings, () -> queryShardContext);
ClusterState state = ClusterState.builder(new ClusterName("_name")).build();
queryShardContext = new QueryShardContext(idxSettings, null, null, mapperService, similarityService, null,
indicesQueriesRegistry, null, cache, null, state);
}
public void testLoadQueries() throws Exception {
Directory directory = newDirectory();
IndexWriter indexWriter = new IndexWriter(
directory,
new IndexWriterConfig(new MockAnalyzer(random()))
.setMergePolicy(NoMergePolicy.INSTANCE)
);
boolean legacyFormat = randomBoolean();
Version version = legacyFormat ? Version.V_2_0_0 : Version.CURRENT;
IndexShard indexShard = mockIndexShard(version, legacyFormat);
storeQuery("0", indexWriter, termQuery("field1", "value1"), true, legacyFormat);
storeQuery("1", indexWriter, wildcardQuery("field1", "v*"), true, legacyFormat);
storeQuery("2", indexWriter, boolQuery().must(termQuery("field1", "value1")).must(termQuery("field2", "value2")),
true, legacyFormat);
// dummy docs should be skipped during loading:
Document doc = new Document();
doc.add(new StringField("dummy", "value", Field.Store.YES));
indexWriter.addDocument(doc);
storeQuery("4", indexWriter, termQuery("field2", "value2"), true, legacyFormat);
// only documents of the .percolator type should be loaded:
storeQuery("5", indexWriter, termQuery("field2", "value2"), false, legacyFormat);
storeQuery("6", indexWriter, termQuery("field3", "value3"), true, legacyFormat);
indexWriter.forceMerge(1);
// also include queries for percolator docs marked as deleted:
indexWriter.deleteDocuments(new Term("id", "6"));
indexWriter.close();
ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
assertThat(indexReader.leaves().size(), equalTo(1));
assertThat(indexReader.numDeletedDocs(), equalTo(1));
assertThat(indexReader.maxDoc(), equalTo(7));
initialize("field1", "type=keyword", "field2", "type=keyword", "field3", "type=keyword");
PercolatorQueryCache.QueriesLeaf leaf = cache.loadQueries(indexReader.leaves().get(0), indexShard);
assertThat(leaf.queries.size(), equalTo(5));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("field1", "value1"))));
assertThat(leaf.getQuery(1), equalTo(new WildcardQuery(new Term("field1", "v*"))));
assertThat(leaf.getQuery(2), equalTo(new BooleanQuery.Builder()
.add(new TermQuery(new Term("field1", "value1")), BooleanClause.Occur.MUST)
.add(new TermQuery(new Term("field2", "value2")), BooleanClause.Occur.MUST)
.build()
));
assertThat(leaf.getQuery(4), equalTo(new TermQuery(new Term("field2", "value2"))));
assertThat(leaf.getQuery(6), equalTo(new TermQuery(new Term("field3", "value3"))));
indexReader.close();
directory.close();
}
public void testGetQueries() throws Exception {
Directory directory = newDirectory();
IndexWriter indexWriter = new IndexWriter(
directory,
new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE)
);
storeQuery("0", indexWriter, termQuery("a", "0"), true, false);
storeQuery("1", indexWriter, termQuery("a", "1"), true, false);
storeQuery("2", indexWriter, termQuery("a", "2"), true, false);
indexWriter.flush();
storeQuery("3", indexWriter, termQuery("a", "3"), true, false);
storeQuery("4", indexWriter, termQuery("a", "4"), true, false);
storeQuery("5", indexWriter, termQuery("a", "5"), true, false);
indexWriter.flush();
storeQuery("6", indexWriter, termQuery("a", "6"), true, false);
storeQuery("7", indexWriter, termQuery("a", "7"), true, false);
storeQuery("8", indexWriter, termQuery("a", "8"), true, false);
indexWriter.flush();
indexWriter.close();
ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
assertThat(indexReader.leaves().size(), equalTo(3));
assertThat(indexReader.maxDoc(), equalTo(9));
initialize("a", "type=keyword");
try {
cache.getQueries(indexReader.leaves().get(0));
fail("IllegalStateException expected");
} catch (IllegalStateException e) {
assertThat(e.getMessage(), equalTo("queries not loaded, queries should have been preloaded during index warming..."));
}
IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
ThreadPool threadPool = mockThreadPool();
IndexWarmer.Listener listener = cache.createListener(threadPool);
listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
PercolatorQueryCacheStats stats = cache.getStats(shardId);
assertThat(stats.getNumQueries(), equalTo(9L));
PercolateQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "1"))));
assertThat(leaf.getQuery(2), equalTo(new TermQuery(new Term("a", "2"))));
leaf = cache.getQueries(indexReader.leaves().get(1));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "3"))));
assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "4"))));
assertThat(leaf.getQuery(2), equalTo(new TermQuery(new Term("a", "5"))));
leaf = cache.getQueries(indexReader.leaves().get(2));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "6"))));
assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "7"))));
assertThat(leaf.getQuery(2), equalTo(new TermQuery(new Term("a", "8"))));
indexReader.close();
directory.close();
}
public void testInvalidateEntries() throws Exception {
Directory directory = newDirectory();
IndexWriter indexWriter = new IndexWriter(
directory,
new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE)
);
storeQuery("0", indexWriter, termQuery("a", "0"), true, false);
indexWriter.flush();
storeQuery("1", indexWriter, termQuery("a", "1"), true, false);
indexWriter.flush();
storeQuery("2", indexWriter, termQuery("a", "2"), true, false);
indexWriter.flush();
ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
assertThat(indexReader.leaves().size(), equalTo(3));
assertThat(indexReader.maxDoc(), equalTo(3));
initialize("a", "type=keyword");
IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
ThreadPool threadPool = mockThreadPool();
IndexWarmer.Listener listener = cache.createListener(threadPool);
listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
assertThat(cache.getStats(shardId).getNumQueries(), equalTo(3L));
PercolateQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
leaf = cache.getQueries(indexReader.leaves().get(1));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "1"))));
leaf = cache.getQueries(indexReader.leaves().get(2));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));
// change merge policy, so that merges will actually happen:
indexWriter.getConfig().setMergePolicy(new TieredMergePolicy());
indexWriter.deleteDocuments(new Term("id", "1"));
indexWriter.forceMergeDeletes();
indexReader.close();
indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
assertThat(indexReader.leaves().size(), equalTo(2));
assertThat(indexReader.maxDoc(), equalTo(2));
listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));
leaf = cache.getQueries(indexReader.leaves().get(0));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
leaf = cache.getQueries(indexReader.leaves().get(1));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));
indexWriter.forceMerge(1);
indexReader.close();
indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
assertThat(indexReader.leaves().size(), equalTo(1));
assertThat(indexReader.maxDoc(), equalTo(2));
listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));
leaf = cache.getQueries(indexReader.leaves().get(0));
assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "2"))));
indexWriter.close();
indexReader.close();
directory.close();
}
void storeQuery(String id, IndexWriter indexWriter, QueryBuilder queryBuilder, boolean typeField, boolean legacy) throws IOException {
Document doc = new Document();
doc.add(new StringField("id", id, Field.Store.NO));
if (typeField) {
if (legacy) {
doc.add(new StringField(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME, Field.Store.NO));
} else {
doc.add(new StringField(TypeFieldMapper.NAME, "query", Field.Store.NO));
}
}
if (legacy) {
BytesReference percolatorQuery = XContentFactory.jsonBuilder().startObject()
.field("query", queryBuilder)
.endObject().bytes();
doc.add(new StoredField(
SourceFieldMapper.NAME,
percolatorQuery.array(), percolatorQuery.arrayOffset(), percolatorQuery.length())
);
} else {
BytesRef queryBuilderAsBytes = new BytesRef(
XContentFactory.contentBuilder(PercolatorQueryCache.QUERY_BUILDER_CONTENT_TYPE).value(queryBuilder).bytes().toBytes()
);
doc.add(new BinaryDocValuesField(PercolatorFieldMapper.QUERY_BUILDER_FIELD_NAME, queryBuilderAsBytes));
}
indexWriter.addDocument(doc);
}
IndexShard mockIndexShard(Version version, boolean legacyFormat) {
IndexShard indexShard = mock(IndexShard.class);
ShardIndexWarmerService shardIndexWarmerService = mock(ShardIndexWarmerService.class);
when(shardIndexWarmerService.logger()).thenReturn(logger);
when(indexShard.warmerService()).thenReturn(shardIndexWarmerService);
IndexSettings indexSettings = new IndexSettings(
IndexMetaData.builder("_index").settings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_VERSION_CREATED, version)
).build(),
Settings.EMPTY
);
when(indexShard.indexSettings()).thenReturn(indexSettings);
PercolatorFieldMapper.PercolatorFieldType fieldType = mock(PercolatorFieldMapper.PercolatorFieldType.class);
when(fieldType.name()).thenReturn("query");
when(fieldType.getQueryBuilderFieldName()).thenReturn(PercolatorFieldMapper.QUERY_BUILDER_FIELD_NAME);
PercolatorFieldMapper percolatorFieldMapper = mock(PercolatorFieldMapper.class);
when(percolatorFieldMapper.fieldType()).thenReturn(fieldType);
MapperService mapperService = mock(MapperService.class);
DocumentMapper documentMapper = mock(DocumentMapper.class);
if (legacyFormat) {
when(documentMapper.type()).thenReturn(PercolatorFieldMapper.LEGACY_TYPE_NAME);
when(documentMapper.typeFilter())
.thenReturn(new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorFieldMapper.LEGACY_TYPE_NAME)));
} else {
when(documentMapper.type()).thenReturn("query");
when(documentMapper.typeFilter()).thenReturn(new TermQuery(new Term(TypeFieldMapper.NAME, "query")));
}
Analyzer analyzer = new SimpleAnalyzer();
DocumentFieldMappers documentFieldMappers =
new DocumentFieldMappers(Collections.singleton(percolatorFieldMapper), analyzer, analyzer, analyzer);
when(documentMapper.mappers()).thenReturn(documentFieldMappers);
when(mapperService.docMappers(false)).thenReturn(Collections.singleton(documentMapper));
when(indexShard.mapperService()).thenReturn(mapperService);
return indexShard;
}
ThreadPool mockThreadPool() {
ThreadPool threadPool = mock(ThreadPool.class);
when(threadPool.executor(anyString())).thenReturn(Runnable::run);
return threadPool;
}
}

@ -76,7 +76,6 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.support.QueryParsers; import org.elasticsearch.index.query.support.QueryParsers;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
@ -190,7 +189,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
private static IndexSettings idxSettings; private static IndexSettings idxSettings;
private static SimilarityService similarityService; private static SimilarityService similarityService;
private static MapperService mapperService; private static MapperService mapperService;
private static PercolatorQueryCache percolatorQueryCache;
private static BitsetFilterCache bitsetFilterCache; private static BitsetFilterCache bitsetFilterCache;
private static ScriptService scriptService; private static ScriptService scriptService;
@ -308,7 +306,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
} }
}); });
percolatorQueryCache = new PercolatorQueryCache(idxSettings, () -> createShardContext());
indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class); indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class);
//create some random type with some default field, those types will stick around for all of the subclasses //create some random type with some default field, those types will stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)]; currentTypes = new String[randomIntBetween(0, 5)];
@ -349,7 +346,6 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
idxSettings = null; idxSettings = null;
similarityService = null; similarityService = null;
mapperService = null; mapperService = null;
percolatorQueryCache = null;
bitsetFilterCache = null; bitsetFilterCache = null;
scriptService = null; scriptService = null;
} }
@ -750,7 +746,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
ClusterState state = ClusterState.builder(new ClusterName("_name")).build(); ClusterState state = ClusterState.builder(new ClusterName("_name")).build();
Client client = injector.getInstance(Client.class); Client client = injector.getInstance(Client.class);
return new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService, return new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService, similarityService,
scriptService, indicesQueriesRegistry, client, percolatorQueryCache, null, state); scriptService, indicesQueriesRegistry, client, null, state);
} }
/** /**

@ -20,7 +20,6 @@
package org.elasticsearch.index.query; package org.elasticsearch.index.query;
import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonParseException;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.ResourceNotFoundException;
@ -42,6 +41,7 @@ import org.junit.BeforeClass;
import java.io.IOException; import java.io.IOException;
import java.util.Collections; import java.util.Collections;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;

@ -84,14 +84,14 @@ public class PercolateQueryTests extends ESTestCase {
private Directory directory; private Directory directory;
private IndexWriter indexWriter; private IndexWriter indexWriter;
private Map<String, Query> queries; private Map<String, Query> queries;
-   private PercolateQuery.QueryRegistry queryRegistry;
+   private PercolateQuery.QueryStore queryStore;
private DirectoryReader directoryReader; private DirectoryReader directoryReader;
@Before @Before
public void init() throws Exception { public void init() throws Exception {
directory = newDirectory(); directory = newDirectory();
queries = new HashMap<>(); queries = new HashMap<>();
-   queryRegistry = ctx -> docId -> {
+   queryStore = ctx -> docId -> {
try { try {
String val = ctx.reader().document(docId).get(UidFieldMapper.NAME); String val = ctx.reader().document(docId).get(UidFieldMapper.NAME);
return queries.get(Uid.createUid(val).id()); return queries.get(Uid.createUid(val).id());
@ -145,7 +145,7 @@ public class PercolateQueryTests extends ESTestCase {
PercolateQuery.Builder builder = new PercolateQuery.Builder( PercolateQuery.Builder builder = new PercolateQuery.Builder(
"docType", "docType",
-   queryRegistry,
+   queryStore,
new BytesArray("{}"), new BytesArray("{}"),
percolateSearcher percolateSearcher
); );
@ -219,7 +219,7 @@ public class PercolateQueryTests extends ESTestCase {
PercolateQuery.Builder builder = new PercolateQuery.Builder( PercolateQuery.Builder builder = new PercolateQuery.Builder(
"docType", "docType",
-   queryRegistry,
+   queryStore,
new BytesArray("{}"), new BytesArray("{}"),
percolateSearcher percolateSearcher
); );
@ -336,7 +336,7 @@ public class PercolateQueryTests extends ESTestCase {
IndexSearcher percolateSearcher = memoryIndex.createSearcher(); IndexSearcher percolateSearcher = memoryIndex.createSearcher();
PercolateQuery.Builder builder1 = new PercolateQuery.Builder( PercolateQuery.Builder builder1 = new PercolateQuery.Builder(
"docType", "docType",
-   queryRegistry,
+   queryStore,
new BytesArray("{}"), new BytesArray("{}"),
percolateSearcher percolateSearcher
); );
@ -346,7 +346,7 @@ public class PercolateQueryTests extends ESTestCase {
PercolateQuery.Builder builder2 = new PercolateQuery.Builder( PercolateQuery.Builder builder2 = new PercolateQuery.Builder(
"docType", "docType",
-   queryRegistry,
+   queryStore,
new BytesArray("{}"), new BytesArray("{}"),
percolateSearcher percolateSearcher
); );

@ -47,7 +47,7 @@ public class QueryShardContextTests extends ESTestCase {
MapperService mapperService = mock(MapperService.class); MapperService mapperService = mock(MapperService.class);
when(mapperService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.getIndexSettings()).thenReturn(indexSettings);
QueryShardContext context = new QueryShardContext( QueryShardContext context = new QueryShardContext(
-   indexSettings, null, null, mapperService, null, null, null, null, null, null, null
+   indexSettings, null, null, mapperService, null, null, null, null, null, null
); );
context.setAllowUnmappedFields(false); context.setAllowUnmappedFields(false);

@ -644,7 +644,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test")); IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0); IndexShard shard = test.getShardOrNull(0);
-   ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), test.cache().getPercolatorQueryCache(), shard, new CommonStatsFlags()), shard.commitStats());
+   ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), shard, new CommonStatsFlags()), shard.commitStats());
assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath());
assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath());
assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath()); assertEquals(shard.shardPath().isCustomDataPath(), stats.isCustomDataPath());

@ -670,7 +670,7 @@ public class IndexStatsIT extends ESIntegTestCase {
public void testFlagOrdinalOrder() { public void testFlagOrdinalOrder() {
Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh, Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh,
-   Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.PercolatorCache, Flag.Completion, Flag.Segments,
+   Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Completion, Flag.Segments,
Flag.Translog, Flag.Suggest, Flag.RequestCache, Flag.Recovery}; Flag.Translog, Flag.Suggest, Flag.RequestCache, Flag.Recovery};
assertThat(flags.length, equalTo(Flag.values().length)); assertThat(flags.length, equalTo(Flag.values().length));
@ -913,9 +913,6 @@ public class IndexStatsIT extends ESIntegTestCase {
case Warmer: case Warmer:
builder.setWarmer(set); builder.setWarmer(set);
break; break;
case PercolatorCache:
builder.setPercolate(set);
break;
case Completion: case Completion:
builder.setCompletion(set); builder.setCompletion(set);
break; break;
@ -963,8 +960,6 @@ public class IndexStatsIT extends ESIntegTestCase {
return response.getStore() != null; return response.getStore() != null;
case Warmer: case Warmer:
return response.getWarmer() != null; return response.getWarmer() != null;
case PercolatorCache:
return response.getPercolatorCache() != null;
case Completion: case Completion:
return response.getCompletion() != null; return response.getCompletion() != null;
case Segments: case Segments:

@ -514,38 +514,6 @@ public class PercolatorIT extends ESIntegTestCase {
assertThat(percolate.getMatches(), emptyArray()); assertThat(percolate.getMatches(), emptyArray());
} }
public void testPercolateStatistics() throws Exception {
client().admin().indices().prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator")
.get();
client().admin().indices().prepareCreate("test2")
.addMapping(TYPE_NAME, "query", "type=percolator")
.get();
ensureGreen();
logger.info("--> register a query");
client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();
client().prepareIndex("test2", TYPE_NAME, "1")
.setSource(jsonBuilder().startObject().field("query", matchAllQuery()).endObject())
.execute().actionGet();
refresh();
logger.info("--> First percolate request");
PercolateResponse response = client().preparePercolate()
.setIndices(INDEX_NAME).setDocumentType("type")
.setSource(jsonBuilder().startObject().startObject("doc").field("field", "val").endObject().endObject())
.execute().actionGet();
assertMatchCount(response, 1L);
assertThat(convertFromTextArray(response.getMatches(), INDEX_NAME), arrayContaining("1"));
NumShards numShards = getNumShards(INDEX_NAME);
IndicesStatsResponse indicesResponse = client().admin().indices().prepareStats(INDEX_NAME).execute().actionGet();
assertThat(indicesResponse.getTotal().getPercolatorCache().getNumQueries(), equalTo((long)numShards.dataCopies)); // number of copies
}
public void testPercolatingExistingDocs() throws Exception { public void testPercolatingExistingDocs() throws Exception {
client().admin().indices().prepareCreate(INDEX_NAME) client().admin().indices().prepareCreate(INDEX_NAME)
.addMapping(TYPE_NAME, "query", "type=percolator") .addMapping(TYPE_NAME, "query", "type=percolator")

@ -289,7 +289,7 @@ public class HighlightBuilderTests extends ESTestCase {
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings);
// shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter // shard context will only need indicesQueriesRegistry for building Query objects nested in highlighter
QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry, QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry,
-   null, null, null, null) {
+   null, null, null) {
@Override @Override
public MappedFieldType fieldMapper(String name) { public MappedFieldType fieldMapper(String name) {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);

@ -158,7 +158,7 @@ public class QueryRescoreBuilderTests extends ESTestCase {
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings(randomAsciiOfLengthBetween(1, 10), indexSettings);
// shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer // shard context will only need indicesQueriesRegistry for building Query objects nested in query rescorer
QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry, QueryShardContext mockShardContext = new QueryShardContext(idxSettings, null, null, null, null, null, indicesQueriesRegistry,
-   null, null, null, null) {
+   null, null, null) {
@Override @Override
public MappedFieldType fieldMapper(String name) { public MappedFieldType fieldMapper(String name) {
TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name); TextFieldMapper.Builder builder = new TextFieldMapper.Builder(name);

@ -232,7 +232,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder<T>> extends EST
} }
}); });
return new QueryShardContext(idxSettings, bitsetFilterCache, ifds, null, null, scriptService, return new QueryShardContext(idxSettings, bitsetFilterCache, ifds, null, null, scriptService,
-   indicesQueriesRegistry, null, null, null, null) {
+   indicesQueriesRegistry, null, null, null) {
@Override @Override
public MappedFieldType fieldMapper(String name) { public MappedFieldType fieldMapper(String name) {
return provideMappedFieldType(name); return provideMappedFieldType(name);

@ -46,4 +46,4 @@ the existing document.
==== Percolate Stats

-Percolate stats have been replaced with `percolate` query cache stats in nodes stats and cluster stats APIs.
+The percolate stats have been removed. This is because the percolator no longer caches the percolator queries.

@@ -327,13 +327,10 @@ the document defined in the `percolate` query.

==== How it Works Under the Hood

When indexing a document into an index that has the <<percolator,percolator field type>> mapping configured, the query
-part of the documents gets parsed into a Lucene query and is kept in memory until that percolator document is removed.
-So, all the active percolator queries are kept in memory.
+part of the document gets parsed into a Lucene query and is stored in the Lucene index. A binary representation
+of the query is stored, and the query's terms are also analyzed and stored in an indexed field.
At search time, the document specified in the request gets parsed into a Lucene document and is stored in an in-memory
-temporary Lucene index. This in-memory index can just hold this one document and it is optimized for that. Then all the queries
-that are registered to the index that the search request is targeted for, are going to be executed on this single document
-in-memory index. This happens on each shard the search request needs to execute.
-By using `routing` or additional queries the amount of percolator queries that need to be executed can be reduced and thus
-the time the search API needs to run can be decreased.
+temporary Lucene index. This in-memory index can hold just this one document and is optimized for that. After this,
+a special query is built from the terms in the in-memory index to select candidate percolator queries based on
+their indexed query terms. These candidates are then evaluated against the in-memory index to check whether they actually match.

@@ -153,7 +153,7 @@ public class TemplateQueryParserTests extends ESTestCase {
        });
        IndicesQueriesRegistry indicesQueriesRegistry = injector.getInstance(IndicesQueriesRegistry.class);
        contextFactory = () -> new QueryShardContext(idxSettings, bitsetFilterCache, indexFieldDataService, mapperService,
-                similarityService, scriptService, indicesQueriesRegistry, proxy, null, null, null);
+                similarityService, scriptService, indicesQueriesRegistry, proxy, null, null);
    }

    @Override

@@ -48,7 +48,6 @@
        merges.total_docs .+ \n
        merges.total_size .+ \n
        merges.total_time .+ \n
-        percolate.queries .+ \n
        refresh.total .+ \n
        refresh.time .+ \n
        search.fetch_current .+ \n

@@ -38,7 +38,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
@@ -73,7 +72,6 @@ public class TestSearchContext extends SearchContext {
    final IndexService indexService;
    final IndexFieldDataService indexFieldDataService;
    final BitsetFilterCache fixedBitSetFilterCache;
-    final PercolatorQueryCache percolatorQueryCache;
    final ThreadPool threadPool;
    final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
    final IndexShard indexShard;
@@ -101,7 +99,6 @@ public class TestSearchContext extends SearchContext {
        this.indexService = indexService;
        this.indexFieldDataService = indexService.fieldData();
        this.fixedBitSetFilterCache = indexService.cache().bitsetFilterCache();
-        this.percolatorQueryCache = indexService.cache().getPercolatorQueryCache();
        this.threadPool = threadPool;
        this.indexShard = indexService.getShardOrNull(0);
        this.scriptService = scriptService;
@@ -116,7 +113,6 @@ public class TestSearchContext extends SearchContext {
        this.indexFieldDataService = null;
        this.threadPool = null;
        this.fixedBitSetFilterCache = null;
-        this.percolatorQueryCache = null;
        this.indexShard = null;
        scriptService = null;
        this.queryShardContext = queryShardContext;
@@ -323,11 +319,6 @@ public class TestSearchContext extends SearchContext {
        return indexFieldDataService;
    }

-    @Override
-    public PercolatorQueryCache percolatorQueryCache() {
-        return percolatorQueryCache;
-    }
-
    @Override
    public long timeoutInMillis() {
        return 0;

@@ -34,7 +34,7 @@ import org.elasticsearch.test.TestSearchContext;
public class MockSearchServiceTests extends ESTestCase {
    public void testAssertNoInFlightContext() {
        SearchContext s = new TestSearchContext(new QueryShardContext(new IndexSettings(IndexMetaData.PROTO, Settings.EMPTY), null, null,
-                null, null, null, null, null, null, null, null)) {
+                null, null, null, null, null, null, null)) {
            @Override
            public SearchShardTarget shardTarget() {
                return new SearchShardTarget("node", new Index("idx", "ignored"), 0);