diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index f2a1dd6e587..2a36a7833cd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.index.cache.filter.FilterCacheStats; +import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.percolator.stats.PercolateStats; @@ -46,7 +46,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { private DocsStats docs; private StoreStats store; private FieldDataStats fieldData; - private FilterCacheStats filterCache; + private QueryCacheStats queryCache; private CompletionStats completion; private SegmentsStats segments; private PercolateStats percolate; @@ -60,7 +60,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { this.docs = new DocsStats(); this.store = new StoreStats(); this.fieldData = new FieldDataStats(); - this.filterCache = new FilterCacheStats(); + this.queryCache = new QueryCacheStats(); this.completion = new CompletionStats(); this.segments = new SegmentsStats(); this.percolate = new PercolateStats(); @@ -83,7 +83,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { } store.add(shardCommonStats.store); fieldData.add(shardCommonStats.fieldData); - filterCache.add(shardCommonStats.filterCache); + queryCache.add(shardCommonStats.queryCache); completion.add(shardCommonStats.completion); segments.add(shardCommonStats.segments); percolate.add(shardCommonStats.percolate); @@ -117,8 +117,8 @@ public class ClusterStatsIndices implements ToXContent, Streamable { return fieldData; } - public FilterCacheStats getFilterCache() { - return filterCache; + public QueryCacheStats getQueryCache() { + return queryCache; } public CompletionStats getCompletion() { @@ -140,7 +140,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { docs = DocsStats.readDocStats(in); store = StoreStats.readStoreStats(in); fieldData = FieldDataStats.readFieldDataStats(in); - filterCache = FilterCacheStats.readFilterCacheStats(in); + queryCache = QueryCacheStats.readFilterCacheStats(in); completion = CompletionStats.readCompletionStats(in); segments = SegmentsStats.readSegmentsStats(in); percolate = PercolateStats.readPercolateStats(in); @@ -153,7 +153,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { docs.writeTo(out); store.writeTo(out); fieldData.writeTo(out); - filterCache.writeTo(out); + queryCache.writeTo(out); completion.writeTo(out); segments.writeTo(out); percolate.writeTo(out); @@ -176,7 +176,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable { docs.toXContent(builder, params); store.toXContent(builder, params); fieldData.toXContent(builder, params); - filterCache.toXContent(builder, params); + queryCache.toXContent(builder, params); completion.toXContent(builder, params); segments.toXContent(builder, params); percolate.toXContent(builder, params); diff --git 
a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 40c1c8b9e74..57b8f3956b3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -55,7 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction { private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store, - CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, + CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, CommonStatsFlags.Flag.Percolate); private final NodeService nodeService; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index cf471ab0c77..a5563d6b872 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -30,10 +30,10 @@ import java.io.IOException; */ public class ClearIndicesCacheRequest extends BroadcastRequest { - private boolean filterCache = false; + private boolean queryCache = false; private boolean fieldDataCache = false; private boolean recycler = false; - private boolean queryCache = false; + private boolean requestCache = false; private String[] fields = null; @@ -44,17 +44,8 @@ public class ClearIndicesCacheRequest extends BroadcastRequest { private final IndicesService indicesService; - private final IndicesQueryCache indicesQueryCache; + private final IndicesRequestCache indicesRequestCache; @Inject public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, - IndicesQueryCache indicesQueryCache, ActionFilters actionFilters) { + IndicesRequestCache indicesQueryCache, ActionFilters actionFilters) { super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, ClearIndicesCacheRequest.class, ShardClearIndicesCacheRequest.class, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; - this.indicesQueryCache = indicesQueryCache; + this.indicesRequestCache = indicesQueryCache; } @Override @@ -100,9 +100,9 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction { return flags.isSet(Flag.Warmer); } - public IndicesStatsRequest filterCache(boolean filterCache) { - flags.set(Flag.FilterCache, filterCache); + public IndicesStatsRequest queryCache(boolean queryCache) { + flags.set(Flag.QueryCache, queryCache); return this; } - public boolean filterCache() { - return flags.isSet(Flag.FilterCache); + public boolean queryCache() { + return flags.isSet(Flag.QueryCache); } public IndicesStatsRequest fieldData(boolean fieldData) { @@ -247,13 +247,13 @@ public class IndicesStatsRequest extends BroadcastRequest { return flags.isSet(Flag.Suggest); } - public IndicesStatsRequest queryCache(boolean queryCache) { - flags.set(Flag.QueryCache, 
queryCache); + public IndicesStatsRequest requestCache(boolean requestCache) { + flags.set(Flag.RequestCache, requestCache); return this; } - public boolean queryCache() { - return flags.isSet(Flag.QueryCache); + public boolean requestCache() { + return flags.isSet(Flag.RequestCache); } public IndicesStatsRequest recovery(boolean recovery) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java index fb342119d3e..8e78c548e37 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsRequestBuilder.java @@ -112,8 +112,8 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder return this; } - public IndicesStatsRequestBuilder setFilterCache(boolean filterCache) { - request.filterCache(filterCache); + public IndicesStatsRequestBuilder setQueryCache(boolean queryCache) { + request.queryCache(queryCache); return this; } @@ -157,8 +157,8 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder return this; } - public IndicesStatsRequestBuilder setQueryCache(boolean queryCache) { - request.queryCache(queryCache); + public IndicesStatsRequestBuilder setRequestCache(boolean requestCache) { + request.requestCache(requestCache); return this; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index b4b0d6a4435..c6cf00dfbc0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -157,8 +157,8 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction * Index warming allows to run registered search requests to warm up the index before it is available for search. * With the near real time aspect of search, cold data (segments) will be warmed up before they become available for - * search. This includes things such as the filter cache, filesystem cache, and loading field data for fields. + * search. This includes things such as the query cache, filesystem cache, and loading field data for fields. *

* * @see the reference guide for more detailed information about the Indices / Search Warmer */ -package org.elasticsearch.action.admin.indices.warmer; \ No newline at end of file +package org.elasticsearch.action.admin.indices.warmer; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java index 0b11e0bcf5d..6fa22e68a87 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java @@ -132,21 +132,21 @@ public class TransportPutWarmerAction extends TransportMasterNodeAction entries = new ArrayList<>(warmers.entries().size() + 1); for (IndexWarmersMetaData.Entry entry : warmers.entries()) { if (entry.name().equals(request.name())) { found = true; - entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().queryCache(), source)); + entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source)); } else { entries.add(entry); } } if (!found) { logger.info("[{}] put warmer [{}]", index, request.name()); - entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().queryCache(), source)); + entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source)); } else { logger.info("[{}] update warmer [{}]", index, request.name()); } diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 3d4c08aa96f..1b6a4f2e1f5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -76,7 +76,7 @@ public class SearchRequest extends ActionRequest implements Indic private BytesReference source; private BytesReference extraSource; - private Boolean queryCache; + private Boolean requestCache; private Scroll scroll; @@ -103,7 +103,7 @@ public class SearchRequest extends ActionRequest implements Indic this.template = searchRequest.template; this.source = searchRequest.source; this.extraSource = searchRequest.extraSource; - this.queryCache = searchRequest.queryCache; + this.requestCache = searchRequest.requestCache; this.scroll = searchRequest.scroll; this.types = searchRequest.types; this.indicesOptions = searchRequest.indicesOptions; @@ -533,12 +533,12 @@ public class SearchRequest extends ActionRequest implements Indic * will default to the index level setting if query cache is enabled or not). 
*/ public SearchRequest queryCache(Boolean queryCache) { - this.queryCache = queryCache; + this.requestCache = queryCache; return this; } - public Boolean queryCache() { - return this.queryCache; + public Boolean requestCache() { + return this.requestCache; } @Override @@ -568,7 +568,7 @@ public class SearchRequest extends ActionRequest implements Indic if (in.readBoolean()) { template = Template.readTemplate(in); } - queryCache = in.readOptionalBoolean(); + requestCache = in.readOptionalBoolean(); } @Override @@ -602,6 +602,6 @@ public class SearchRequest extends ActionRequest implements Indic template.writeTo(out); } - out.writeOptionalBoolean(queryCache); + out.writeOptionalBoolean(requestCache); } } diff --git a/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java b/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java index 7fa1947a501..16be9cea211 100644 --- a/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java +++ b/core/src/main/java/org/elasticsearch/common/joda/DateMathParser.java @@ -49,7 +49,7 @@ public class DateMathParser { } // Note: we take a callable here for the timestamp in order to be able to figure out - // if it has been used. For instance, the query cache does not cache queries that make + // if it has been used. For instance, the request cache does not cache requests that make // use of `now`. public long parse(String text, Callable now, boolean roundUp, DateTimeZone timeZone) { long time; diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index e23a623add0..560105455ab 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -52,7 +52,7 @@ import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InternalIndicesLifecycle; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.ShardsPluginsModule; @@ -309,7 +309,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() { @Override public void close() throws IOException { - injector.getInstance(IndicesFilterCache.class).onClose(shardId); + injector.getInstance(IndicesQueryCache.class).onClose(shardId); } }), path)); modules.add(new DeletionPolicyModule(indexSettings)); diff --git a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java index 3b71f735c2e..afb75cf50b5 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/IndexCache.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.settings.IndexSettings; import java.io.Closeable; @@ -37,24 +37,24 @@ import java.io.IOException; */ public class IndexCache extends AbstractIndexComponent implements 
Closeable { - private final FilterCache filterCache; - private final QueryCachingPolicy filterCachingPolicy; + private final QueryCache queryCache; + private final QueryCachingPolicy queryCachingPolicy; private final BitsetFilterCache bitsetFilterCache; @Inject - public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryCachingPolicy filterCachingPolicy, BitsetFilterCache bitsetFilterCache) { + public IndexCache(Index index, @IndexSettings Settings indexSettings, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, BitsetFilterCache bitsetFilterCache) { super(index, indexSettings); - this.filterCache = filterCache; - this.filterCachingPolicy = filterCachingPolicy; + this.queryCache = queryCache; + this.queryCachingPolicy = queryCachingPolicy; this.bitsetFilterCache = bitsetFilterCache; } - public FilterCache filter() { - return filterCache; + public QueryCache query() { + return queryCache; } - public QueryCachingPolicy filterPolicy() { - return filterCachingPolicy; + public QueryCachingPolicy queryPolicy() { + return queryCachingPolicy; } /** @@ -66,11 +66,11 @@ public class IndexCache extends AbstractIndexComponent implements Closeable { @Override public void close() throws IOException { - IOUtils.close(filterCache, bitsetFilterCache); + IOUtils.close(queryCache, bitsetFilterCache); } public void clear(String reason) { - filterCache.clear(reason); + queryCache.clear(reason); bitsetFilterCache.clear(reason); } diff --git a/core/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java b/core/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java index 3a0c9fc584e..43ddbf7cef1 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java +++ b/core/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java @@ -22,7 +22,7 @@ package org.elasticsearch.index.cache; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.bitset.BitsetFilterCacheModule; -import org.elasticsearch.index.cache.filter.FilterCacheModule; +import org.elasticsearch.index.cache.query.QueryCacheModule; /** * @@ -37,7 +37,7 @@ public class IndexCacheModule extends AbstractModule { @Override protected void configure() { - new FilterCacheModule(settings).configure(binder()); + new QueryCacheModule(settings).configure(binder()); new BitsetFilterCacheModule(settings).configure(binder()); bind(IndexCache.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index 7bf252f235a..2203c94bd5a 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -67,7 +67,7 @@ import java.util.concurrent.Executor; *

* Use this cache with care, only components that require that a filter is to be materialized as a {@link BitDocIdSet} * and require that it should always be around should use this cache, otherwise the - * {@link org.elasticsearch.index.cache.filter.FilterCache} should be used instead. + * {@link org.elasticsearch.index.cache.query.QueryCache} should be used instead. */ public class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener>, Closeable { diff --git a/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java b/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java deleted file mode 100644 index 948f7e57702..00000000000 --- a/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.cache.filter; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; - -import java.io.IOException; - -/** - */ -public class FilterCacheStats implements Streamable, ToXContent { - - long ramBytesUsed; - long hitCount; - long missCount; - long cacheCount; - long cacheSize; - - public FilterCacheStats() { - } - - public FilterCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) { - this.ramBytesUsed = ramBytesUsed; - this.hitCount = hitCount; - this.missCount = missCount; - this.cacheCount = cacheCount; - this.cacheSize = cacheSize; - } - - public void add(FilterCacheStats stats) { - ramBytesUsed += stats.ramBytesUsed; - hitCount += stats.hitCount; - missCount += stats.missCount; - cacheCount += stats.cacheCount; - cacheSize += stats.cacheSize; - } - - public long getMemorySizeInBytes() { - return ramBytesUsed; - } - - public ByteSizeValue getMemorySize() { - return new ByteSizeValue(ramBytesUsed); - } - - /** - * The total number of lookups in the cache. - */ - public long getTotalCount() { - return hitCount + missCount; - } - - /** - * The number of successful lookups in the cache. - */ - public long getHitCount() { - return hitCount; - } - - /** - * The number of lookups in the cache that failed to retrieve a {@link DocIdSet}. - */ - public long getMissCount() { - return missCount; - } - - /** - * The number of {@link DocIdSet}s that have been cached. 
- */ - public long getCacheCount() { - return cacheCount; - } - - /** - * The number of {@link DocIdSet}s that are in the cache. - */ - public long getCacheSize() { - return cacheSize; - } - - /** - * The number of {@link DocIdSet}s that have been evicted from the cache. - */ - public long getEvictions() { - return cacheCount - cacheSize; - } - - public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException { - FilterCacheStats stats = new FilterCacheStats(); - stats.readFrom(in); - return stats; - } - - - @Override - public void readFrom(StreamInput in) throws IOException { - ramBytesUsed = in.readLong(); - hitCount = in.readLong(); - missCount = in.readLong(); - cacheCount = in.readLong(); - cacheSize = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(ramBytesUsed); - out.writeLong(hitCount); - out.writeLong(missCount); - out.writeLong(cacheCount); - out.writeLong(cacheSize); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(Fields.FILTER_CACHE); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed); - builder.field(Fields.TOTAL_COUNT, getTotalCount()); - builder.field(Fields.HIT_COUNT, getHitCount()); - builder.field(Fields.MISS_COUNT, getMissCount()); - builder.field(Fields.CACHE_SIZE, getCacheSize()); - builder.field(Fields.CACHE_COUNT, getCacheCount()); - builder.field(Fields.EVICTIONS, getEvictions()); - builder.endObject(); - return builder; - } - - static final class Fields { - static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache"); - static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); - static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); - static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count"); - static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count"); - static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count"); - static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size"); - static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count"); - static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions"); - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java similarity index 88% rename from core/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java rename to core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java index 37c45e3adf7..bee947b54f0 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCache.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.cache.filter; +package org.elasticsearch.index.cache.query; import org.elasticsearch.index.IndexComponent; @@ -26,7 +26,7 @@ import java.io.Closeable; /** * */ -public interface FilterCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache { +public interface QueryCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache { static class EntriesStats { public final long sizeInBytes; diff --git a/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheModule.java similarity index 77% rename from core/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java rename to core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheModule.java index 20496e3266b..1c9ef9ba157 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheModule.java @@ -17,38 +17,38 @@ * under the License. */ -package org.elasticsearch.index.cache.filter; +package org.elasticsearch.index.cache.query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.index.IndexFilterCache; +import org.elasticsearch.index.cache.query.index.IndexQueryCache; /** * */ -public class FilterCacheModule extends AbstractModule { +public class QueryCacheModule extends AbstractModule { public static final class FilterCacheSettings { - public static final String FILTER_CACHE_TYPE = "index.cache.filter.type"; + public static final String FILTER_CACHE_TYPE = "index.queries.cache.type"; // for test purposes only - public static final String FILTER_CACHE_EVERYTHING = "index.cache.filter.everything"; + public static final String FILTER_CACHE_EVERYTHING = "index.queries.cache.everything"; } private final Settings settings; - public FilterCacheModule(Settings settings) { + public QueryCacheModule(Settings settings) { this.settings = settings; } @Override protected void configure() { - bind(FilterCache.class) - .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache")) + bind(QueryCache.class) + .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class, "org.elasticsearch.index.cache.query.", "FilterCache")) .in(Scopes.SINGLETON); - // the filter cache is a node-level thing, however we want the most popular filters + // the query cache is a node-level thing, however we want the most popular queries // to be computed on a per-index basis, that is why we don't use the SINGLETON // scope below if (settings.getAsBoolean(FilterCacheSettings.FILTER_CACHE_EVERYTHING, false)) { diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java index f83566e2e2f..7065192747a 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java @@ -33,81 +33,130 @@ import java.io.IOException; */ public class QueryCacheStats implements Streamable, ToXContent { - long memorySize; - long evictions; + long 
ramBytesUsed; long hitCount; long missCount; + long cacheCount; + long cacheSize; public QueryCacheStats() { } - public QueryCacheStats(long memorySize, long evictions, long hitCount, long missCount) { - this.memorySize = memorySize; - this.evictions = evictions; + public QueryCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) { + this.ramBytesUsed = ramBytesUsed; this.hitCount = hitCount; this.missCount = missCount; + this.cacheCount = cacheCount; + this.cacheSize = cacheSize; } public void add(QueryCacheStats stats) { - this.memorySize += stats.memorySize; - this.evictions += stats.evictions; - this.hitCount += stats.hitCount; - this.missCount += stats.missCount; + ramBytesUsed += stats.ramBytesUsed; + hitCount += stats.hitCount; + missCount += stats.missCount; + cacheCount += stats.cacheCount; + cacheSize += stats.cacheSize; } public long getMemorySizeInBytes() { - return this.memorySize; + return ramBytesUsed; } public ByteSizeValue getMemorySize() { - return new ByteSizeValue(memorySize); + return new ByteSizeValue(ramBytesUsed); } - public long getEvictions() { - return this.evictions; + /** + * The total number of lookups in the cache. + */ + public long getTotalCount() { + return hitCount + missCount; } + /** + * The number of successful lookups in the cache. + */ public long getHitCount() { - return this.hitCount; + return hitCount; } + /** + * The number of lookups in the cache that failed to retrieve a {@link DocIdSet}. + */ public long getMissCount() { - return this.missCount; + return missCount; } + /** + * The number of {@link DocIdSet}s that have been cached. + */ + public long getCacheCount() { + return cacheCount; + } + + /** + * The number of {@link DocIdSet}s that are in the cache. + */ + public long getCacheSize() { + return cacheSize; + } + + /** + * The number of {@link DocIdSet}s that have been evicted from the cache. 
+ */ + public long getEvictions() { + return cacheCount - cacheSize; + } + + public static QueryCacheStats readFilterCacheStats(StreamInput in) throws IOException { + QueryCacheStats stats = new QueryCacheStats(); + stats.readFrom(in); + return stats; + } + + @Override public void readFrom(StreamInput in) throws IOException { - memorySize = in.readVLong(); - evictions = in.readVLong(); - hitCount = in.readVLong(); - missCount = in.readVLong(); + ramBytesUsed = in.readLong(); + hitCount = in.readLong(); + missCount = in.readLong(); + cacheCount = in.readLong(); + cacheSize = in.readLong(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(memorySize); - out.writeVLong(evictions); - out.writeVLong(hitCount); - out.writeVLong(missCount); + out.writeLong(ramBytesUsed); + out.writeLong(hitCount); + out.writeLong(missCount); + out.writeLong(cacheCount); + out.writeLong(cacheSize); } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields.QUERY_CACHE_STATS); - builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize); - builder.field(Fields.EVICTIONS, getEvictions()); + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(Fields.QUERY_CACHE); + builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed); + builder.field(Fields.TOTAL_COUNT, getTotalCount()); builder.field(Fields.HIT_COUNT, getHitCount()); builder.field(Fields.MISS_COUNT, getMissCount()); + builder.field(Fields.CACHE_SIZE, getCacheSize()); + builder.field(Fields.CACHE_COUNT, getCacheCount()); + builder.field(Fields.EVICTIONS, getEvictions()); builder.endObject(); return builder; } static final class Fields { - static final XContentBuilderString QUERY_CACHE_STATS = new XContentBuilderString("query_cache"); + static final XContentBuilderString QUERY_CACHE = new XContentBuilderString("query_cache"); static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); - static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions"); + static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count"); static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count"); static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count"); + static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size"); + static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count"); + static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions"); } + } diff --git a/core/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java similarity index 75% rename from core/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java rename to core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java index 5dfaf4c7799..6a41be5a653 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/index/IndexQueryCache.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.cache.filter.index; +package org.elasticsearch.index.cache.query.index; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Weight; @@ -26,20 +26,20 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; -import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; /** - * The index-level filter cache. This class mostly delegates to the node-level - * filter cache: {@link IndicesFilterCache}. + * The index-level query cache. This class mostly delegates to the node-level + * query cache: {@link IndicesQueryCache}. */ -public class IndexFilterCache extends AbstractIndexComponent implements FilterCache { +public class IndexQueryCache extends AbstractIndexComponent implements QueryCache { - final IndicesFilterCache indicesFilterCache; + final IndicesQueryCache indicesFilterCache; @Inject - public IndexFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) { + public IndexQueryCache(Index index, @IndexSettings Settings indexSettings, IndicesQueryCache indicesFilterCache) { super(index, indexSettings); this.indicesFilterCache = indicesFilterCache; } diff --git a/core/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/query/none/NoneQueryCache.java similarity index 82% rename from core/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java rename to core/src/main/java/org/elasticsearch/index/cache/query/none/NoneQueryCache.java index ded3c207a42..9d88940ff89 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/query/none/NoneQueryCache.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.cache.filter.none; +package org.elasticsearch.index.cache.query.none; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.Weight; @@ -25,18 +25,18 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.AbstractIndexComponent; import org.elasticsearch.index.Index; -import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.settings.IndexSettings; /** * */ -public class NoneFilterCache extends AbstractIndexComponent implements FilterCache { +public class NoneQueryCache extends AbstractIndexComponent implements QueryCache { @Inject - public NoneFilterCache(Index index, @IndexSettings Settings indexSettings) { + public NoneQueryCache(Index index, @IndexSettings Settings indexSettings) { super(index, indexSettings); - logger.debug("Using no filter cache"); + logger.debug("Using no query cache"); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java b/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java new file mode 100644 index 00000000000..58bb6a69379 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.cache.request; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentBuilderString; + +import java.io.IOException; + +/** + */ +public class RequestCacheStats implements Streamable, ToXContent { + + long memorySize; + long evictions; + long hitCount; + long missCount; + + public RequestCacheStats() { + } + + public RequestCacheStats(long memorySize, long evictions, long hitCount, long missCount) { + this.memorySize = memorySize; + this.evictions = evictions; + this.hitCount = hitCount; + this.missCount = missCount; + } + + public void add(RequestCacheStats stats) { + this.memorySize += stats.memorySize; + this.evictions += stats.evictions; + this.hitCount += stats.hitCount; + this.missCount += stats.missCount; + } + + public long getMemorySizeInBytes() { + return this.memorySize; + } + + public ByteSizeValue getMemorySize() { + return new ByteSizeValue(memorySize); + } + + public long getEvictions() { + return this.evictions; + } + + public long getHitCount() { + return this.hitCount; + } + + public long getMissCount() { + return this.missCount; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + memorySize = in.readVLong(); + evictions = in.readVLong(); + hitCount = in.readVLong(); + missCount = in.readVLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(memorySize); + out.writeVLong(evictions); + out.writeVLong(hitCount); + out.writeVLong(missCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(Fields.QUERY_CACHE_STATS); + builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize); + builder.field(Fields.EVICTIONS, getEvictions()); + builder.field(Fields.HIT_COUNT, getHitCount()); + builder.field(Fields.MISS_COUNT, getMissCount()); + builder.endObject(); + return builder; + } + + static final class Fields { + static final XContentBuilderString QUERY_CACHE_STATS = new XContentBuilderString("request_cache"); + static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size"); + static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes"); + static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions"); + static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count"); + static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count"); + } +} diff --git a/core/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java b/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java similarity index 74% rename from core/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java rename to core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java index 0cece67803c..ef82e73dc4d 100644 --- a/core/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java +++ b/core/src/main/java/org/elasticsearch/index/cache/request/ShardRequestCache.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.index.cache.query; +package org.elasticsearch.index.cache.request; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; @@ -28,23 +28,23 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; /** */ -public class ShardQueryCache extends AbstractIndexShardComponent implements RemovalListener { +public class ShardRequestCache extends AbstractIndexShardComponent implements RemovalListener { final CounterMetric evictionsMetric = new CounterMetric(); final CounterMetric totalMetric = new CounterMetric(); final CounterMetric hitCount = new CounterMetric(); final CounterMetric missCount = new CounterMetric(); - public ShardQueryCache(ShardId shardId, @IndexSettings Settings indexSettings) { + public ShardRequestCache(ShardId shardId, @IndexSettings Settings indexSettings) { super(shardId, indexSettings); } - public QueryCacheStats stats() { - return new QueryCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count()); + public RequestCacheStats stats() { + return new RequestCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count()); } public void onHit() { @@ -55,12 +55,12 @@ public class ShardQueryCache extends AbstractIndexShardComponent implements Remo missCount.inc(); } - public void onCached(IndicesQueryCache.Key key, IndicesQueryCache.Value value) { + public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) { totalMetric.inc(key.ramBytesUsed() + value.ramBytesUsed()); } @Override - public void onRemoval(RemovalNotification removalNotification) { + public void onRemoval(RemovalNotification removalNotification) { if (removalNotification.wasEvicted()) { evictionsMetric.inc(); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index be8aa252cf1..576e6dadb0d 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.store.Store; @@ -76,8 +75,8 @@ public final class EngineConfig { private final CodecService codecService; private final Engine.FailedEngineListener failedEngineListener; private final boolean forceNewTranslog; - private final QueryCache filterCache; - private final QueryCachingPolicy filterCachingPolicy; + private final QueryCache queryCache; + private final QueryCachingPolicy queryCachingPolicy; /** * Index setting for index concurrency / number of threadstates in the indexwriter. 
@@ -144,7 +143,7 @@ public final class EngineConfig { Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, - TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy, TranslogConfig translogConfig) { + TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig) { this.shardId = shardId; this.indexSettings = indexSettings; this.threadPool = threadPool; @@ -168,8 +167,8 @@ public final class EngineConfig { updateVersionMapSize(); this.translogRecoveryPerformer = translogRecoveryPerformer; this.forceNewTranslog = indexSettings.getAsBoolean(INDEX_FORCE_NEW_TRANSLOG, false); - this.filterCache = filterCache; - this.filterCachingPolicy = filterCachingPolicy; + this.queryCache = queryCache; + this.queryCachingPolicy = queryCachingPolicy; this.translogConfig = translogConfig; } @@ -409,17 +408,17 @@ public final class EngineConfig { } /** - * Return the cache to use for filters. + * Return the cache to use for queries. */ - public QueryCache getFilterCache() { - return filterCache; + public QueryCache getQueryCache() { + return queryCache; } /** - * Return the policy to use when caching filters. + * Return the policy to use when caching queries. */ - public QueryCachingPolicy getFilterCachingPolicy() { - return filterCachingPolicy; + public QueryCachingPolicy getQueryCachingPolicy() { + return queryCachingPolicy; } /** diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java b/core/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java index fa8d9a6a5c1..a09ad622299 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java @@ -41,8 +41,8 @@ public class EngineSearcherFactory extends SearcherFactory { @Override public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException { IndexSearcher searcher = super.newSearcher(reader, previousReader); - searcher.setQueryCache(engineConfig.getFilterCache()); - searcher.setQueryCachingPolicy(engineConfig.getFilterCachingPolicy()); + searcher.setQueryCache(engineConfig.getQueryCache()); + searcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy()); searcher.setSimilarity(engineConfig.getSimilarity()); return searcher; } diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java index 101f2eff601..4ea00e7b0cf 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.google.common.collect.Lists; + import org.apache.lucene.index.Term; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.BooleanClause.Occur; @@ -38,7 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; -import 
org.elasticsearch.indices.cache.filter.terms.TermsLookup; +import org.elasticsearch.indices.cache.query.terms.TermsLookup; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; diff --git a/core/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/core/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java index 650b1a79dd5..518e86b309d 100644 --- a/core/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java +++ b/core/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java @@ -39,7 +39,7 @@ import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.indices.IndicesWarmer; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.ttl.IndicesTTLService; /** @@ -110,7 +110,8 @@ public class IndexDynamicSettingsModule extends AbstractModule { indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH); indexDynamicSettings.addDynamicSetting(TranslogConfig.INDEX_TRANSLOG_DURABILITY); indexDynamicSettings.addDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED); - indexDynamicSettings.addDynamicSetting(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, Validator.BOOLEAN); + indexDynamicSettings.addDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN); + indexDynamicSettings.addDynamicSetting(IndicesRequestCache.DEPRECATED_INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN); indexDynamicSettings.addDynamicSetting(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, Validator.TIME); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 965b7a3dc21..32e695d445e 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.shard; import com.google.common.base.Charsets; import com.google.common.base.Preconditions; + import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.store.AlreadyClosedException; @@ -58,8 +59,8 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCacheStats; -import org.elasticsearch.index.cache.query.ShardQueryCache; +import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.cache.request.ShardRequestCache; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; @@ -99,7 +100,7 @@ import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.InternalIndicesLifecycle; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.recovery.RecoveryState; import 
org.elasticsearch.search.suggest.completion.Completion090PostingsFormat; import org.elasticsearch.search.suggest.completion.CompletionStats; @@ -134,7 +135,7 @@ public class IndexShard extends AbstractIndexShardComponent { private final ShardSearchStats searchService; private final ShardGetService getService; private final ShardIndexWarmerService shardWarmerService; - private final ShardQueryCache shardQueryCache; + private final ShardRequestCache shardQueryCache; private final ShardFieldData shardFieldData; private final PercolatorQueriesRegistry percolatorQueriesRegistry; private final ShardPercolateService shardPercolateService; @@ -154,7 +155,7 @@ public class IndexShard extends AbstractIndexShardComponent { private final EngineConfig engineConfig; private final TranslogConfig translogConfig; private final MergePolicyConfig mergePolicyConfig; - private final IndicesFilterCache indicesFilterCache; + private final IndicesQueryCache indicesFilterCache; private final StoreRecoveryService storeRecoveryService; private TimeValue refreshInterval; @@ -191,7 +192,7 @@ public class IndexShard extends AbstractIndexShardComponent { @Inject public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, - IndicesFilterCache indicesFilterCache, ShardPercolateService shardPercolateService, CodecService codecService, + IndicesQueryCache indicesFilterCache, ShardPercolateService shardPercolateService, CodecService codecService, ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, IndexService indexService, @Nullable IndicesWarmer warmer, SnapshotDeletionPolicy deletionPolicy, SimilarityService similarityService, EngineFactory factory, ClusterService clusterService, ShardPath path, BigArrays bigArrays) { @@ -219,7 +220,7 @@ public class IndexShard extends AbstractIndexShardComponent { this.searchService = new ShardSearchStats(indexSettings); this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings); this.indicesFilterCache = indicesFilterCache; - this.shardQueryCache = new ShardQueryCache(shardId, indexSettings); + this.shardQueryCache = new ShardRequestCache(shardId, indexSettings); this.shardFieldData = new ShardFieldData(); this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, indicesLifecycle, mapperService, indexFieldDataService, shardPercolateService); this.shardPercolateService = shardPercolateService; @@ -296,7 +297,7 @@ public class IndexShard extends AbstractIndexShardComponent { return this.shardWarmerService; } - public ShardQueryCache queryCache() { + public ShardRequestCache requestCache() { return this.shardQueryCache; } @@ -614,7 +615,7 @@ public class IndexShard extends AbstractIndexShardComponent { return shardWarmerService.stats(); } - public FilterCacheStats filterCacheStats() { + public QueryCacheStats queryCacheStats() { return indicesFilterCache.getStats(shardId); } @@ -1345,7 +1346,7 @@ public class IndexShard extends AbstractIndexShardComponent { }; return new EngineConfig(shardId, threadPool, indexingService, indexSettingsService.indexSettings(), warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig, - mapperService.indexAnalyzer(), 
similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy(), translogConfig); + mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), indexCache.queryPolicy(), translogConfig); } private static class IndexShardOperationCounter extends AbstractRefCounted { diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index 01f5db1e126..6ec1e3f8ad3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -42,7 +42,7 @@ import org.elasticsearch.index.store.Store; import org.elasticsearch.index.termvectors.ShardTermVectorsService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesWarmer; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -60,7 +60,7 @@ public final class ShadowIndexShard extends IndexShard { IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, - IndexAliasesService indexAliasesService, IndicesFilterCache indicesFilterCache, + IndexAliasesService indexAliasesService, IndicesQueryCache indicesFilterCache, ShardPercolateService shardPercolateService, CodecService codecService, ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, IndexService indexService, @Nullable IndicesWarmer warmer, diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index 50bbfa61858..4e8946b5339 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices; import com.google.common.collect.ImmutableList; + import org.elasticsearch.action.update.UpdateHelper; import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService; import org.elasticsearch.common.inject.AbstractModule; @@ -27,8 +28,8 @@ import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.SpawnModules; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.analysis.IndicesAnalysisModule; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener; @@ -71,8 +72,8 @@ public class IndicesModule extends AbstractModule implements SpawnModules { bind(IndicesClusterStateService.class).asEagerSingleton(); bind(IndexingMemoryController.class).asEagerSingleton(); bind(SyncedFlushService.class).asEagerSingleton(); - bind(IndicesFilterCache.class).asEagerSingleton(); bind(IndicesQueryCache.class).asEagerSingleton(); + 
bind(IndicesRequestCache.class).asEagerSingleton(); bind(IndicesFieldDataCache.class).asEagerSingleton(); bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton(); bind(IndicesTTLService.class).asEagerSingleton(); diff --git a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index e0f286e325b..2c82ad0b27e 100644 --- a/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/core/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -33,8 +33,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.index.Index; -import org.elasticsearch.index.cache.filter.FilterCacheStats; import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; @@ -132,13 +132,13 @@ public class NodeIndicesStats implements Streamable, Serializable, ToXContent { } @Nullable - public FilterCacheStats getFilterCache() { - return stats.getFilterCache(); + public QueryCacheStats getFilterCache() { + return stats.getQueryCache(); } @Nullable - public QueryCacheStats getQueryCache() { - return stats.getQueryCache(); + public RequestCacheStats getQueryCache() { + return stats.getRequestCache(); } @Nullable diff --git a/core/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java b/core/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java deleted file mode 100644 index 0d82a38c031..00000000000 --- a/core/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java +++ /dev/null @@ -1,308 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
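One easy-to-miss detail in the NodeIndicesStats hunk above: the getter names stay the same while what they return is renamed, so getFilterCache() now hands back the Lucene query cache stats and getQueryCache() now hands back the request cache stats. The following stand-alone sketch of that kind of renaming shim is illustrative only; the class and field names are invented, and the real class returns QueryCacheStats / RequestCacheStats objects rather than raw byte counts.

public class RenamedCacheStatsShim {

    // What used to be reported as the "filter cache" is now the Lucene query cache.
    private final long queryCacheMemoryBytes;
    // What used to be reported as the shard-level "query cache" is now the request cache.
    private final long requestCacheMemoryBytes;

    public RenamedCacheStatsShim(long queryCacheMemoryBytes, long requestCacheMemoryBytes) {
        this.queryCacheMemoryBytes = queryCacheMemoryBytes;
        this.requestCacheMemoryBytes = requestCacheMemoryBytes;
    }

    /** Old accessor name kept for existing callers; now backed by the query cache. */
    public long getFilterCache() {
        return queryCacheMemoryBytes;
    }

    /** Old accessor name kept for existing callers; now backed by the request cache. */
    public long getQueryCache() {
        return requestCacheMemoryBytes;
    }

    public static void main(String[] args) {
        RenamedCacheStatsShim stats = new RenamedCacheStatsShim(1024, 2048);
        System.out.println("filter_cache (really the query cache): " + stats.getFilterCache());
        System.out.println("query_cache (really the request cache): " + stats.getQueryCache());
    }
}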
- */ - -package org.elasticsearch.indices.cache.filter; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.Explanation; -import org.apache.lucene.search.LRUQueryCache; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCache; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.Bits; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.ShardCoreKeyMap; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.MemorySizeValue; -import org.elasticsearch.index.cache.filter.FilterCacheStats; -import org.elasticsearch.index.shard.ShardId; - -import java.io.Closeable; -import java.io.IOException; -import java.util.HashMap; -import java.util.IdentityHashMap; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -public class IndicesFilterCache extends AbstractComponent implements QueryCache, Closeable { - - public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size"; - public static final String INDICES_CACHE_QUERY_COUNT = "indices.cache.filter.count"; - - private final LRUQueryCache cache; - private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); - private final Map shardStats = new ConcurrentHashMap<>(); - private volatile long sharedRamBytesUsed; - - // This is a hack for the fact that the close listener for the - // ShardCoreKeyMap will be called before onDocIdSetEviction - // See onDocIdSetEviction for more info - private final Map stats2 = new IdentityHashMap<>(); - - @Inject - public IndicesFilterCache(Settings settings) { - super(settings); - final String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE, "10%"); - final ByteSizeValue size = MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString, INDICES_CACHE_QUERY_SIZE); - final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 1000); - logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], max filter count [{}]", - sizeString, size, count); - cache = new LRUQueryCache(count, size.bytes()) { - - private Stats getStats(Object coreKey) { - final ShardId shardId = shardKeyMap.getShardId(coreKey); - if (shardId == null) { - return null; - } - return shardStats.get(shardId); - } - - private Stats getOrCreateStats(Object coreKey) { - final ShardId shardId = shardKeyMap.getShardId(coreKey); - Stats stats = shardStats.get(shardId); - if (stats == null) { - stats = new Stats(); - shardStats.put(shardId, stats); - } - return stats; - } - - // It's ok to not protect these callbacks by a lock since it is - // done in LRUQueryCache - @Override - protected void onClear() { - assert Thread.holdsLock(this); - super.onClear(); - for (Stats stats : shardStats.values()) { - // don't throw away hit/miss - stats.cacheSize = 0; - stats.ramBytesUsed = 0; - } - sharedRamBytesUsed = 0; - } - - @Override - protected void onQueryCache(Query filter, long ramBytesUsed) { - assert Thread.holdsLock(this); - super.onQueryCache(filter, ramBytesUsed); - sharedRamBytesUsed += ramBytesUsed; - } - - @Override - protected void onQueryEviction(Query filter, long ramBytesUsed) { - assert Thread.holdsLock(this); - super.onQueryEviction(filter, ramBytesUsed); - 
sharedRamBytesUsed -= ramBytesUsed; - } - - @Override - protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) { - assert Thread.holdsLock(this); - super.onDocIdSetCache(readerCoreKey, ramBytesUsed); - final Stats shardStats = getOrCreateStats(readerCoreKey); - shardStats.cacheSize += 1; - shardStats.cacheCount += 1; - shardStats.ramBytesUsed += ramBytesUsed; - - StatsAndCount statsAndCount = stats2.get(readerCoreKey); - if (statsAndCount == null) { - statsAndCount = new StatsAndCount(shardStats); - stats2.put(readerCoreKey, statsAndCount); - } - statsAndCount.count += 1; - } - - @Override - protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) { - assert Thread.holdsLock(this); - super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed); - // We can't use ShardCoreKeyMap here because its core closed - // listener is called before the listener of the cache which - // triggers this eviction. So instead we use use stats2 that - // we only evict when nothing is cached anymore on the segment - // instead of relying on close listeners - final StatsAndCount statsAndCount = stats2.get(readerCoreKey); - final Stats shardStats = statsAndCount.stats; - shardStats.cacheSize -= numEntries; - shardStats.ramBytesUsed -= sumRamBytesUsed; - statsAndCount.count -= numEntries; - if (statsAndCount.count == 0) { - stats2.remove(readerCoreKey); - } - } - - @Override - protected void onHit(Object readerCoreKey, Query filter) { - assert Thread.holdsLock(this); - super.onHit(readerCoreKey, filter); - final Stats shardStats = getStats(readerCoreKey); - shardStats.hitCount += 1; - } - - @Override - protected void onMiss(Object readerCoreKey, Query filter) { - assert Thread.holdsLock(this); - super.onMiss(readerCoreKey, filter); - final Stats shardStats = getOrCreateStats(readerCoreKey); - shardStats.missCount += 1; - } - }; - sharedRamBytesUsed = 0; - } - - /** Get usage statistics for the given shard. */ - public FilterCacheStats getStats(ShardId shard) { - final Map stats = new HashMap<>(); - for (Map.Entry entry : shardStats.entrySet()) { - stats.put(entry.getKey(), entry.getValue().toQueryCacheStats()); - } - FilterCacheStats shardStats = new FilterCacheStats(); - FilterCacheStats info = stats.get(shard); - if (info == null) { - info = new FilterCacheStats(); - } - shardStats.add(info); - - // We also have some shared ram usage that we try to distribute to - // proportionally to their number of cache entries of each shard - long totalSize = 0; - for (FilterCacheStats s : stats.values()) { - totalSize += s.getCacheSize(); - } - final double weight = totalSize == 0 - ? 
1d / stats.size() - : shardStats.getCacheSize() / totalSize; - final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed); - shardStats.add(new FilterCacheStats(additionalRamBytesUsed, 0, 0, 0, 0)); - return shardStats; - } - - @Override - public Weight doCache(Weight weight, QueryCachingPolicy policy) { - while (weight instanceof CachingWeightWrapper) { - weight = ((CachingWeightWrapper) weight).in; - } - final Weight in = cache.doCache(weight, policy); - // We wrap the weight to track the readers it sees and map them with - // the shards they belong to - return new CachingWeightWrapper(in); - } - - private class CachingWeightWrapper extends Weight { - - private final Weight in; - - protected CachingWeightWrapper(Weight in) { - super(in.getQuery()); - this.in = in; - } - - @Override - public void extractTerms(Set terms) { - in.extractTerms(terms); - } - - @Override - public Explanation explain(LeafReaderContext context, int doc) throws IOException { - shardKeyMap.add(context.reader()); - return in.explain(context, doc); - } - - @Override - public float getValueForNormalization() throws IOException { - return in.getValueForNormalization(); - } - - @Override - public void normalize(float norm, float topLevelBoost) { - in.normalize(norm, topLevelBoost); - } - - @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - shardKeyMap.add(context.reader()); - return in.scorer(context, acceptDocs); - } - } - - /** Clear all entries that belong to the given index. */ - public void clearIndex(String index) { - final Set coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index); - for (Object coreKey : coreCacheKeys) { - cache.clearCoreCacheKey(coreKey); - } - - // This cache stores two things: filters, and doc id sets. Calling - // clear only removes the doc id sets, but if we reach the situation - // that the cache does not contain any DocIdSet anymore, then it - // probably means that the user wanted to remove everything. 
- if (cache.getCacheSize() == 0) { - cache.clear(); - } - } - - @Override - public void close() { - assert shardKeyMap.size() == 0 : shardKeyMap.size(); - assert shardStats.isEmpty() : shardStats.keySet(); - assert stats2.isEmpty() : stats2; - cache.clear(); - } - - private static class Stats implements Cloneable { - - volatile long ramBytesUsed; - volatile long hitCount; - volatile long missCount; - volatile long cacheCount; - volatile long cacheSize; - - FilterCacheStats toQueryCacheStats() { - return new FilterCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); - } - } - - private static class StatsAndCount { - int count; - final Stats stats; - - StatsAndCount(Stats stats) { - this.stats = stats; - this.count = 0; - } - } - - private boolean empty(Stats stats) { - if (stats == null) { - return true; - } - return stats.cacheSize == 0 && stats.ramBytesUsed == 0; - } - - public void onClose(ShardId shardId) { - assert empty(shardStats.get(shardId)); - shardStats.remove(shardId); - } -} diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java index 2bbaa7e4d23..70c14b1295e 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java +++ b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java @@ -19,453 +19,302 @@ package org.elasticsearch.indices.cache.query; -import com.carrotsearch.hppc.ObjectHashSet; -import com.carrotsearch.hppc.ObjectSet; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.common.cache.Weigher; - -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.util.Accountable; -import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesReference; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.LRUQueryCache; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.MemorySizeValue; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardState; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchRequest; -import org.elasticsearch.search.query.QueryPhase; -import org.elasticsearch.search.query.QuerySearchResult; 
-import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.shard.ShardId; -import java.util.Collection; -import java.util.Collections; -import java.util.EnumSet; -import java.util.Iterator; +import java.io.Closeable; +import java.io.IOException; +import java.util.HashMap; +import java.util.IdentityHashMap; +import java.util.Map; import java.util.Set; -import java.util.concurrent.Callable; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.ConcurrentHashMap; -import static org.elasticsearch.common.Strings.hasLength; +public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { -/** - * The indices query cache allows to cache a shard level query stage responses, helping with improving - * similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent - * with the semantics of NRT (the index reader version is part of the cache key), and relies on size based - * eviction to evict old reader associated cache entries as well as scheduler reaper to clean readers that - * are no longer used or closed shards. - *
- * Currently, the cache is only enabled for {@link SearchType#COUNT}, and can only be opted in on an index - * level setting that can be dynamically changed and defaults to false. - *
- * There are still several TODOs left in this class, some easily addressable, some more complex, but the support - * is functional. - */ -public class IndicesQueryCache extends AbstractComponent implements RemovalListener { + public static final String INDICES_CACHE_QUERY_SIZE = "indices.queries.cache.size"; + @Deprecated + public static final String DEPRECATED_INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size"; + public static final String INDICES_CACHE_QUERY_COUNT = "indices.queries.cache.count"; - /** - * A setting to enable or disable query caching on an index level. Its dynamic by default - * since we are checking on the cluster state IndexMetaData always. - */ - public static final String INDEX_CACHE_QUERY_ENABLED = "index.cache.query.enable"; - public static final String INDICES_CACHE_QUERY_CLEAN_INTERVAL = "indices.cache.query.clean_interval"; + private final LRUQueryCache cache; + private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); + private final Map shardStats = new ConcurrentHashMap<>(); + private volatile long sharedRamBytesUsed; - public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.query.size"; - public static final String INDICES_CACHE_QUERY_EXPIRE = "indices.cache.query.expire"; - public static final String INDICES_CACHE_QUERY_CONCURRENCY_LEVEL = "indices.cache.query.concurrency_level"; - - private static final Set CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH); - - private final ThreadPool threadPool; - private final ClusterService clusterService; - - private final TimeValue cleanInterval; - private final Reaper reaper; - - final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); - final Set keysToClean = ConcurrentCollections.newConcurrentSet(); - - - //TODO make these changes configurable on the cluster level - private final String size; - private final TimeValue expire; - private final int concurrencyLevel; - - private volatile Cache cache; + // This is a hack for the fact that the close listener for the + // ShardCoreKeyMap will be called before onDocIdSetEviction + // See onDocIdSetEviction for more info + private final Map stats2 = new IdentityHashMap<>(); @Inject - public IndicesQueryCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) { + public IndicesQueryCache(Settings settings) { super(settings); - this.clusterService = clusterService; - this.threadPool = threadPool; - this.cleanInterval = settings.getAsTime(INDICES_CACHE_QUERY_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60)); - // this cache can be very small yet still be very effective - this.size = settings.get(INDICES_CACHE_QUERY_SIZE, "1%"); - this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null); - // defaults to 4, but this is a busy map for all indices, increase it a bit by default - this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16); - if (concurrencyLevel <= 0) { - throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE); + if (sizeString == null) { + sizeString = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE); + if (sizeString != null) { + deprecationLogger.deprecated("The [" + DEPRECATED_INDICES_CACHE_QUERY_SIZE + + "] settings is now deprecated, use [" + INDICES_CACHE_QUERY_SIZE + "] instead"); + } } - buildCache(); + if (sizeString == null) { + sizeString = "10%"; + } + final ByteSizeValue size = 
MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString, INDICES_CACHE_QUERY_SIZE); + final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 1000); + logger.debug("using [node] query cache with size [{}], actual_size [{}], max filter count [{}]", + sizeString, size, count); + cache = new LRUQueryCache(count, size.bytes()) { - this.reaper = new Reaper(); - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper); + private Stats getStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + if (shardId == null) { + return null; + } + return shardStats.get(shardId); + } + + private Stats getOrCreateStats(Object coreKey) { + final ShardId shardId = shardKeyMap.getShardId(coreKey); + Stats stats = shardStats.get(shardId); + if (stats == null) { + stats = new Stats(); + shardStats.put(shardId, stats); + } + return stats; + } + + // It's ok to not protect these callbacks by a lock since it is + // done in LRUQueryCache + @Override + protected void onClear() { + assert Thread.holdsLock(this); + super.onClear(); + for (Stats stats : shardStats.values()) { + // don't throw away hit/miss + stats.cacheSize = 0; + stats.ramBytesUsed = 0; + } + sharedRamBytesUsed = 0; + } + + @Override + protected void onQueryCache(Query filter, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onQueryCache(filter, ramBytesUsed); + sharedRamBytesUsed += ramBytesUsed; + } + + @Override + protected void onQueryEviction(Query filter, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onQueryEviction(filter, ramBytesUsed); + sharedRamBytesUsed -= ramBytesUsed; + } + + @Override + protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) { + assert Thread.holdsLock(this); + super.onDocIdSetCache(readerCoreKey, ramBytesUsed); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.cacheSize += 1; + shardStats.cacheCount += 1; + shardStats.ramBytesUsed += ramBytesUsed; + + StatsAndCount statsAndCount = stats2.get(readerCoreKey); + if (statsAndCount == null) { + statsAndCount = new StatsAndCount(shardStats); + stats2.put(readerCoreKey, statsAndCount); + } + statsAndCount.count += 1; + } + + @Override + protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) { + assert Thread.holdsLock(this); + super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed); + // We can't use ShardCoreKeyMap here because its core closed + // listener is called before the listener of the cache which + // triggers this eviction. 
So instead we use use stats2 that + // we only evict when nothing is cached anymore on the segment + // instead of relying on close listeners + final StatsAndCount statsAndCount = stats2.get(readerCoreKey); + final Stats shardStats = statsAndCount.stats; + shardStats.cacheSize -= numEntries; + shardStats.ramBytesUsed -= sumRamBytesUsed; + statsAndCount.count -= numEntries; + if (statsAndCount.count == 0) { + stats2.remove(readerCoreKey); + } + } + + @Override + protected void onHit(Object readerCoreKey, Query filter) { + assert Thread.holdsLock(this); + super.onHit(readerCoreKey, filter); + final Stats shardStats = getStats(readerCoreKey); + shardStats.hitCount += 1; + } + + @Override + protected void onMiss(Object readerCoreKey, Query filter) { + assert Thread.holdsLock(this); + super.onMiss(readerCoreKey, filter); + final Stats shardStats = getOrCreateStats(readerCoreKey); + shardStats.missCount += 1; + } + }; + sharedRamBytesUsed = 0; } - private void buildCache() { - long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes(); - - CacheBuilder cacheBuilder = CacheBuilder.newBuilder() - .maximumWeight(sizeInBytes).weigher(new QueryCacheWeigher()).removalListener(this); - cacheBuilder.concurrencyLevel(concurrencyLevel); - - if (expire != null) { - cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); + /** Get usage statistics for the given shard. */ + public QueryCacheStats getStats(ShardId shard) { + final Map stats = new HashMap<>(); + for (Map.Entry entry : shardStats.entrySet()) { + stats.put(entry.getKey(), entry.getValue().toQueryCacheStats()); } - - cache = cacheBuilder.build(); - } - - private static class QueryCacheWeigher implements Weigher { - - @Override - public int weigh(Key key, Value value) { - return (int) (key.ramBytesUsed() + value.ramBytesUsed()); + QueryCacheStats shardStats = new QueryCacheStats(); + QueryCacheStats info = stats.get(shard); + if (info == null) { + info = new QueryCacheStats(); } - } + shardStats.add(info); - public void close() { - reaper.close(); - cache.invalidateAll(); - } - - public void clear(IndexShard shard) { - if (shard == null) { - return; + // We also have some shared ram usage that we try to distribute to + // proportionally to their number of cache entries of each shard + long totalSize = 0; + for (QueryCacheStats s : stats.values()) { + totalSize += s.getCacheSize(); } - keysToClean.add(new CleanupKey(shard, -1)); - logger.trace("{} explicit cache clear", shard.shardId()); - reaper.reap(); + final double weight = totalSize == 0 + ? 1d / stats.size() + : shardStats.getCacheSize() / totalSize; + final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed); + shardStats.add(new QueryCacheStats(additionalRamBytesUsed, 0, 0, 0, 0)); + return shardStats; } @Override - public void onRemoval(RemovalNotification notification) { - if (notification.getKey() == null) { - return; + public Weight doCache(Weight weight, QueryCachingPolicy policy) { + while (weight instanceof CachingWeightWrapper) { + weight = ((CachingWeightWrapper) weight).in; } - notification.getKey().shard.queryCache().onRemoval(notification); + final Weight in = cache.doCache(weight, policy); + // We wrap the weight to track the readers it sees and map them with + // the shards they belong to + return new CachingWeightWrapper(in); } - /** - * Can the shard request be cached at all? 
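The rewritten IndicesQueryCache.getStats above charges each shard a slice of the node-wide cache's shared RAM in proportion to that shard's share of cached entries, falling back to an even split when the cache holds nothing. The plain-Java sketch below isolates just that arithmetic; the class name, shard labels and numbers are invented for illustration. Note one subtlety: the hunk computes shardStats.getCacheSize() / totalSize, which looks like long division and would truncate the weight, so the sketch casts to double before dividing.

import java.util.LinkedHashMap;
import java.util.Map;

public class SharedRamAttributionSketch {

    /**
     * Mirrors the weighting in getStats(): a shard's share of the node-wide cache RAM is
     * its cache entry count divided by the total entry count, or an even split across
     * shards when the cache is empty.
     */
    static long additionalRamBytes(long shardCacheSize, Map<String, Long> cacheSizePerShard, long sharedRamBytesUsed) {
        long totalSize = 0;
        for (long size : cacheSizePerShard.values()) {
            totalSize += size;
        }
        final double weight = totalSize == 0
                ? 1d / cacheSizePerShard.size()
                : (double) shardCacheSize / totalSize;
        return Math.round(weight * sharedRamBytesUsed);
    }

    public static void main(String[] args) {
        Map<String, Long> cacheSizePerShard = new LinkedHashMap<>();
        cacheSizePerShard.put("index1[0]", 30L); // 30 cached doc id sets on this shard
        cacheSizePerShard.put("index1[1]", 10L);
        long sharedRamBytesUsed = 4_000_000L;    // RAM held by the shared LRUQueryCache
        // index1[0] holds 75% of the entries, so it is charged 3,000,000 bytes.
        System.out.println(additionalRamBytes(30L, cacheSizePerShard, sharedRamBytesUsed));
    }
}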
- */ - public boolean canCache(ShardSearchRequest request, SearchContext context) { - // TODO: for now, template is not supported, though we could use the generated bytes as the key - if (hasLength(request.templateSource())) { - return false; + private class CachingWeightWrapper extends Weight { + + private final Weight in; + + protected CachingWeightWrapper(Weight in) { + super(in.getQuery()); + this.in = in; } - // for now, only enable it for requests with no hits - if (context.size() != 0) { - return false; + @Override + public void extractTerms(Set terms) { + in.extractTerms(terms); } - // We cannot cache with DFS because results depend not only on the content of the index but also - // on the overridden statistics. So if you ran two queries on the same index with different stats - // (because an other shard was updated) you would get wrong results because of the scores - // (think about top_hits aggs or scripts using the score) - if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) { - return false; + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + shardKeyMap.add(context.reader()); + return in.explain(context, doc); } - IndexMetaData index = clusterService.state().getMetaData().index(request.index()); - if (index == null) { // in case we didn't yet have the cluster state, or it just got deleted - return false; + @Override + public float getValueForNormalization() throws IOException { + return in.getValueForNormalization(); } - // if not explicitly set in the request, use the index setting, if not, use the request - if (request.queryCache() == null) { - if (!index.settings().getAsBoolean(INDEX_CACHE_QUERY_ENABLED, Boolean.FALSE)) { - return false; - } - } else if (!request.queryCache()) { - return false; - } - // if the reader is not a directory reader, we can't get the version from it - if (!(context.searcher().getIndexReader() instanceof DirectoryReader)) { - return false; - } - // if now in millis is used (or in the future, a more generic "isDeterministic" flag - // then we can't cache based on "now" key within the search request, as it is not deterministic - if (context.nowInMillisUsed()) { - return false; - } - return true; - } - /** - * Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached - * value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows - * to have a single load operation that will cause other requests with the same key to wait till its loaded an reuse - * the same cache. 
- */ - public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception { - assert canCache(request, context); - Key key = buildKey(request, context); - Loader loader = new Loader(queryPhase, context, key); - Value value = cache.get(key, loader); - if (loader.isLoaded()) { - key.shard.queryCache().onMiss(); - // see if its the first time we see this reader, and make sure to register a cleanup key - CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion()); - if (!registeredClosedListeners.containsKey(cleanupKey)) { - Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); - if (previous == null) { - context.searcher().getIndexReader().addReaderClosedListener(cleanupKey); - } - } - } else { - key.shard.queryCache().onHit(); - // restore the cached query result into the context - final QuerySearchResult result = context.queryResult(); - result.readFromWithId(context.id(), value.reference.streamInput()); - result.shardTarget(context.shardTarget()); + @Override + public void normalize(float norm, float topLevelBoost) { + in.normalize(norm, topLevelBoost); + } + + @Override + public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + shardKeyMap.add(context.reader()); + return in.scorer(context, acceptDocs); } } - private static class Loader implements Callable { - - private final QueryPhase queryPhase; - private final SearchContext context; - private final IndicesQueryCache.Key key; - private boolean loaded; - - Loader(QueryPhase queryPhase, SearchContext context, IndicesQueryCache.Key key) { - this.queryPhase = queryPhase; - this.context = context; - this.key = key; + /** Clear all entries that belong to the given index. */ + public void clearIndex(String index) { + final Set coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index); + for (Object coreKey : coreCacheKeys) { + cache.clearCoreCacheKey(coreKey); } - public boolean isLoaded() { - return this.loaded; - } - - @Override - public Value call() throws Exception { - queryPhase.execute(context); - - /* BytesStreamOutput allows to pass the expected size but by default uses - * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. - * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful - * since we don't shrink to the actual size once we are done serializing. - * By passing 512 as the expected size we will resize the byte array in the stream - * slowly until we hit the page size and don't waste too much memory for small query - * results.*/ - final int expectedSizeInBytes = 512; - try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { - context.queryResult().writeToNoId(out); - // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep - // the memory properly paged instead of having varied sized bytes - final BytesReference reference = out.bytes(); - loaded = true; - Value value = new Value(reference, out.ramBytesUsed()); - key.shard.queryCache().onCached(key, value); - return value; - } + // This cache stores two things: filters, and doc id sets. Calling + // clear only removes the doc id sets, but if we reach the situation + // that the cache does not contain any DocIdSet anymore, then it + // probably means that the user wanted to remove everything. 
+ if (cache.getCacheSize() == 0) { + cache.clear(); } } - public static class Value implements Accountable { - final BytesReference reference; - final long ramBytesUsed; + @Override + public void close() { + assert shardKeyMap.size() == 0 : shardKeyMap.size(); + assert shardStats.isEmpty() : shardStats.keySet(); + assert stats2.isEmpty() : stats2; + cache.clear(); + } - public Value(BytesReference reference, long ramBytesUsed) { - this.reference = reference; - this.ramBytesUsed = ramBytesUsed; - } + private static class Stats implements Cloneable { - @Override - public long ramBytesUsed() { - return ramBytesUsed; - } + volatile long ramBytesUsed; + volatile long hitCount; + volatile long missCount; + volatile long cacheCount; + volatile long cacheSize; - @Override - public Collection getChildResources() { - return Collections.emptyList(); + QueryCacheStats toQueryCacheStats() { + return new QueryCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize); } } - public static class Key implements Accountable { - public final IndexShard shard; // use as identity equality - public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped - public final BytesReference value; + private static class StatsAndCount { + int count; + final Stats stats; - Key(IndexShard shard, long readerVersion, BytesReference value) { - this.shard = shard; - this.readerVersion = readerVersion; - this.value = value; + StatsAndCount(Stats stats) { + this.stats = stats; + this.count = 0; } + } - @Override - public long ramBytesUsed() { - return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length(); - } - - @Override - public Collection getChildResources() { - // TODO: more detailed ram usage? 
- return Collections.emptyList(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - Key key = (Key) o; - if (readerVersion != key.readerVersion) return false; - if (!shard.equals(key.shard)) return false; - if (!value.equals(key.value)) return false; + private boolean empty(Stats stats) { + if (stats == null) { return true; } - - @Override - public int hashCode() { - int result = shard.hashCode(); - result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32)); - result = 31 * result + value.hashCode(); - return result; - } + return stats.cacheSize == 0 && stats.ramBytesUsed == 0; } - private class CleanupKey implements IndexReader.ReaderClosedListener { - IndexShard indexShard; - long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped - - private CleanupKey(IndexShard indexShard, long readerVersion) { - this.indexShard = indexShard; - this.readerVersion = readerVersion; - } - - @Override - public void onClose(IndexReader reader) { - Boolean remove = registeredClosedListeners.remove(this); - if (remove != null) { - keysToClean.add(this); - } - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - CleanupKey that = (CleanupKey) o; - if (readerVersion != that.readerVersion) return false; - if (!indexShard.equals(that.indexShard)) return false; - return true; - } - - @Override - public int hashCode() { - int result = indexShard.hashCode(); - result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32)); - return result; - } - } - - private class Reaper implements Runnable { - - private final ObjectSet currentKeysToClean = new ObjectHashSet<>(); - private final ObjectSet currentFullClean = new ObjectHashSet<>(); - - private volatile boolean closed; - - void close() { - closed = true; - } - - @Override - public void run() { - if (closed) { - return; - } - if (keysToClean.isEmpty()) { - schedule(); - return; - } - try { - threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() { - @Override - public void run() { - reap(); - schedule(); - } - }); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not run ReaderCleaner - execution rejected", ex); - } - } - - private void schedule() { - try { - threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this); - } catch (EsRejectedExecutionException ex) { - logger.debug("Can not schedule ReaderCleaner - execution rejected", ex); - } - } - - synchronized void reap() { - currentKeysToClean.clear(); - currentFullClean.clear(); - for (Iterator iterator = keysToClean.iterator(); iterator.hasNext(); ) { - CleanupKey cleanupKey = iterator.next(); - iterator.remove(); - if (cleanupKey.readerVersion == -1 || cleanupKey.indexShard.state() == IndexShardState.CLOSED) { - // -1 indicates full cleanup, as does a closed shard - currentFullClean.add(cleanupKey.indexShard); - } else { - currentKeysToClean.add(cleanupKey); - } - } - - if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { - CleanupKey lookupKey = new CleanupKey(null, -1); - for (Iterator iterator = cache.asMap().keySet().iterator(); iterator.hasNext(); ) { - Key key = iterator.next(); - if (currentFullClean.contains(key.shard)) { - iterator.remove(); - } else { - lookupKey.indexShard = key.shard; - lookupKey.readerVersion = key.readerVersion; - if (currentKeysToClean.contains(lookupKey)) { - iterator.remove(); - } - } - } - } - - cache.cleanUp(); - currentKeysToClean.clear(); - currentFullClean.clear(); - } - } - - private 
static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception { - // TODO: for now, this will create different keys for different JSON order - // TODO: tricky to get around this, need to parse and order all, which can be expensive - return new Key(context.indexShard(), - ((DirectoryReader) context.searcher().getIndexReader()).getVersion(), - request.cacheKey()); + public void onClose(ShardId shardId) { + assert empty(shardStats.get(shardId)); + shardStats.remove(shardId); } } diff --git a/core/src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java b/core/src/main/java/org/elasticsearch/indices/cache/query/terms/TermsLookup.java similarity index 97% rename from core/src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java rename to core/src/main/java/org/elasticsearch/indices/cache/query/terms/TermsLookup.java index d067a2383da..28ab04bd245 100644 --- a/core/src/main/java/org/elasticsearch/indices/cache/filter/terms/TermsLookup.java +++ b/core/src/main/java/org/elasticsearch/indices/cache/query/terms/TermsLookup.java @@ -17,7 +17,7 @@ * under the License. */ -package org.elasticsearch.indices.cache.filter.terms; +package org.elasticsearch.indices.cache.query.terms; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.query.QueryParseContext; diff --git a/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java new file mode 100644 index 00000000000..d6219946fd6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java @@ -0,0 +1,503 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.cache.request; + +import com.carrotsearch.hppc.ObjectHashSet; +import com.carrotsearch.hppc.ObjectSet; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import com.google.common.cache.RemovalNotification; +import com.google.common.cache.Weigher; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collection; +import java.util.Collections; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.common.Strings.hasLength; + +/** + * The indices request cache allows to cache a shard level request stage responses, helping with improving + * similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent + * with the semantics of NRT (the index reader version is part of the cache key), and relies on size based + * eviction to evict old reader associated cache entries as well as scheduler reaper to clean readers that + * are no longer used or closed shards. + *
+ * Currently, the cache is only enabled for search requests with a size of 0 (count-style requests, such as {@link SearchType#COUNT}; see canCache below), and can only be opted in on an index + * level setting that can be dynamically changed and defaults to false. + *
+ * There are still several TODOs left in this class, some easily addressable, some more complex, but the support + * is functional. + */ +public class IndicesRequestCache extends AbstractComponent implements RemovalListener { + + /** + * A setting to enable or disable request caching on an index level. Its dynamic by default + * since we are checking on the cluster state IndexMetaData always. + */ + public static final String INDEX_CACHE_REQUEST_ENABLED = "index.requests.cache.enable"; + @Deprecated + public static final String DEPRECATED_INDEX_CACHE_REQUEST_ENABLED = "index.cache.query.enable"; + public static final String INDICES_CACHE_REQUEST_CLEAN_INTERVAL = "indices.requests.cache.clean_interval"; + + public static final String INDICES_CACHE_QUERY_SIZE = "indices.requests.cache.size"; + @Deprecated + public static final String DEPRECATED_INDICES_CACHE_QUERY_SIZE = "indices.cache.query.size"; + public static final String INDICES_CACHE_QUERY_EXPIRE = "indices.requests.cache.expire"; + public static final String INDICES_CACHE_QUERY_CONCURRENCY_LEVEL = "indices.requests.cache.concurrency_level"; + + private static final Set CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH); + + private final ThreadPool threadPool; + private final ClusterService clusterService; + + private final TimeValue cleanInterval; + private final Reaper reaper; + + final ConcurrentMap registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); + final Set keysToClean = ConcurrentCollections.newConcurrentSet(); + + + //TODO make these changes configurable on the cluster level + private final String size; + private final TimeValue expire; + private final int concurrencyLevel; + + private volatile Cache cache; + + @Inject + public IndicesRequestCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) { + super(settings); + this.clusterService = clusterService; + this.threadPool = threadPool; + this.cleanInterval = settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60)); + + String size = settings.get(INDICES_CACHE_QUERY_SIZE); + if (size == null) { + size = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE); + if (size != null) { + deprecationLogger.deprecated("The [" + DEPRECATED_INDICES_CACHE_QUERY_SIZE + + "] settings is now deprecated, use [" + INDICES_CACHE_QUERY_SIZE + "] instead"); + } + } + if (size == null) { + // this cache can be very small yet still be very effective + size = "1%"; + } + this.size = size; + + this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null); + // defaults to 4, but this is a busy map for all indices, increase it a bit by default + this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16); + if (concurrencyLevel <= 0) { + throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel); + } + buildCache(); + + this.reaper = new Reaper(); + threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper); + } + + private boolean isCacheEnabled(Settings settings, boolean defaultEnable) { + Boolean enable = settings.getAsBoolean(INDEX_CACHE_REQUEST_ENABLED, null); + if (enable == null) { + enable = settings.getAsBoolean(DEPRECATED_INDEX_CACHE_REQUEST_ENABLED, null); + if (enable != null) { + deprecationLogger.deprecated("The [" + DEPRECATED_INDEX_CACHE_REQUEST_ENABLED + + "] settings is now deprecated, use [" + INDEX_CACHE_REQUEST_ENABLED + "] instead"); + } + } + if (enable == null) { + enable = defaultEnable; + 
} + return enable; + } + + private void buildCache() { + long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes(); + + CacheBuilder cacheBuilder = CacheBuilder.newBuilder() + .maximumWeight(sizeInBytes).weigher(new QueryCacheWeigher()).removalListener(this); + cacheBuilder.concurrencyLevel(concurrencyLevel); + + if (expire != null) { + cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); + } + + cache = cacheBuilder.build(); + } + + private static class QueryCacheWeigher implements Weigher { + + @Override + public int weigh(Key key, Value value) { + return (int) (key.ramBytesUsed() + value.ramBytesUsed()); + } + } + + public void close() { + reaper.close(); + cache.invalidateAll(); + } + + public void clear(IndexShard shard) { + if (shard == null) { + return; + } + keysToClean.add(new CleanupKey(shard, -1)); + logger.trace("{} explicit cache clear", shard.shardId()); + reaper.reap(); + } + + @Override + public void onRemoval(RemovalNotification notification) { + if (notification.getKey() == null) { + return; + } + notification.getKey().shard.requestCache().onRemoval(notification); + } + + /** + * Can the shard request be cached at all? + */ + public boolean canCache(ShardSearchRequest request, SearchContext context) { + // TODO: for now, template is not supported, though we could use the generated bytes as the key + if (hasLength(request.templateSource())) { + return false; + } + + // for now, only enable it for requests with no hits + if (context.size() != 0) { + return false; + } + + // We cannot cache with DFS because results depend not only on the content of the index but also + // on the overridden statistics. So if you ran two queries on the same index with different stats + // (because an other shard was updated) you would get wrong results because of the scores + // (think about top_hits aggs or scripts using the score) + if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) { + return false; + } + + IndexMetaData index = clusterService.state().getMetaData().index(request.index()); + if (index == null) { // in case we didn't yet have the cluster state, or it just got deleted + return false; + } + // if not explicitly set in the request, use the index setting, if not, use the request + if (request.requestCache() == null) { + if (!isCacheEnabled(index.settings(), Boolean.FALSE)) { + return false; + } + } else if (!request.requestCache()) { + return false; + } + // if the reader is not a directory reader, we can't get the version from it + if (!(context.searcher().getIndexReader() instanceof DirectoryReader)) { + return false; + } + // if now in millis is used (or in the future, a more generic "isDeterministic" flag + // then we can't cache based on "now" key within the search request, as it is not deterministic + if (context.nowInMillisUsed()) { + return false; + } + return true; + } + + /** + * Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached + * value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows + * to have a single load operation that will cause other requests with the same key to wait till its loaded an reuse + * the same cache. 
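A pattern this change introduces in both caches is settings renaming with a deprecation bridge: read the new key first, fall back to the old key while logging a deprecation warning, then apply the default; on top of that, canCache lets an explicit per-request flag override the index-level enable setting. The sketch below isolates that precedence in plain Java, with a Map standing in for Elasticsearch's Settings and System.out standing in for the deprecation logger; those substitutions and the method names are assumptions made for illustration.

import java.util.HashMap;
import java.util.Map;

public class RequestCacheEnableSketch {

    static final String NEW_KEY = "index.requests.cache.enable";
    static final String DEPRECATED_KEY = "index.cache.query.enable"; // still honoured, but reported as deprecated

    /** Index-level decision: prefer the new setting, fall back to the deprecated one, else the default (false). */
    static boolean isCacheEnabled(Map<String, String> indexSettings, boolean defaultEnable) {
        String enable = indexSettings.get(NEW_KEY);
        if (enable == null) {
            enable = indexSettings.get(DEPRECATED_KEY);
            if (enable != null) {
                System.out.println("deprecation: [" + DEPRECATED_KEY + "] is deprecated, use [" + NEW_KEY + "] instead");
            }
        }
        return enable == null ? defaultEnable : Boolean.parseBoolean(enable);
    }

    /** Request-level decision: an explicit request_cache flag on the request overrides the index setting. */
    static boolean shouldCache(Boolean requestCacheFlag, Map<String, String> indexSettings) {
        if (requestCacheFlag != null) {
            return requestCacheFlag;
        }
        return isCacheEnabled(indexSettings, false);
    }

    public static void main(String[] args) {
        Map<String, String> settings = new HashMap<>();
        settings.put(DEPRECATED_KEY, "true");              // old name still works...
        System.out.println(shouldCache(null, settings));   // ...so this prints true, after a deprecation warning
        System.out.println(shouldCache(false, settings));  // explicit request flag wins: prints false
    }
}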
+ */ + public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception { + assert canCache(request, context); + Key key = buildKey(request, context); + Loader loader = new Loader(queryPhase, context, key); + Value value = cache.get(key, loader); + if (loader.isLoaded()) { + key.shard.requestCache().onMiss(); + // see if its the first time we see this reader, and make sure to register a cleanup key + CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion()); + if (!registeredClosedListeners.containsKey(cleanupKey)) { + Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); + if (previous == null) { + context.searcher().getIndexReader().addReaderClosedListener(cleanupKey); + } + } + } else { + key.shard.requestCache().onHit(); + // restore the cached query result into the context + final QuerySearchResult result = context.queryResult(); + result.readFromWithId(context.id(), value.reference.streamInput()); + result.shardTarget(context.shardTarget()); + } + } + + private static class Loader implements Callable { + + private final QueryPhase queryPhase; + private final SearchContext context; + private final IndicesRequestCache.Key key; + private boolean loaded; + + Loader(QueryPhase queryPhase, SearchContext context, IndicesRequestCache.Key key) { + this.queryPhase = queryPhase; + this.context = context; + this.key = key; + } + + public boolean isLoaded() { + return this.loaded; + } + + @Override + public Value call() throws Exception { + queryPhase.execute(context); + + /* BytesStreamOutput allows to pass the expected size but by default uses + * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. + * a date histogram with 3 buckets is ~100byte so 16k might be very wasteful + * since we don't shrink to the actual size once we are done serializing. 
+ * By passing 512 as the expected size we will resize the byte array in the stream + * slowly until we hit the page size and don't waste too much memory for small query + * results.*/ + final int expectedSizeInBytes = 512; + try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) { + context.queryResult().writeToNoId(out); + // for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep + // the memory properly paged instead of having varied sized bytes + final BytesReference reference = out.bytes(); + loaded = true; + Value value = new Value(reference, out.ramBytesUsed()); + key.shard.requestCache().onCached(key, value); + return value; + } + } + } + + public static class Value implements Accountable { + final BytesReference reference; + final long ramBytesUsed; + + public Value(BytesReference reference, long ramBytesUsed) { + this.reference = reference; + this.ramBytesUsed = ramBytesUsed; + } + + @Override + public long ramBytesUsed() { + return ramBytesUsed; + } + + @Override + public Collection getChildResources() { + return Collections.emptyList(); + } + } + + public static class Key implements Accountable { + public final IndexShard shard; // use as identity equality + public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped + public final BytesReference value; + + Key(IndexShard shard, long readerVersion, BytesReference value) { + this.shard = shard; + this.readerVersion = readerVersion; + this.value = value; + } + + @Override + public long ramBytesUsed() { + return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length(); + } + + @Override + public Collection getChildResources() { + // TODO: more detailed ram usage? 
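The Key type above ties each cached entry to the shard, the reader version and the serialized request bytes, which is what the class javadoc means by being coherent with NRT: after a refresh the reader version changes, so the same request on the same shard maps to a new key, and stale entries simply stop being hit until size-based eviction or the reaper removes them. A condensed, stand-alone rendition of that equality contract follows; the class and variable names are invented, and the real Key also implements Accountable and compares the actual IndexShard and BytesReference types.

import java.util.Arrays;
import java.util.Objects;

public class RequestCacheKeySketch {

    /** Simplified stand-in for the cache key: shard identity + reader version + request cache key bytes. */
    static final class Key {
        final Object shard;        // the real Key compares IndexShard instances with equals()
        final long readerVersion;  // changes whenever the reader changes, e.g. after a refresh
        final byte[] requestKey;   // the serialized request (request.cacheKey() in the hunk above)

        Key(Object shard, long readerVersion, byte[] requestKey) {
            this.shard = shard;
            this.readerVersion = readerVersion;
            this.requestKey = requestKey;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (!(o instanceof Key)) return false;
            Key other = (Key) o;
            return readerVersion == other.readerVersion
                    && shard.equals(other.shard)
                    && Arrays.equals(requestKey, other.requestKey);
        }

        @Override
        public int hashCode() {
            return Objects.hash(shard, readerVersion, Arrays.hashCode(requestKey));
        }
    }

    public static void main(String[] args) {
        Object shard = new Object();
        byte[] request = "{\"size\":0,\"aggs\":{}}".getBytes();
        Key beforeRefresh = new Key(shard, 7L, request);
        Key afterRefresh = new Key(shard, 8L, request);
        // Same shard, same request, new reader version: the old entry can no longer be hit,
        // which keeps cached results consistent with NRT refreshes.
        System.out.println(beforeRefresh.equals(afterRefresh)); // false
    }
}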
+ return Collections.emptyList(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + Key key = (Key) o; + if (readerVersion != key.readerVersion) return false; + if (!shard.equals(key.shard)) return false; + if (!value.equals(key.value)) return false; + return true; + } + + @Override + public int hashCode() { + int result = shard.hashCode(); + result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32)); + result = 31 * result + value.hashCode(); + return result; + } + } + + private class CleanupKey implements IndexReader.ReaderClosedListener { + IndexShard indexShard; + long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped + + private CleanupKey(IndexShard indexShard, long readerVersion) { + this.indexShard = indexShard; + this.readerVersion = readerVersion; + } + + @Override + public void onClose(IndexReader reader) { + Boolean remove = registeredClosedListeners.remove(this); + if (remove != null) { + keysToClean.add(this); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + CleanupKey that = (CleanupKey) o; + if (readerVersion != that.readerVersion) return false; + if (!indexShard.equals(that.indexShard)) return false; + return true; + } + + @Override + public int hashCode() { + int result = indexShard.hashCode(); + result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32)); + return result; + } + } + + private class Reaper implements Runnable { + + private final ObjectSet currentKeysToClean = new ObjectHashSet<>(); + private final ObjectSet currentFullClean = new ObjectHashSet<>(); + + private volatile boolean closed; + + void close() { + closed = true; + } + + @Override + public void run() { + if (closed) { + return; + } + if (keysToClean.isEmpty()) { + schedule(); + return; + } + try { + threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() { + @Override + public void run() { + reap(); + schedule(); + } + }); + } catch (EsRejectedExecutionException ex) { + logger.debug("Can not run ReaderCleaner - execution rejected", ex); + } + } + + private void schedule() { + try { + threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this); + } catch (EsRejectedExecutionException ex) { + logger.debug("Can not schedule ReaderCleaner - execution rejected", ex); + } + } + + synchronized void reap() { + currentKeysToClean.clear(); + currentFullClean.clear(); + for (Iterator iterator = keysToClean.iterator(); iterator.hasNext(); ) { + CleanupKey cleanupKey = iterator.next(); + iterator.remove(); + if (cleanupKey.readerVersion == -1 || cleanupKey.indexShard.state() == IndexShardState.CLOSED) { + // -1 indicates full cleanup, as does a closed shard + currentFullClean.add(cleanupKey.indexShard); + } else { + currentKeysToClean.add(cleanupKey); + } + } + + if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { + CleanupKey lookupKey = new CleanupKey(null, -1); + for (Iterator iterator = cache.asMap().keySet().iterator(); iterator.hasNext(); ) { + Key key = iterator.next(); + if (currentFullClean.contains(key.shard)) { + iterator.remove(); + } else { + lookupKey.indexShard = key.shard; + lookupKey.readerVersion = key.readerVersion; + if (currentKeysToClean.contains(lookupKey)) { + iterator.remove(); + } + } + } + } + + cache.cleanUp(); + currentKeysToClean.clear(); + currentFullClean.clear(); + } + } + + private static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception { + // TODO: for now, 
this will create different keys for different JSON order + // TODO: tricky to get around this, need to parse and order all, which can be expensive + return new Key(context.indexShard(), + ((DirectoryReader) context.searcher().getIndexReader()).getVersion(), + request.cacheKey()); + } +} diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 6cabc9af2b6..83db83c19b0 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -62,7 +62,7 @@ import org.elasticsearch.index.search.shape.ShapeModule; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerModule; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.cluster.IndicesClusterStateService; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.memory.IndexingMemoryController; @@ -348,7 +348,7 @@ public class Node implements Releasable { injector.getInstance(IndicesTTLService.class).close(); injector.getInstance(IndicesService.class).close(); // close filter/fielddata caches after indices - injector.getInstance(IndicesFilterCache.class).close(); + injector.getInstance(IndicesQueryCache.class).close(); injector.getInstance(IndicesFieldDataCache.class).close(); injector.getInstance(IndicesStore.class).close(); stopWatch.stop().start("routing"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 09560a4e2e8..8c09b65797b 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -73,8 +73,8 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { for (Map.Entry entry : request.params().entrySet()) { - if (Fields.FILTER.match(entry.getKey())) { - clearIndicesCacheRequest.filterCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.filterCache())); + if (Fields.QUERY.match(entry.getKey())) { + clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache())); } if (Fields.FIELD_DATA.match(entry.getKey())) { clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.fieldDataCache())); @@ -91,7 +91,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { } public static class Fields { - public static final ParseField FILTER = new ParseField("filter", "filter_cache"); + public static final ParseField QUERY = new ParseField("query", "filter", "filter_cache"); public static final ParseField FIELD_DATA = new ParseField("field_data", "fielddata"); public static final ParseField RECYCLER = new ParseField("recycler"); public static final ParseField FIELDS = new ParseField("fields"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java index 0d6d71fdd90..4431ba5f4b3 100644 --- 
a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/stats/RestIndicesStatsAction.java @@ -72,13 +72,13 @@ public class RestIndicesStatsAction extends BaseRestHandler { indicesStatsRequest.refresh(metrics.contains("refresh")); indicesStatsRequest.flush(metrics.contains("flush")); indicesStatsRequest.warmer(metrics.contains("warmer")); - indicesStatsRequest.filterCache(metrics.contains("filter_cache")); + indicesStatsRequest.queryCache(metrics.contains("query_cache")); indicesStatsRequest.percolate(metrics.contains("percolate")); indicesStatsRequest.segments(metrics.contains("segments")); indicesStatsRequest.fieldData(metrics.contains("fielddata")); indicesStatsRequest.completion(metrics.contains("completion")); indicesStatsRequest.suggest(metrics.contains("suggest")); - indicesStatsRequest.queryCache(metrics.contains("query_cache")); + indicesStatsRequest.requestCache(metrics.contains("request_cache")); indicesStatsRequest.recovery(metrics.contains("recovery")); indicesStatsRequest.translog(metrics.contains("translog")); } diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index f4ab318ae84..7e21aacb26d 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -118,23 +118,23 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell("fielddata.evictions", "sibling:pri;alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); table.addCell("pri.fielddata.evictions", "default:false;text-align:right;desc:fielddata evictions"); - table.addCell("filter_cache.memory_size", "sibling:pri;alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache"); - table.addCell("pri.filter_cache.memory_size", "default:false;text-align:right;desc:used filter cache"); - - table.addCell("filter_cache.evictions", "sibling:pri;alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions"); - table.addCell("pri.filter_cache.evictions", "default:false;text-align:right;desc:filter cache evictions"); - - table.addCell("query_cache.memory_size", "sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.memory_size", "sibling:pri;alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); table.addCell("pri.query_cache.memory_size", "default:false;text-align:right;desc:used query cache"); - table.addCell("query_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); + table.addCell("query_cache.evictions", "sibling:pri;alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); table.addCell("pri.query_cache.evictions", "default:false;text-align:right;desc:query cache evictions"); - table.addCell("query_cache.hit_count", "sibling:pri;alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit count"); - table.addCell("pri.query_cache.hit_count", "default:false;text-align:right;desc:query cache hit count"); + table.addCell("request_cache.memory_size", "sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used request cache"); + 
table.addCell("pri.request_cache.memory_size", "default:false;text-align:right;desc:used request cache"); - table.addCell("query_cache.miss_count", "sibling:pri;alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss count"); - table.addCell("pri.query_cache.miss_count", "default:false;text-align:right;desc:query cache miss count"); + table.addCell("request_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:request cache evictions"); + table.addCell("pri.request_cache.evictions", "default:false;text-align:right;desc:request cache evictions"); + + table.addCell("request_cache.hit_count", "sibling:pri;alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:request cache hit count"); + table.addCell("pri.request_cache.hit_count", "default:false;text-align:right;desc:request cache hit count"); + + table.addCell("request_cache.miss_count", "sibling:pri;alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:request cache miss count"); + table.addCell("pri.request_cache.miss_count", "default:false;text-align:right;desc:request cache miss count"); table.addCell("flush.total", "sibling:pri;alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); table.addCell("pri.flush.total", "default:false;text-align:right;desc:number of flushes"); @@ -317,23 +317,23 @@ public class RestIndicesAction extends AbstractCatAction { table.addCell(indexStats == null ? null : indexStats.getTotal().getFieldData().getEvictions()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFieldData().getEvictions()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getFilterCache().getMemorySize()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFilterCache().getMemorySize()); - - table.addCell(indexStats == null ? null : indexStats.getTotal().getFilterCache().getEvictions()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFilterCache().getEvictions()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getMemorySize()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getMemorySize()); table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getEvictions()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getEvictions()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getHitCount()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getHitCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getMemorySize()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getMemorySize()); - table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getMissCount()); - table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getMissCount()); + table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getEvictions()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getEvictions()); + + table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getHitCount()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getHitCount()); + + table.addCell(indexStats == null ? 
null : indexStats.getTotal().getRequestCache().getMissCount()); + table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getMissCount()); table.addCell(indexStats == null ? null : indexStats.getTotal().getFlush().getTotal()); table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFlush().getTotal()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java index 081e3b55f6e..b3cc6a4deab 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestNodesAction.java @@ -35,8 +35,8 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.index.cache.filter.FilterCacheStats; import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.flush.FlushStats; @@ -142,13 +142,13 @@ public class RestNodesAction extends AbstractCatAction { table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"); table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); - table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache"); - table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions"); + table.addCell("query_cache.memory_size", "alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.evictions", "alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); - table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); - table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); - table.addCell("query_cache.hit_count", "alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit counts"); - table.addCell("query_cache.miss_count", "alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss counts"); + table.addCell("request_cache.memory_size", "alias:qcm,requestCacheMemory;default:false;text-align:right;desc:used request cache"); + table.addCell("request_cache.evictions", "alias:qce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions"); + table.addCell("request_cache.hit_count", "alias:qchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts"); + table.addCell("request_cache.miss_count", "alias:qcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts"); table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); @@ -269,11 +269,11 @@ public class RestNodesAction extends AbstractCatAction { table.addCell(fdStats == 
null ? null : fdStats.getMemorySize()); table.addCell(fdStats == null ? null : fdStats.getEvictions()); - FilterCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache(); + QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache(); table.addCell(fcStats == null ? null : fcStats.getMemorySize()); table.addCell(fcStats == null ? null : fcStats.getEvictions()); - QueryCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache(); + RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache(); table.addCell(qcStats == null ? null : qcStats.getMemorySize()); table.addCell(qcStats == null ? null : qcStats.getEvictions()); table.addCell(qcStats == null ? null : qcStats.getHitCount()); diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 8b37fd2c620..802607829ff 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -101,8 +101,8 @@ public class RestShardsAction extends AbstractCatAction { table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache"); table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions"); - table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache"); - table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions"); + table.addCell("query_cache.memory_size", "alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache"); + table.addCell("query_cache.evictions", "alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions"); table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes"); table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush"); @@ -232,8 +232,8 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(shardStats == null ? null : shardStats.getFieldData().getMemorySize()); table.addCell(shardStats == null ? null : shardStats.getFieldData().getEvictions()); - table.addCell(shardStats == null ? null : shardStats.getFilterCache().getMemorySize()); - table.addCell(shardStats == null ? null : shardStats.getFilterCache().getEvictions()); + table.addCell(shardStats == null ? null : shardStats.getQueryCache().getMemorySize()); + table.addCell(shardStats == null ? null : shardStats.getQueryCache().getEvictions()); table.addCell(shardStats == null ? null : shardStats.getFlush().getTotal()); table.addCell(shardStats == null ? 
null : shardStats.getFlush().getTotalTime()); diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 5a9b1c90b2c..45e69f56f81 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -72,7 +72,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.IndicesWarmer.WarmerContext; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script.ScriptParseException; import org.elasticsearch.script.ScriptContext; @@ -144,7 +144,7 @@ public class SearchService extends AbstractLifecycleComponent { private final FetchPhase fetchPhase; - private final IndicesQueryCache indicesQueryCache; + private final IndicesRequestCache indicesQueryCache; private final long defaultKeepAlive; @@ -159,7 +159,7 @@ public class SearchService extends AbstractLifecycleComponent { @Inject public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, - IndicesQueryCache indicesQueryCache) { + IndicesRequestCache indicesQueryCache) { super(settings); this.threadPool = threadPool; this.clusterService = clusterService; @@ -374,7 +374,7 @@ public class SearchService extends AbstractLifecycleComponent { try { final IndexCache indexCache = context.indexShard().indexService().cache(); context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), - indexCache.filter(), indexCache.filterPolicy())); + indexCache.query(), indexCache.queryPolicy())); } catch (Throwable e) { processFailure(context, e); cleanContext(context); @@ -448,7 +448,7 @@ public class SearchService extends AbstractLifecycleComponent { try { final IndexCache indexCache = context.indexShard().indexService().cache(); context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), - indexCache.filter(), indexCache.filterPolicy())); + indexCache.query(), indexCache.queryPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); @@ -1060,7 +1060,7 @@ public class SearchService extends AbstractLifecycleComponent { try { long now = System.nanoTime(); ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.numberOfShards(), - SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.queryCache()); + SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.requestCache()); context = createContext(request, warmerContext.searcher()); // if we use sort, we need to do query to sort on it and load relevant field data // if not, we might as well set size=0 (and cache if needed) diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 8c2b67ef5a8..a0392509d2a 100644 --- 
a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -70,7 +70,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S private BytesReference extraSource; private BytesReference templateSource; private Template template; - private Boolean queryCache; + private Boolean requestCache; private long nowInMillis; ShardSearchLocalRequest() { @@ -79,7 +79,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards, String[] filteringAliases, long nowInMillis) { this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(), - searchRequest.source(), searchRequest.types(), searchRequest.queryCache()); + searchRequest.source(), searchRequest.types(), searchRequest.requestCache()); this.extraSource = searchRequest.extraSource(); this.templateSource = searchRequest.templateSource(); this.template = searchRequest.template(); @@ -100,14 +100,14 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S } public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, - BytesReference source, String[] types, Boolean queryCache) { + BytesReference source, String[] types, Boolean requestCache) { this.index = shardId.getIndex(); this.shardId = shardId.id(); this.numberOfShards = numberOfShards; this.searchType = searchType; this.source = source; this.types = types; - this.queryCache = queryCache; + this.requestCache = requestCache; } @Override @@ -171,8 +171,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S } @Override - public Boolean queryCache() { - return queryCache; + public Boolean requestCache() { + return requestCache; } @Override @@ -201,7 +201,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S if (in.readBoolean()) { template = Template.readTemplate(in); } - queryCache = in.readOptionalBoolean(); + requestCache = in.readOptionalBoolean(); } protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException { @@ -231,7 +231,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S if (hasTemplate) { template.writeTo(out); } - out.writeOptionalBoolean(queryCache); + out.writeOptionalBoolean(requestCache); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 0411521d78d..6d9734f8994 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -60,7 +60,7 @@ public interface ShardSearchRequest extends HasContextAndHeaders { BytesReference templateSource(); - Boolean queryCache(); + Boolean requestCache(); Scroll scroll(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 3609fc3699d..e7b1e2f9e62 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -132,8 +132,8 @@ public class ShardSearchTransportRequest extends TransportRequest 
implements Sha } @Override - public Boolean queryCache() { - return shardSearchLocalRequest.queryCache(); + public Boolean requestCache() { + return shardSearchLocalRequest.requestCache(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java index 1a0731bc361..16b9a0d217e 100644 --- a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java +++ b/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java @@ -19,12 +19,12 @@ package org.elasticsearch.search.warmer; +import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; -import org.elasticsearch.Version; + import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -67,13 +67,13 @@ public class IndexWarmersMetaData extends AbstractDiffable private final String name; private final String[] types; private final BytesReference source; - private final Boolean queryCache; + private final Boolean requestCache; - public Entry(String name, String[] types, Boolean queryCache, BytesReference source) { + public Entry(String name, String[] types, Boolean requestCache, BytesReference source) { this.name = name; this.types = types == null ? Strings.EMPTY_ARRAY : types; this.source = source; - this.queryCache = queryCache; + this.requestCache = requestCache; } public String name() { @@ -90,8 +90,8 @@ public class IndexWarmersMetaData extends AbstractDiffable } @Nullable - public Boolean queryCache() { - return this.queryCache; + public Boolean requestCache() { + return this.requestCache; } @Override @@ -104,7 +104,7 @@ public class IndexWarmersMetaData extends AbstractDiffable if (!name.equals(entry.name)) return false; if (!Arrays.equals(types, entry.types)) return false; if (!source.equals(entry.source)) return false; - return !(queryCache != null ? !queryCache.equals(entry.queryCache) : entry.queryCache != null); + return Objects.equal(requestCache, entry.requestCache); } @@ -113,7 +113,7 @@ public class IndexWarmersMetaData extends AbstractDiffable int result = name.hashCode(); result = 31 * result + Arrays.hashCode(types); result = 31 * result + source.hashCode(); - result = 31 * result + (queryCache != null ? queryCache.hashCode() : 0); + result = 31 * result + (requestCache != null ? 
requestCache.hashCode() : 0); return result; } } @@ -163,7 +163,7 @@ public class IndexWarmersMetaData extends AbstractDiffable out.writeBoolean(true); out.writeBytesReference(entry.source()); } - out.writeOptionalBoolean(entry.queryCache()); + out.writeOptionalBoolean(entry.requestCache()); } } @@ -241,8 +241,8 @@ public class IndexWarmersMetaData extends AbstractDiffable boolean binary = params.paramAsBoolean("binary", false); builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); builder.field("types", entry.types()); - if (entry.queryCache() != null) { - builder.field("queryCache", entry.queryCache()); + if (entry.requestCache() != null) { + builder.field("queryCache", entry.requestCache()); } builder.field("source"); if (binary) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java index b93d7a2f72b..aad405a3955 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java @@ -44,7 +44,7 @@ public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest { for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { try { enableIndexBlock("test", blockSetting); - ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setFieldDataCache(true).execute().actionGet(); + ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true).execute().actionGet(); assertNoFailures(clearIndicesCacheResponse); assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); } finally { @@ -55,7 +55,7 @@ public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest { for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { try { enableIndexBlock("test", blockSetting); - assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setFieldDataCache(true)); + assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true)); } finally { disableIndexBlock("test", blockSetting); } diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index 9fbfd3914f2..538a8d452e9 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -106,12 +106,12 @@ public class MultiSearchRequestTests extends ElasticsearchTestCase { assertThat(request.requests().size(), equalTo(3)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); - assertThat(request.requests().get(0).queryCache(), equalTo(true)); + assertThat(request.requests().get(0).requestCache(), equalTo(true)); assertThat(request.requests().get(0).preference(), nullValue()); assertThat(request.requests().get(1).indices()[0], equalTo("test2")); 
assertThat(request.requests().get(1).indices()[1], equalTo("test3")); assertThat(request.requests().get(1).types()[0], equalTo("type1")); - assertThat(request.requests().get(1).queryCache(), nullValue()); + assertThat(request.requests().get(1).requestCache(), nullValue()); assertThat(request.requests().get(1).preference(), equalTo("_local")); assertThat(request.requests().get(2).indices()[0], equalTo("test4")); assertThat(request.requests().get(2).indices()[1], equalTo("test1")); diff --git a/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java b/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java index 0b869d72e36..47370c29314 100644 --- a/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java +++ b/core/src/test/java/org/elasticsearch/document/DocumentActionsTests.java @@ -78,7 +78,7 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest { assertThat(indexExists("test1234565"), equalTo(false)); logger.info("Clearing cache"); - ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true)).actionGet(); + ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).queryCache(true)).actionGet(); assertNoFailures(clearIndicesCacheResponse); assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); diff --git a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesRequestCacheTests.java similarity index 91% rename from core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java rename to core/src/test/java/org/elasticsearch/indices/cache/query/IndicesRequestCacheTests.java index 9013156a59b..37ea2ef8d37 100644 --- a/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cache/query/IndicesRequestCacheTests.java @@ -21,7 +21,7 @@ package org.elasticsearch.indices.cache.query; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; @@ -34,13 +34,13 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.greaterThan; -public class IndicesQueryCacheTests extends ElasticsearchIntegrationTest { +public class IndicesRequestCacheTests extends ElasticsearchIntegrationTest { // One of the primary purposes of the query cache is to cache aggs results public void testCacheAggs() throws Exception { assertAcked(client().admin().indices().prepareCreate("index") .addMapping("type", "f", "type=date") - .setSettings(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true).get()); + .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true).get()); indexRandom(true, client().prepareIndex("index", "type").setSource("f", 
"2014-03-10T00:00:00.000Z"), client().prepareIndex("index", "type").setSource("f", "2014-05-13T00:00:00.000Z")); @@ -54,7 +54,7 @@ public class IndicesQueryCacheTests extends ElasticsearchIntegrationTest { assertSearchResponse(r1); // The cached is actually used - assertThat(client().admin().indices().prepareStats("index").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); for (int i = 0; i < 10; ++i) { final SearchResponse r2 = client().prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH) diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java index 555dccc6c19..9a8a37c9a08 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java @@ -39,14 +39,14 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.FilterCacheModule; -import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.FilterCacheStats; -import org.elasticsearch.index.cache.filter.index.IndexFilterCache; +import org.elasticsearch.index.cache.query.QueryCacheModule; +import org.elasticsearch.index.cache.query.QueryCacheStats; +import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.query.index.IndexQueryCache; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; @@ -78,9 +78,9 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. 
Thread.sleep for 60s is bad return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put(IndicesQueryCache.INDICES_CACHE_QUERY_CLEAN_INTERVAL, "1ms") + .put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, "1ms") .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) + .put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class) .build(); } @@ -146,10 +146,10 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test") - .clear().setFieldData(true).setFilterCache(true) + .clear().setFieldData(true).setQueryCache(true) .execute().actionGet(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l)); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); + assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); // sort to load it to field data and filter to load filter cache client().prepareSearch() @@ -167,10 +167,10 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); indicesStats = client().admin().indices().prepareStats("test") - .clear().setFieldData(true).setFilterCache(true) + .clear().setFieldData(true).setQueryCache(true) .execute().actionGet(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l)); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); client().admin().indices().prepareClearCache().execute().actionGet(); Thread.sleep(100); // Make sure the filter cache entries have been removed... 
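For illustration only (this snippet is not part of the commit): after this rename, a stats consumer written against the Java client reads the Lucene filter cache through getQueryCache() (formerly getFilterCache()) and the shard-level request cache through getRequestCache() (formerly getQueryCache()). A minimal sketch follows, using only accessors that appear in the hunks above and below; the Client instance, the class name, and the index name "test" are assumptions mirroring the surrounding test code, not identifiers introduced by this change.

import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.client.Client;

public class RenamedCacheStatsSketch {
    // Illustrative only: assumes a connected Client and an existing index named "test".
    static void printCacheStats(Client client) {
        IndicesStatsResponse stats = client.admin().indices().prepareStats("test")
                .clear()
                .setQueryCache(true)    // was setFilterCache(true): the Lucene filter/query cache
                .setRequestCache(true)  // was setQueryCache(true): the shard request cache
                .get();
        // getQueryCache() replaces getFilterCache(); getRequestCache() replaces the old getQueryCache().
        System.out.println("query cache bytes:   " + stats.getTotal().getQueryCache().getMemorySizeInBytes());
        System.out.println("request cache bytes: " + stats.getTotal().getRequestCache().getMemorySizeInBytes());
    }
}
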
@@ -180,15 +180,15 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); indicesStats = client().admin().indices().prepareStats("test") - .clear().setFieldData(true).setFilterCache(true) + .clear().setFieldData(true).setQueryCache(true) .execute().actionGet(); assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l)); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); + assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); } @Test public void testQueryCache() throws Exception { - assertAcked(client().admin().indices().prepareCreate("idx").setSettings(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true).get()); + assertAcked(client().admin().indices().prepareCreate("idx").setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true).get()); ensureGreen(); // index docs until we have at least one doc on each shard, otherwise, our tests will not work @@ -221,15 +221,15 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { } } - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), equalTo(0l)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), equalTo(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), equalTo(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), equalTo(0l)); for (int i = 0; i < 10; i++) { assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); } - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), greaterThan(0l)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), greaterThan(0l)); // index the data again... 
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; @@ -245,36 +245,36 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { assertBusy(new Runnable() { @Override public void run() { - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); } }); for (int i = 0; i < 10; i++) { assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); } - client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); + client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); // test explicit request parameter assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(false).get().getHits().getTotalHits(), equalTo((long) numDocs)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(true).get().getHits().getTotalHits(), equalTo((long) numDocs)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); // set the index level setting to false, and see that the reverse works - client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache - assertAcked(client().admin().indices().prepareUpdateSettings("idx").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, false))); + client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache + assertAcked(client().admin().indices().prepareUpdateSettings("idx").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false))); assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); + 
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(true).get().getHits().getTotalHits(), equalTo((long) numDocs)); - assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); } @@ -638,8 +638,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { @Test public void testFlagOrdinalOrder() { Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh, - Flag.FilterCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments, - Flag.Translog, Flag.Suggest, Flag.QueryCache, Flag.Recovery}; + Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments, + Flag.Translog, Flag.Suggest, Flag.RequestCache, Flag.Recovery}; assertThat(flags.length, equalTo(Flag.values().length)); for (int i = 0; i < flags.length; i++) { @@ -862,8 +862,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { case FieldData: builder.setFieldData(set); break; - case FilterCache: - builder.setFilterCache(set); + case QueryCache: + builder.setQueryCache(set); break; case Flush: builder.setFlush(set); @@ -904,8 +904,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { case Suggest: builder.setSuggest(set); break; - case QueryCache: - builder.setQueryCache(set); + case RequestCache: + builder.setRequestCache(set); break; case Recovery: builder.setRecovery(set); @@ -922,8 +922,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { return response.getDocs() != null; case FieldData: return response.getFieldData() != null; - case FilterCache: - return response.getFilterCache() != null; + case QueryCache: + return response.getQueryCache() != null; case Flush: return response.getFlush() != null; case Get: @@ -950,8 +950,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { return response.getTranslog() != null; case Suggest: return response.getSuggest() != null; - case QueryCache: - return response.getQueryCache() != null; + case RequestCache: + return response.getRequestCache() != null; case Recovery: return response.getRecoveryStats() != null; default: @@ -960,7 +960,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { } } - private void assertEquals(FilterCacheStats stats1, FilterCacheStats stats2) { + private void assertEquals(QueryCacheStats stats1, QueryCacheStats stats2) { assertEquals(stats1.getCacheCount(), stats2.getCacheCount()); assertEquals(stats1.getCacheSize(), stats2.getCacheSize()); assertEquals(stats1.getEvictions(), stats2.getEvictions()); @@ -972,13 +972,13 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) { assertAllSuccessful(response); - FilterCacheStats total = response.getTotal().filterCache; - FilterCacheStats indexTotal = new FilterCacheStats(); - FilterCacheStats shardTotal = new FilterCacheStats(); + QueryCacheStats total = response.getTotal().queryCache; + QueryCacheStats indexTotal = new QueryCacheStats(); + 
QueryCacheStats shardTotal = new QueryCacheStats(); for (IndexStats indexStats : response.getIndices().values()) { - indexTotal.add(indexStats.getTotal().filterCache); + indexTotal.add(indexStats.getTotal().queryCache); for (ShardStats shardStats : response.getShards()) { - shardTotal.add(shardStats.getStats().filterCache); + shardTotal.add(shardStats.getStats().queryCache); } } assertEquals(total, indexTotal); @@ -992,58 +992,58 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { client().prepareIndex("index", "type", "2").setSource("foo", "baz")); ensureGreen(); - IndicesStatsResponse response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeFilterCacheStats(response); - assertEquals(0, response.getTotal().filterCache.getCacheSize()); + assertEquals(0, response.getTotal().queryCache.getCacheSize()); SearchResponse r; assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); - response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeFilterCacheStats(response); - assertThat(response.getTotal().filterCache.getHitCount(), equalTo(0L)); - assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); - assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getHitCount(), equalTo(0L)); + assertThat(response.getTotal().queryCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getCacheSize(), greaterThan(0L)); assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); - response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeFilterCacheStats(response); - assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); - assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getCacheSize(), greaterThan(0L)); assertTrue(client().prepareDelete("index", "type", "1").get().isFound()); assertTrue(client().prepareDelete("index", "type", "2").get().isFound()); refresh(); - response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeFilterCacheStats(response); - assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); - 
assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); - assertThat(response.getTotal().filterCache.getCacheCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().queryCache.getCacheCount(), greaterThan(0L)); indexRandom(true, client().prepareIndex("index", "type", "1").setSource("foo", "bar"), client().prepareIndex("index", "type", "2").setSource("foo", "baz")); assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); - response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeFilterCacheStats(response); - assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getCacheSize(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), greaterThan(0L)); - assertAllSuccessful(client().admin().indices().prepareClearCache("index").setFilterCache(true).get()); - response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertAllSuccessful(client().admin().indices().prepareClearCache("index").setQueryCache(true).get()); + response = client().admin().indices().prepareStats("index").setQueryCache(true).get(); assertCumulativeFilterCacheStats(response); - assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); - assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); - assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), equalTo(0L)); + assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), equalTo(0L)); } } diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java index adf1fbf2a04..6f9a2a1bdc4 100644 --- a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java @@ -38,7 +38,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.engine.Segment; import 
org.elasticsearch.index.mapper.MappedFieldType.Loading; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.warmer.IndexWarmerMissingException; import org.elasticsearch.search.warmer.IndexWarmersMetaData; @@ -345,14 +345,14 @@ public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest { createIndex("test"); ensureGreen(); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, false))); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false))); logger.info("register warmer with no query cache, validate no cache is used"); assertAcked(client().admin().indices().preparePutWarmer("warmer_1") .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())) .get()); client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); + assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); logger.info("register warmer with query cache, validate caching happened"); assertAcked(client().admin().indices().preparePutWarmer("warmer_1") @@ -361,13 +361,13 @@ public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest { // index again, to make sure it gets refreshed client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); - client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache - assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l)); + client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache + assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l)); logger.info("enable default query caching on the index level, and test that no flag on warmer still caches"); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true))); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true))); assertAcked(client().admin().indices().preparePutWarmer("warmer_1") .setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())) @@ -375,6 +375,6 @@ public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest { // index again, to make sure it gets refreshed 
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet(); - assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l)); + assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l)); } } diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java index a55c7e44cbb..92d019e1c92 100644 --- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java +++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchTests.java @@ -30,9 +30,9 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.cache.filter.FilterCacheModule; -import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.index.IndexFilterCache; +import org.elasticsearch.index.cache.query.QueryCacheModule; +import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.query.index.IndexQueryCache; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.query.HasChildQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -75,7 +75,7 @@ public class ChildQuerySearchTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // aggressive filter caching so that we can assert on the filter cache size - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) + .put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } diff --git a/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java b/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java index 97417a53e7f..2569e7b3f45 100644 --- a/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java +++ b/core/src/test/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchTests.java @@ -21,9 +21,9 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.filter.FilterCacheModule; -import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.index.IndexFilterCache; +import org.elasticsearch.index.cache.query.QueryCacheModule; +import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.query.index.IndexQueryCache; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.search.sort.SortOrder; @@ -51,7 +51,7 @@ public class ScriptQuerySearchTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // 
aggressive filter caching so that we can assert on the number of iterations of the script filters - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) + .put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index 3ad3cd76dfb..7189faa4757 100644 --- a/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -27,6 +27,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.google.common.base.Joiner; import com.google.common.base.Predicate; import com.google.common.collect.Lists; + import org.apache.commons.lang3.StringUtils; import org.apache.http.impl.client.HttpClients; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -108,7 +109,7 @@ import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.TranslogWriter; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.flush.IndicesSyncedFlushResult; import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.store.IndicesStore; @@ -463,7 +464,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } if (random.nextBoolean()) { - builder.put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, random.nextBoolean()); + builder.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, random.nextBoolean()); } if (random.nextBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java b/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java index bc868417c75..28f99c45e5c 100644 --- a/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java +++ b/core/src/test/java/org/elasticsearch/test/ElasticsearchSingleNodeTest.java @@ -222,7 +222,7 @@ public abstract class ElasticsearchSingleNodeTest extends ElasticsearchTestCase BigArrays bigArrays = indexService.injector().getInstance(BigArrays.class); ThreadPool threadPool = indexService.injector().getInstance(ThreadPool.class); PageCacheRecycler pageCacheRecycler = indexService.injector().getInstance(PageCacheRecycler.class); - return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, indexService, indexService.cache().filter(), indexService.fieldData()); + return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, indexService, indexService.cache().query(), indexService.fieldData()); } /** diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java index 9e9bf3ec7ca..2b1b8b5bcf6 100644 --- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -30,6 +30,7 @@ import com.google.common.collect.*; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.SettableFuture; + import 
org.apache.lucene.store.StoreRateLimiting; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; @@ -71,10 +72,10 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.cache.filter.FilterCacheModule; -import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.index.IndexFilterCache; -import org.elasticsearch.index.cache.filter.none.NoneFilterCache; +import org.elasticsearch.index.cache.query.QueryCacheModule; +import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.query.index.IndexQueryCache; +import org.elasticsearch.index.cache.query.none.NoneQueryCache; import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineClosedException; @@ -85,7 +86,7 @@ import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.store.IndicesStore; @@ -437,7 +438,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexFilterCache.class : NoneFilterCache.class); + builder.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? 
IndexQueryCache.class : NoneQueryCache.class); } if (random.nextBoolean()) { @@ -468,7 +469,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); + builder.put(IndicesRequestCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); } if (random.nextBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java index 42123493ee1..c97ac47612e 100644 --- a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java +++ b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java @@ -19,6 +19,7 @@ package org.elasticsearch.test; import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; + import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; @@ -34,7 +35,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.filter.FilterCache; +import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -83,7 +84,7 @@ public class TestSearchContext extends SearchContext { private String[] types; private SearchContextAggregations aggregations; - public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService, FilterCache filterCache, IndexFieldDataService indexFieldDataService) { + public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService, QueryCache filterCache, IndexFieldDataService indexFieldDataService) { this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays.withCircuitBreaking(); this.indexService = indexService; diff --git a/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java index 699d13b7082..9ebecc68c46 100644 --- a/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/core/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -82,8 +82,8 @@ public final class MockEngineSupport { public MockEngineSupport(EngineConfig config) { Settings indexSettings = config.getIndexSettings(); shardId = config.getShardId(); - filterCache = config.getFilterCache(); - filterCachingPolicy = config.getFilterCachingPolicy(); + filterCache = config.getQueryCache(); + filterCachingPolicy = config.getQueryCachingPolicy(); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); Random random = new Random(seed); final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow diff --git a/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java b/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java index 684a66f1b01..80b83affd28 100644 --- 
a/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java +++ b/core/src/test/java/org/elasticsearch/test/search/MockSearchService.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; -import org.elasticsearch.indices.cache.query.IndicesQueryCache; +import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.dfs.DfsPhase; @@ -54,7 +54,7 @@ public class MockSearchService extends SearchService { @Inject public MockSearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, - DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesQueryCache indicesQueryCache) { + DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { super(settings, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase, queryPhase, fetchPhase, indicesQueryCache); } diff --git a/docs/reference/aggregations/misc.asciidoc b/docs/reference/aggregations/misc.asciidoc index 73c24e73585..074a6eaaca3 100644 --- a/docs/reference/aggregations/misc.asciidoc +++ b/docs/reference/aggregations/misc.asciidoc @@ -7,7 +7,7 @@ can be cached for faster responses. These cached results are the same results that would be returned by an uncached aggregation -- you will never get stale results. -See <> for more details. +See <> for more details. [[returning-only-agg-results]] == Returning only aggregation results @@ -73,4 +73,4 @@ Then that piece of metadata will be returned in place for our `titles` terms agg } } } --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/reference/indices/clearcache.asciidoc b/docs/reference/indices/clearcache.asciidoc index 5015d09fb03..21008e5b46b 100644 --- a/docs/reference/indices/clearcache.asciidoc +++ b/docs/reference/indices/clearcache.asciidoc @@ -10,7 +10,7 @@ $ curl -XPOST 'http://localhost:9200/twitter/_cache/clear' -------------------------------------------------- The API, by default, will clear all caches. Specific caches can be cleaned -explicitly by setting `filter`, `fielddata` or `query_cache`. +explicitly by setting `query`, `fielddata` or `request`. All caches relating to a specific field(s) can also be cleared by specifying `fields` parameter with a comma delimited list of the @@ -29,5 +29,3 @@ $ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear' $ curl -XPOST 'http://localhost:9200/_cache/clear' -------------------------------------------------- -NOTE: The `filter` cache is not cleared immediately but is scheduled to be -cleared within 60 seconds. diff --git a/docs/reference/indices/stats.asciidoc b/docs/reference/indices/stats.asciidoc index 6f6cd4a7ceb..98cdbbf94c7 100644 --- a/docs/reference/indices/stats.asciidoc +++ b/docs/reference/indices/stats.asciidoc @@ -43,7 +43,7 @@ specified as well in the URI. Those stats can be any of: `fielddata`:: Fielddata statistics. `flush`:: Flush statistics. `merge`:: Merge statistics. -`query_cache`:: <> statistics. 
+`request_cache`:: <> statistics. `refresh`:: Refresh statistics. `suggest`:: Suggest statistics. `warmer`:: Warmer statistics. diff --git a/docs/reference/modules/indices.asciidoc b/docs/reference/modules/indices.asciidoc index e8f2ef05ce1..309d4490123 100644 --- a/docs/reference/modules/indices.asciidoc +++ b/docs/reference/modules/indices.asciidoc @@ -14,17 +14,17 @@ Available settings include: Set limits on the amount of heap used by the in-memory fielddata cache. -<>:: +<>:: - Configure the amount heap used to cache filter results. + Configure the amount of heap used to cache query results. <>:: Control the size of the buffer allocated to the indexing process. -<>:: +<>:: - Control the behaviour of the shard-level query cache. + Control the behaviour of the shard-level request cache. <>:: @@ -38,11 +38,11 @@ include::indices/circuit_breaker.asciidoc[] include::indices/fielddata.asciidoc[] -include::indices/filter_cache.asciidoc[] +include::indices/query_cache.asciidoc[] include::indices/indexing_buffer.asciidoc[] -include::indices/query-cache.asciidoc[] +include::indices/request_cache.asciidoc[] include::indices/recovery.asciidoc[] diff --git a/docs/reference/modules/indices/filter_cache.asciidoc b/docs/reference/modules/indices/filter_cache.asciidoc deleted file mode 100644 index 163a75138b4..00000000000 --- a/docs/reference/modules/indices/filter_cache.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[filter-cache]] -=== Node Filter Cache - -The filter cache is responsible for caching the results of filters (used in -the query). There is one filter cache per node that is shared by all shards. -The cache implements an LRU eviction policy: when a cache becomes full, the -least recently used data is evicted to make way for new data. - -The following setting is _static_ and must be configured on every data node in -the cluster: - -`indices.cache.filter.size`:: - - Controls the memory size for the filter cache , defaults to `10%`. Accepts - either a percentage value, like `30%`, or an exact value, like `512mb`. - diff --git a/docs/reference/modules/indices/query_cache.asciidoc b/docs/reference/modules/indices/query_cache.asciidoc new file mode 100644 index 00000000000..47506650b21 --- /dev/null +++ b/docs/reference/modules/indices/query_cache.asciidoc @@ -0,0 +1,18 @@ +[[query-cache]] +=== Node Query Cache + +The query cache is responsible for caching the results of queries. +There is one query cache per node that is shared by all shards. +The cache implements an LRU eviction policy: when a cache becomes full, the +least recently used data is evicted to make way for new data. + +The query cache only caches queries which are being used in a filter context. + +The following setting is _static_ and must be configured on every data node in +the cluster: + +`indices.queries.cache.size`:: + + Controls the memory size for the query cache, defaults to `10%`. Accepts + either a percentage value, like `5%`, or an exact value, like `512mb`. 
+ diff --git a/docs/reference/modules/indices/query-cache.asciidoc b/docs/reference/modules/indices/request_cache.asciidoc similarity index 80% rename from docs/reference/modules/indices/query-cache.asciidoc rename to docs/reference/modules/indices/request_cache.asciidoc index 444e578de06..1976e470ad7 100644 --- a/docs/reference/modules/indices/query-cache.asciidoc +++ b/docs/reference/modules/indices/request_cache.asciidoc @@ -1,21 +1,21 @@ -[[shard-query-cache]] -=== Shard query cache +[[shard-request-cache]] +=== Shard request cache When a search request is run against an index or against many indices, each involved shard executes the search locally and returns its local results to the _coordinating node_, which combines these shard-level results into a ``global'' result set. -The shard-level query cache module caches the local results on each shard. +The shard-level request cache module caches the local results on each shard. This allows frequently used (and potentially heavy) search requests to return -results almost instantly. The query cache is a very good fit for the logging +results almost instantly. The request cache is a very good fit for the logging use case, where only the most recent index is being actively updated -- results from older indices will be served directly from the cache. [IMPORTANT] =================================== -For now, the query cache will only cache the results of search requests +For now, the request cache will only cache the results of search requests where `size=0`, so it will not cache `hits`, but it will cache `hits.total`, <>, and <>. @@ -42,7 +42,7 @@ The cache can be expired manually with the <> API: [source,json] ------------------------ -curl 'localhost:9200/_stats/query_cache?pretty&human' +curl 'localhost:9200/_stats/request_cache?pretty&human' ------------------------ or by node with the <> API: [source,json] ------------------------ -curl 'localhost:9200/_nodes/stats/indices/query_cache?pretty&human' +curl 'localhost:9200/_nodes/stats/indices/request_cache?pretty&human' ------------------------ diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 3e1699fc55b..9c6a9b49aae 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -370,11 +370,14 @@ or the <> instead. +// CACHES +[role="exclude",id="shard-query-cache"] +=== Shard request cache +The shard query cache has been renamed <>. +[role="exclude",id="filter-cache"] +=== Query cache - - - - +The filter cache has been renamed <>. diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index cfd0394fc2d..74ef55cee6e 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -73,7 +73,7 @@ And here is a sample response: Set to `true` or `false` to enable or disable the caching of search results for requests where `size` is 0, ie aggregations and suggestions (no top hits returned). - See <>. + See <>. 
`terminate_after`:: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json index 6873e91b37b..31a50c4a8c4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.clear_cache.json @@ -24,13 +24,9 @@ "type" : "list", "description" : "A comma-separated list of fields to clear when using the `field_data` parameter (default: all)" }, - "filter": { + "query": { "type" : "boolean", - "description" : "Clear filter caches" - }, - "filter_cache": { - "type" : "boolean", - "description" : "Clear filter caches" + "description" : "Clear query caches" }, "ignore_unavailable": { "type" : "boolean", @@ -54,9 +50,9 @@ "type" : "boolean", "description" : "Clear the recycler cache" }, - "query_cache": { + "request": { "type" : "boolean", - "description" : "Clear query cache" + "description" : "Clear request cache" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json index 0375721ed43..7ec665ca060 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.stats.json @@ -17,7 +17,7 @@ }, "metric" : { "type" : "list", - "options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], + "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "percolate", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], "description" : "Limit the information returned the specific metrics." } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json index 0a0870020b3..59b69c85e4c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/nodes.stats.json @@ -20,7 +20,7 @@ }, "index_metric" : { "type" : "list", - "options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], + "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "percolate", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"], "description" : "Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified." 
}, "node_id": { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index e3c286c842c..f0ca4d80baf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -151,9 +151,9 @@ "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" }, - "query_cache": { + "request_cache": { "type" : "boolean", - "description" : "Specify if query cache should be used for this request or not, defaults to index level setting" + "description" : "Specify if request cache should be used for this request or not, defaults to index level setting" } } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index a8f0b9bb6ac..0efb307e133 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -22,8 +22,8 @@ completion.size .+ \n fielddata.memory_size .+ \n fielddata.evictions .+ \n - filter_cache.memory_size .+ \n - filter_cache.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n flush.total .+ \n flush.total_time .+ \n get.current .+ \n diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml index 2857ff6f008..ed676924f99 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/11_metric.yaml @@ -29,7 +29,7 @@ setup: - is_true: _all.total.refresh - is_true: _all.total.flush - is_true: _all.total.warmer - - is_true: _all.total.filter_cache + - is_true: _all.total.query_cache - is_true: _all.total.fielddata - is_true: _all.total.percolate - is_true: _all.total.completion @@ -52,7 +52,7 @@ setup: - is_true: _all.total.refresh - is_true: _all.total.flush - is_true: _all.total.warmer - - is_true: _all.total.filter_cache + - is_true: _all.total.query_cache - is_true: _all.total.fielddata - is_true: _all.total.percolate - is_true: _all.total.completion @@ -75,7 +75,7 @@ setup: - is_false: _all.total.refresh - is_false: _all.total.flush - is_false: _all.total.warmer - - is_false: _all.total.filter_cache + - is_false: _all.total.query_cache - is_false: _all.total.fielddata - is_false: _all.total.percolate - is_false: _all.total.completion @@ -98,7 +98,7 @@ setup: - is_false: _all.total.refresh - is_false: _all.total.flush - is_false: _all.total.warmer - - is_false: _all.total.filter_cache + - is_false: _all.total.query_cache - is_false: _all.total.fielddata - is_false: _all.total.percolate - is_false: _all.total.completion @@ -122,7 +122,7 @@ setup: - is_false: _all.total.refresh - is_false: _all.total.flush - is_false: _all.total.warmer - - is_false: _all.total.filter_cache + - is_false: _all.total.query_cache - is_false: _all.total.fielddata - is_false: _all.total.percolate - is_false: _all.total.completion