Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-24 17:09:48 +00:00)

Merge branch 'master' into feature/query-refactoring

Commit: 53f6bf0625
@@ -29,7 +29,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.stats.PercolateStats;

@@ -46,7 +46,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
private DocsStats docs;
private StoreStats store;
private FieldDataStats fieldData;
private FilterCacheStats filterCache;
private QueryCacheStats queryCache;
private CompletionStats completion;
private SegmentsStats segments;
private PercolateStats percolate;

@@ -60,7 +60,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
this.docs = new DocsStats();
this.store = new StoreStats();
this.fieldData = new FieldDataStats();
this.filterCache = new FilterCacheStats();
this.queryCache = new QueryCacheStats();
this.completion = new CompletionStats();
this.segments = new SegmentsStats();
this.percolate = new PercolateStats();

@@ -83,7 +83,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
}
store.add(shardCommonStats.store);
fieldData.add(shardCommonStats.fieldData);
filterCache.add(shardCommonStats.filterCache);
queryCache.add(shardCommonStats.queryCache);
completion.add(shardCommonStats.completion);
segments.add(shardCommonStats.segments);
percolate.add(shardCommonStats.percolate);

@@ -117,8 +117,8 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
return fieldData;
}

public FilterCacheStats getFilterCache() {
return filterCache;
public QueryCacheStats getQueryCache() {
return queryCache;
}

public CompletionStats getCompletion() {

@@ -140,7 +140,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
docs = DocsStats.readDocStats(in);
store = StoreStats.readStoreStats(in);
fieldData = FieldDataStats.readFieldDataStats(in);
filterCache = FilterCacheStats.readFilterCacheStats(in);
queryCache = QueryCacheStats.readFilterCacheStats(in);
completion = CompletionStats.readCompletionStats(in);
segments = SegmentsStats.readSegmentsStats(in);
percolate = PercolateStats.readPercolateStats(in);

@@ -153,7 +153,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
docs.writeTo(out);
store.writeTo(out);
fieldData.writeTo(out);
filterCache.writeTo(out);
queryCache.writeTo(out);
completion.writeTo(out);
segments.writeTo(out);
percolate.writeTo(out);

@@ -176,7 +176,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
docs.toXContent(builder, params);
store.toXContent(builder, params);
fieldData.toXContent(builder, params);
filterCache.toXContent(builder, params);
queryCache.toXContent(builder, params);
completion.toXContent(builder, params);
segments.toXContent(builder, params);
percolate.toXContent(builder, params);
@@ -55,7 +55,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {

private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
CommonStatsFlags.Flag.Percolate);

private final NodeService nodeService;
@@ -30,10 +30,10 @@ import java.io.IOException;
*/
public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCacheRequest> {

private boolean filterCache = false;
private boolean queryCache = false;
private boolean fieldDataCache = false;
private boolean recycler = false;
private boolean queryCache = false;
private boolean requestCache = false;
private String[] fields = null;

@@ -44,17 +44,8 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache
super(indices);
}

public boolean filterCache() {
return filterCache;
}

public ClearIndicesCacheRequest filterCache(boolean filterCache) {
this.filterCache = filterCache;
return this;
}

public boolean queryCache() {
return this.queryCache;
return queryCache;
}

public ClearIndicesCacheRequest queryCache(boolean queryCache) {

@@ -62,6 +53,15 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache
return this;
}

public boolean requestCache() {
return this.requestCache;
}

public ClearIndicesCacheRequest requestCache(boolean requestCache) {
this.requestCache = requestCache;
return this;
}

public boolean fieldDataCache() {
return this.fieldDataCache;
}

@@ -92,20 +92,20 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
queryCache = in.readBoolean();
fieldDataCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
queryCache = in.readBoolean();
requestCache = in.readBoolean();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(queryCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeBoolean(queryCache);
out.writeBoolean(requestCache);
}
}
@@ -31,13 +31,13 @@ public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBu
super(client, action, new ClearIndicesCacheRequest());
}

public ClearIndicesCacheRequestBuilder setFilterCache(boolean filterCache) {
request.filterCache(filterCache);
public ClearIndicesCacheRequestBuilder setQueryCache(boolean queryCache) {
request.queryCache(queryCache);
return this;
}

public ClearIndicesCacheRequestBuilder setQueryCache(boolean queryCache) {
request.queryCache(queryCache);
public ClearIndicesCacheRequestBuilder setRequestCache(boolean requestCache) {
request.requestCache(requestCache);
return this;
}
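For callers, the rename surfaces in the admin-client builder methods above. A minimal usage sketch (prepareClearCache is assumed to be the usual entry point; only setQueryCache and setRequestCache come from this change):

    // clears the renamed caches on "my-index"
    client.admin().indices().prepareClearCache("my-index")
            .setQueryCache(true)      // clears what used to be exposed as setFilterCache(...)
            .setRequestCache(true)    // clears the shard request cache, formerly setQueryCache(...)
            .get();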
@@ -31,10 +31,10 @@ import java.io.IOException;
*/
class ShardClearIndicesCacheRequest extends BroadcastShardRequest {

private boolean filterCache = false;
private boolean queryCache = false;
private boolean fieldDataCache = false;
private boolean recycler;
private boolean queryCache = false;
private boolean requestCache = false;

private String[] fields = null;

@@ -43,21 +43,21 @@ class ShardClearIndicesCacheRequest extends BroadcastShardRequest {

ShardClearIndicesCacheRequest(ShardId shardId, ClearIndicesCacheRequest request) {
super(shardId, request);
filterCache = request.filterCache();
queryCache = request.queryCache();
fieldDataCache = request.fieldDataCache();
fields = request.fields();
recycler = request.recycler();
queryCache = request.queryCache();
}

public boolean filterCache() {
return filterCache;
requestCache = request.requestCache();
}

public boolean queryCache() {
return queryCache;
}

public boolean requestCache() {
return requestCache;
}

public boolean fieldDataCache() {
return this.fieldDataCache;
}

@@ -73,20 +73,20 @@ class ShardClearIndicesCacheRequest extends BroadcastShardRequest {
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
queryCache = in.readBoolean();
fieldDataCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
queryCache = in.readBoolean();
requestCache = in.readBoolean();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(queryCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeBoolean(queryCache);
out.writeBoolean(requestCache);
}
}
@@ -35,7 +35,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -50,16 +50,16 @@ import static com.google.common.collect.Lists.newArrayList;
public class TransportClearIndicesCacheAction extends TransportBroadcastAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ShardClearIndicesCacheRequest, ShardClearIndicesCacheResponse> {

private final IndicesService indicesService;
private final IndicesQueryCache indicesQueryCache;
private final IndicesRequestCache indicesRequestCache;

@Inject
public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, IndicesService indicesService,
IndicesQueryCache indicesQueryCache, ActionFilters actionFilters) {
IndicesRequestCache indicesQueryCache, ActionFilters actionFilters) {
super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters,
ClearIndicesCacheRequest.class, ShardClearIndicesCacheRequest.class, ThreadPool.Names.MANAGEMENT);
this.indicesService = indicesService;
this.indicesQueryCache = indicesQueryCache;
this.indicesRequestCache = indicesQueryCache;
}

@Override

@@ -100,9 +100,9 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction<C
if (service != null) {
IndexShard shard = service.shard(request.shardId().id());
boolean clearedAtLeastOne = false;
if (request.filterCache()) {
if (request.queryCache()) {
clearedAtLeastOne = true;
service.cache().filter().clear("api");
service.cache().query().clear("api");
}
if (request.fieldDataCache()) {
clearedAtLeastOne = true;

@@ -114,9 +114,9 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction<C
}
}
}
if (request.queryCache()) {
if (request.requestCache()) {
clearedAtLeastOne = true;
indicesQueryCache.clear(shard);
indicesRequestCache.clear(shard);
}
if (request.recycler()) {
logger.debug("Clear CacheRecycler on index [{}]", service.index());

@@ -132,7 +132,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction<C
} else {
service.cache().clear("api");
service.fieldData().clear();
indicesQueryCache.clear(shard);
indicesRequestCache.clear(shard);
}
}
}
@@ -170,7 +170,7 @@ public class GetIndexResponse extends ActionResponse {
for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) {
out.writeString(warmerEntry.name());
out.writeStringArray(warmerEntry.types());
out.writeOptionalBoolean(warmerEntry.queryCache());
out.writeOptionalBoolean(warmerEntry.requestCache());
out.writeBytesReference(warmerEntry.source());
}
}
@@ -26,8 +26,8 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;

@@ -88,8 +88,8 @@ public class CommonStats implements Streamable, ToXContent {
case Warmer:
warmer = new WarmerStats();
break;
case FilterCache:
filterCache = new FilterCacheStats();
case QueryCache:
queryCache = new QueryCacheStats();
break;
case FieldData:
fieldData = new FieldDataStats();

@@ -109,8 +109,8 @@ public class CommonStats implements Streamable, ToXContent {
case Suggest:
suggest = new SuggestStats();
break;
case QueryCache:
queryCache = new QueryCacheStats();
case RequestCache:
requestCache = new RequestCacheStats();
break;
case Recovery:
recoveryStats = new RecoveryStats();

@@ -154,8 +154,8 @@ public class CommonStats implements Streamable, ToXContent {
case Warmer:
warmer = indexShard.warmerStats();
break;
case FilterCache:
filterCache = indexShard.filterCacheStats();
case QueryCache:
queryCache = indexShard.queryCacheStats();
break;
case FieldData:
fieldData = indexShard.fieldDataStats(flags.fieldDataFields());

@@ -175,8 +175,8 @@ public class CommonStats implements Streamable, ToXContent {
case Suggest:
suggest = indexShard.suggestStats();
break;
case QueryCache:
queryCache = indexShard.queryCache().stats();
case RequestCache:
requestCache = indexShard.requestCache().stats();
break;
case Recovery:
recoveryStats = indexShard.recoveryStats();

@@ -215,7 +215,7 @@ public class CommonStats implements Streamable, ToXContent {
public WarmerStats warmer;

@Nullable
public FilterCacheStats filterCache;
public QueryCacheStats queryCache;

@Nullable
public FieldDataStats fieldData;

@@ -236,7 +236,7 @@ public class CommonStats implements Streamable, ToXContent {
public SuggestStats suggest;

@Nullable
public QueryCacheStats queryCache;
public RequestCacheStats requestCache;

@Nullable
public RecoveryStats recoveryStats;

@@ -314,13 +314,13 @@ public class CommonStats implements Streamable, ToXContent {
} else {
warmer.add(stats.getWarmer());
}
if (filterCache == null) {
if (stats.getFilterCache() != null) {
filterCache = new FilterCacheStats();
filterCache.add(stats.getFilterCache());
if (queryCache == null) {
if (stats.getQueryCache() != null) {
queryCache = new QueryCacheStats();
queryCache.add(stats.getQueryCache());
}
} else {
filterCache.add(stats.getFilterCache());
queryCache.add(stats.getQueryCache());
}

if (fieldData == null) {

@@ -371,13 +371,13 @@ public class CommonStats implements Streamable, ToXContent {
} else {
suggest.add(stats.getSuggest());
}
if (queryCache == null) {
if (stats.getQueryCache() != null) {
queryCache = new QueryCacheStats();
queryCache.add(stats.getQueryCache());
if (requestCache == null) {
if (stats.getRequestCache() != null) {
requestCache = new RequestCacheStats();
requestCache.add(stats.getRequestCache());
}
} else {
queryCache.add(stats.getQueryCache());
requestCache.add(stats.getRequestCache());
}
if (recoveryStats == null) {
if (stats.getRecoveryStats() != null) {

@@ -435,8 +435,8 @@ public class CommonStats implements Streamable, ToXContent {
}

@Nullable
public FilterCacheStats getFilterCache() {
return this.filterCache;
public QueryCacheStats getQueryCache() {
return this.queryCache;
}

@Nullable

@@ -470,8 +470,8 @@ public class CommonStats implements Streamable, ToXContent {
}

@Nullable
public QueryCacheStats getQueryCache() {
return queryCache;
public RequestCacheStats getRequestCache() {
return requestCache;
}

@Nullable

@@ -494,8 +494,8 @@ public class CommonStats implements Streamable, ToXContent {
if (this.getFieldData() != null) {
size += this.getFieldData().getMemorySizeInBytes();
}
if (this.getFilterCache() != null) {
size += this.getFilterCache().getMemorySizeInBytes();
if (this.getQueryCache() != null) {
size += this.getQueryCache().getMemorySizeInBytes();
}
if (this.getPercolate() != null) {
size += this.getPercolate().getMemorySizeInBytes();

@@ -539,7 +539,7 @@ public class CommonStats implements Streamable, ToXContent {
warmer = WarmerStats.readWarmerStats(in);
}
if (in.readBoolean()) {
filterCache = FilterCacheStats.readFilterCacheStats(in);
queryCache = QueryCacheStats.readFilterCacheStats(in);
}
if (in.readBoolean()) {
fieldData = FieldDataStats.readFieldDataStats(in);

@@ -555,7 +555,7 @@ public class CommonStats implements Streamable, ToXContent {
}
translog = in.readOptionalStreamable(new TranslogStats());
suggest = in.readOptionalStreamable(new SuggestStats());
queryCache = in.readOptionalStreamable(new QueryCacheStats());
requestCache = in.readOptionalStreamable(new RequestCacheStats());
recoveryStats = in.readOptionalStreamable(new RecoveryStats());
}

@@ -615,11 +615,11 @@ public class CommonStats implements Streamable, ToXContent {
out.writeBoolean(true);
warmer.writeTo(out);
}
if (filterCache == null) {
if (queryCache == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
filterCache.writeTo(out);
queryCache.writeTo(out);
}
if (fieldData == null) {
out.writeBoolean(false);

@@ -647,7 +647,7 @@ public class CommonStats implements Streamable, ToXContent {
}
out.writeOptionalStreamable(translog);
out.writeOptionalStreamable(suggest);
out.writeOptionalStreamable(queryCache);
out.writeOptionalStreamable(requestCache);
out.writeOptionalStreamable(recoveryStats);
}

@@ -681,8 +681,8 @@ public class CommonStats implements Streamable, ToXContent {
if (warmer != null) {
warmer.toXContent(builder, params);
}
if (filterCache != null) {
filterCache.toXContent(builder, params);
if (queryCache != null) {
queryCache.toXContent(builder, params);
}
if (fieldData != null) {
fieldData.toXContent(builder, params);

@@ -702,8 +702,8 @@ public class CommonStats implements Streamable, ToXContent {
if (suggest != null) {
suggest.toXContent(builder, params);
}
if (queryCache != null) {
queryCache.toXContent(builder, params);
if (requestCache != null) {
requestCache.toXContent(builder, params);
}
if (recoveryStats != null) {
recoveryStats.toXContent(builder, params);
@@ -215,7 +215,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
Merge("merge"),
Flush("flush"),
Refresh("refresh"),
FilterCache("filter_cache"),
QueryCache("query_cache"),
FieldData("fielddata"),
Docs("docs"),
Warmer("warmer"),

@@ -224,7 +224,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
Segments("segments"),
Translog("translog"),
Suggest("suggest"),
QueryCache("query_cache"),
RequestCache("request_cache"),
Recovery("recovery");
@@ -166,13 +166,13 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
return flags.isSet(Flag.Warmer);
}

public IndicesStatsRequest filterCache(boolean filterCache) {
flags.set(Flag.FilterCache, filterCache);
public IndicesStatsRequest queryCache(boolean queryCache) {
flags.set(Flag.QueryCache, queryCache);
return this;
}

public boolean filterCache() {
return flags.isSet(Flag.FilterCache);
public boolean queryCache() {
return flags.isSet(Flag.QueryCache);
}

public IndicesStatsRequest fieldData(boolean fieldData) {

@@ -247,13 +247,13 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
return flags.isSet(Flag.Suggest);
}

public IndicesStatsRequest queryCache(boolean queryCache) {
flags.set(Flag.QueryCache, queryCache);
public IndicesStatsRequest requestCache(boolean requestCache) {
flags.set(Flag.RequestCache, requestCache);
return this;
}

public boolean queryCache() {
return flags.isSet(Flag.QueryCache);
public boolean requestCache() {
return flags.isSet(Flag.RequestCache);
}

public IndicesStatsRequest recovery(boolean recovery) {
@@ -112,8 +112,8 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
return this;
}

public IndicesStatsRequestBuilder setFilterCache(boolean filterCache) {
request.filterCache(filterCache);
public IndicesStatsRequestBuilder setQueryCache(boolean queryCache) {
request.queryCache(queryCache);
return this;
}

@@ -157,8 +157,8 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
return this;
}

public IndicesStatsRequestBuilder setQueryCache(boolean queryCache) {
request.queryCache(queryCache);
public IndicesStatsRequestBuilder setRequestCache(boolean requestCache) {
request.requestCache(requestCache);
return this;
}
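The stats builder follows the same naming shift. A minimal sketch (prepareStats is assumed to be the standard entry point; the two setters are the ones introduced above):

    // requests the renamed stats sections for "my-index"
    client.admin().indices().prepareStats("my-index")
            .setQueryCache(true)      // section previously exposed via setFilterCache(...)
            .setRequestCache(true)    // section previously exposed via setQueryCache(...)
            .get();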
@@ -157,8 +157,8 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction<Indice
if (request.request.warmer()) {
flags.set(CommonStatsFlags.Flag.Warmer);
}
if (request.request.filterCache()) {
flags.set(CommonStatsFlags.Flag.FilterCache);
if (request.request.queryCache()) {
flags.set(CommonStatsFlags.Flag.QueryCache);
}
if (request.request.fieldData()) {
flags.set(CommonStatsFlags.Flag.FieldData);

@@ -180,8 +180,8 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction<Indice
if (request.request.suggest()) {
flags.set(CommonStatsFlags.Flag.Suggest);
}
if (request.request.queryCache()) {
flags.set(CommonStatsFlags.Flag.QueryCache);
if (request.request.requestCache()) {
flags.set(CommonStatsFlags.Flag.RequestCache);
}
if (request.request.recovery()) {
flags.set(CommonStatsFlags.Flag.Recovery);
@@ -93,7 +93,7 @@ public class GetWarmersResponse extends ActionResponse {
out.writeString(warmerEntry.name());
out.writeStringArray(warmerEntry.types());
out.writeBytesReference(warmerEntry.source());
out.writeOptionalBoolean(warmerEntry.queryCache());
out.writeOptionalBoolean(warmerEntry.requestCache());
}
}
}
@@ -22,9 +22,9 @@
* <p>
* Index warming allows to run registered search requests to warm up the index before it is available for search.
* With the near real time aspect of search, cold data (segments) will be warmed up before they become available for
* search. This includes things such as the filter cache, filesystem cache, and loading field data for fields.
* search. This includes things such as the query cache, filesystem cache, and loading field data for fields.
* </p>
*
* @see the reference guide for more detailed information about the Indices / Search Warmer
*/
package org.elasticsearch.action.admin.indices.warmer;
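For context, warmers described in the package doc above are normally registered through the admin client. A hedged sketch, assuming the 1.x/2.x-era preparePutWarmer API (not part of this diff):

    // registers a warmer named "popular_terms" that runs a term query against "my-index"
    client.admin().indices().preparePutWarmer("popular_terms")
            .setSearchRequest(client.prepareSearch("my-index")
                    .setQuery(QueryBuilders.termQuery("status", "active")))
            .get();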
@@ -132,21 +132,21 @@ public class TransportPutWarmerAction extends TransportMasterNodeAction<PutWarme
IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
if (warmers == null) {
logger.info("[{}] putting warmer [{}]", index, request.name());
warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().queryCache(), source));
warmers = new IndexWarmersMetaData(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
boolean found = false;
List<IndexWarmersMetaData.Entry> entries = new ArrayList<>(warmers.entries().size() + 1);
for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
if (entry.name().equals(request.name())) {
found = true;
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().queryCache(), source));
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
entries.add(entry);
}
}
if (!found) {
logger.info("[{}] put warmer [{}]", index, request.name());
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().queryCache(), source));
entries.add(new IndexWarmersMetaData.Entry(request.name(), request.searchRequest().types(), request.searchRequest().requestCache(), source));
} else {
logger.info("[{}] update warmer [{}]", index, request.name());
}
@@ -76,7 +76,7 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
private BytesReference source;

private BytesReference extraSource;
private Boolean queryCache;
private Boolean requestCache;

private Scroll scroll;

@@ -103,7 +103,7 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
this.template = searchRequest.template;
this.source = searchRequest.source;
this.extraSource = searchRequest.extraSource;
this.queryCache = searchRequest.queryCache;
this.requestCache = searchRequest.requestCache;
this.scroll = searchRequest.scroll;
this.types = searchRequest.types;
this.indicesOptions = searchRequest.indicesOptions;

@@ -533,12 +533,12 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
* will default to the index level setting if query cache is enabled or not).
*/
public SearchRequest queryCache(Boolean queryCache) {
this.queryCache = queryCache;
this.requestCache = queryCache;
return this;
}

public Boolean queryCache() {
return this.queryCache;
public Boolean requestCache() {
return this.requestCache;
}

@Override

@@ -568,7 +568,7 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
if (in.readBoolean()) {
template = Template.readTemplate(in);
}
queryCache = in.readOptionalBoolean();
requestCache = in.readOptionalBoolean();
}

@Override

@@ -602,6 +602,6 @@ public class SearchRequest extends ActionRequest<SearchRequest> implements Indic
template.writeTo(out);
}

out.writeOptionalBoolean(queryCache);
out.writeOptionalBoolean(requestCache);
}
}
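Note that in this hunk the setter keeps its old name while the field and the getter move to the new one, so existing callers keep compiling. A short sketch using only names shown above:

    SearchRequest request = new SearchRequest();
    request.queryCache(true);                       // setter name unchanged; now stores the requestCache flag
    Boolean cacheShardResult = request.requestCache();  // getter renamed from queryCache()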
@@ -49,7 +49,7 @@ public class DateMathParser {
}

// Note: we take a callable here for the timestamp in order to be able to figure out
// if it has been used. For instance, the query cache does not cache queries that make
// if it has been used. For instance, the request cache does not cache requests that make
// use of `now`.
public long parse(String text, Callable<Long> now, boolean roundUp, DateTimeZone timeZone) {
long time;
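The comment above is why parse takes a Callable rather than a plain timestamp: the caller can observe whether `now` was actually resolved and, if so, skip caching. A generic illustration of that pattern (not the Elasticsearch implementation; the parser instance and time zone are assumed for the example):

    final AtomicBoolean nowUsed = new AtomicBoolean(false);
    Callable<Long> now = new Callable<Long>() {
        @Override
        public Long call() {
            nowUsed.set(true);                     // record that the expression depended on `now`
            return System.currentTimeMillis();
        }
    };
    long resolved = dateMathParser.parse("now-1d/d", now, false, DateTimeZone.UTC);
    boolean cacheable = !nowUsed.get();            // a request cache would only cache when this is true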
@@ -52,7 +52,7 @@ import org.elasticsearch.index.translog.TranslogService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ShardsPluginsModule;

@@ -309,7 +309,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {
@Override
public void close() throws IOException {
injector.getInstance(IndicesFilterCache.class).onClose(shardId);
injector.getInstance(IndicesQueryCache.class).onClose(shardId);
}
}), path));
modules.add(new DeletionPolicyModule(indexSettings));
@@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.settings.IndexSettings;

import java.io.Closeable;

@@ -37,24 +37,24 @@ import java.io.IOException;
*/
public class IndexCache extends AbstractIndexComponent implements Closeable {

private final FilterCache filterCache;
private final QueryCachingPolicy filterCachingPolicy;
private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy;
private final BitsetFilterCache bitsetFilterCache;

@Inject
public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryCachingPolicy filterCachingPolicy, BitsetFilterCache bitsetFilterCache) {
public IndexCache(Index index, @IndexSettings Settings indexSettings, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, BitsetFilterCache bitsetFilterCache) {
super(index, indexSettings);
this.filterCache = filterCache;
this.filterCachingPolicy = filterCachingPolicy;
this.queryCache = queryCache;
this.queryCachingPolicy = queryCachingPolicy;
this.bitsetFilterCache = bitsetFilterCache;
}

public FilterCache filter() {
return filterCache;
public QueryCache query() {
return queryCache;
}

public QueryCachingPolicy filterPolicy() {
return filterCachingPolicy;
public QueryCachingPolicy queryPolicy() {
return queryCachingPolicy;
}

/**

@@ -66,11 +66,11 @@ public class IndexCache extends AbstractIndexComponent implements Closeable {

@Override
public void close() throws IOException {
IOUtils.close(filterCache, bitsetFilterCache);
IOUtils.close(queryCache, bitsetFilterCache);
}

public void clear(String reason) {
filterCache.clear(reason);
queryCache.clear(reason);
bitsetFilterCache.clear(reason);
}
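Call sites track the accessor rename: what was reached through cache().filter() and filterPolicy() is now cache().query() and queryPolicy(), as the TransportClearIndicesCacheAction hunk earlier in this commit already shows. A minimal sketch (the indexServiceSafe lookup is assumed; the accessors are the ones defined in this hunk):

    IndexService indexService = indicesService.indexServiceSafe("my-index");
    indexService.cache().query().clear("api");                       // was cache().filter().clear("api")
    QueryCachingPolicy policy = indexService.cache().queryPolicy();  // was filterPolicy()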
@@ -22,7 +22,7 @@ package org.elasticsearch.index.cache;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.bitset.BitsetFilterCacheModule;
import org.elasticsearch.index.cache.filter.FilterCacheModule;
import org.elasticsearch.index.cache.query.QueryCacheModule;

/**
*

@@ -37,7 +37,7 @@ public class IndexCacheModule extends AbstractModule {

@Override
protected void configure() {
new FilterCacheModule(settings).configure(binder());
new QueryCacheModule(settings).configure(binder());
new BitsetFilterCacheModule(settings).configure(binder());

bind(IndexCache.class).asEagerSingleton();
@@ -67,7 +67,7 @@ import java.util.concurrent.Executor;
* <p/>
* Use this cache with care, only components that require that a filter is to be materialized as a {@link BitDocIdSet}
* and require that it should always be around should use this cache, otherwise the
* {@link org.elasticsearch.index.cache.filter.FilterCache} should be used instead.
* {@link org.elasticsearch.index.cache.query.QueryCache} should be used instead.
*/
public class BitsetFilterCache extends AbstractIndexComponent implements LeafReader.CoreClosedListener, RemovalListener<Object, Cache<Filter, BitsetFilterCache.Value>>, Closeable {
@@ -1,162 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.index.cache.filter;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;

/**
*/
public class FilterCacheStats implements Streamable, ToXContent {

long ramBytesUsed;
long hitCount;
long missCount;
long cacheCount;
long cacheSize;

public FilterCacheStats() {
}

public FilterCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) {
this.ramBytesUsed = ramBytesUsed;
this.hitCount = hitCount;
this.missCount = missCount;
this.cacheCount = cacheCount;
this.cacheSize = cacheSize;
}

public void add(FilterCacheStats stats) {
ramBytesUsed += stats.ramBytesUsed;
hitCount += stats.hitCount;
missCount += stats.missCount;
cacheCount += stats.cacheCount;
cacheSize += stats.cacheSize;
}

public long getMemorySizeInBytes() {
return ramBytesUsed;
}

public ByteSizeValue getMemorySize() {
return new ByteSizeValue(ramBytesUsed);
}

/**
* The total number of lookups in the cache.
*/
public long getTotalCount() {
return hitCount + missCount;
}

/**
* The number of successful lookups in the cache.
*/
public long getHitCount() {
return hitCount;
}

/**
* The number of lookups in the cache that failed to retrieve a {@link DocIdSet}.
*/
public long getMissCount() {
return missCount;
}

/**
* The number of {@link DocIdSet}s that have been cached.
*/
public long getCacheCount() {
return cacheCount;
}

/**
* The number of {@link DocIdSet}s that are in the cache.
*/
public long getCacheSize() {
return cacheSize;
}

/**
* The number of {@link DocIdSet}s that have been evicted from the cache.
*/
public long getEvictions() {
return cacheCount - cacheSize;
}

public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException {
FilterCacheStats stats = new FilterCacheStats();
stats.readFrom(in);
return stats;
}

@Override
public void readFrom(StreamInput in) throws IOException {
ramBytesUsed = in.readLong();
hitCount = in.readLong();
missCount = in.readLong();
cacheCount = in.readLong();
cacheSize = in.readLong();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeLong(ramBytesUsed);
out.writeLong(hitCount);
out.writeLong(missCount);
out.writeLong(cacheCount);
out.writeLong(cacheSize);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.FILTER_CACHE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed);
builder.field(Fields.TOTAL_COUNT, getTotalCount());
builder.field(Fields.HIT_COUNT, getHitCount());
builder.field(Fields.MISS_COUNT, getMissCount());
builder.field(Fields.CACHE_SIZE, getCacheSize());
builder.field(Fields.CACHE_COUNT, getCacheCount());
builder.field(Fields.EVICTIONS, getEvictions());
builder.endObject();
return builder;
}

static final class Fields {
static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count");
static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count");
static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size");
static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count");
static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
}

}
@@ -17,7 +17,7 @@
* under the License.
*/

package org.elasticsearch.index.cache.filter;
package org.elasticsearch.index.cache.query;

import org.elasticsearch.index.IndexComponent;

@@ -26,7 +26,7 @@ import java.io.Closeable;
/**
*
*/
public interface FilterCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache {
public interface QueryCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache {

static class EntriesStats {
public final long sizeInBytes;
@@ -17,38 +17,38 @@
* under the License.
*/

package org.elasticsearch.index.cache.filter;
package org.elasticsearch.index.cache.query;

import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Scopes;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;

/**
*
*/
public class FilterCacheModule extends AbstractModule {
public class QueryCacheModule extends AbstractModule {

public static final class FilterCacheSettings {
public static final String FILTER_CACHE_TYPE = "index.cache.filter.type";
public static final String FILTER_CACHE_TYPE = "index.queries.cache.type";
// for test purposes only
public static final String FILTER_CACHE_EVERYTHING = "index.cache.filter.everything";
public static final String FILTER_CACHE_EVERYTHING = "index.queries.cache.everything";
}

private final Settings settings;

public FilterCacheModule(Settings settings) {
public QueryCacheModule(Settings settings) {
this.settings = settings;
}

@Override
protected void configure() {
bind(FilterCache.class)
.to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache"))
bind(QueryCache.class)
.to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class, "org.elasticsearch.index.cache.query.", "FilterCache"))
.in(Scopes.SINGLETON);
// the filter cache is a node-level thing, however we want the most popular filters
// the query cache is a node-level thing, however we want the most popular queries
// to be computed on a per-index basis, that is why we don't use the SINGLETON
// scope below
if (settings.getAsBoolean(FilterCacheSettings.FILTER_CACHE_EVERYTHING, false)) {
@@ -33,81 +33,130 @@ import java.io.IOException;
*/
public class QueryCacheStats implements Streamable, ToXContent {

long memorySize;
long evictions;
long ramBytesUsed;
long hitCount;
long missCount;
long cacheCount;
long cacheSize;

public QueryCacheStats() {
}

public QueryCacheStats(long memorySize, long evictions, long hitCount, long missCount) {
this.memorySize = memorySize;
this.evictions = evictions;
public QueryCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) {
this.ramBytesUsed = ramBytesUsed;
this.hitCount = hitCount;
this.missCount = missCount;
this.cacheCount = cacheCount;
this.cacheSize = cacheSize;
}

public void add(QueryCacheStats stats) {
this.memorySize += stats.memorySize;
this.evictions += stats.evictions;
this.hitCount += stats.hitCount;
this.missCount += stats.missCount;
ramBytesUsed += stats.ramBytesUsed;
hitCount += stats.hitCount;
missCount += stats.missCount;
cacheCount += stats.cacheCount;
cacheSize += stats.cacheSize;
}

public long getMemorySizeInBytes() {
return this.memorySize;
return ramBytesUsed;
}

public ByteSizeValue getMemorySize() {
return new ByteSizeValue(memorySize);
return new ByteSizeValue(ramBytesUsed);
}

public long getEvictions() {
return this.evictions;
/**
* The total number of lookups in the cache.
*/
public long getTotalCount() {
return hitCount + missCount;
}

/**
* The number of successful lookups in the cache.
*/
public long getHitCount() {
return this.hitCount;
return hitCount;
}

/**
* The number of lookups in the cache that failed to retrieve a {@link DocIdSet}.
*/
public long getMissCount() {
return this.missCount;
return missCount;
}

/**
* The number of {@link DocIdSet}s that have been cached.
*/
public long getCacheCount() {
return cacheCount;
}

/**
* The number of {@link DocIdSet}s that are in the cache.
*/
public long getCacheSize() {
return cacheSize;
}

/**
* The number of {@link DocIdSet}s that have been evicted from the cache.
*/
public long getEvictions() {
return cacheCount - cacheSize;
}

public static QueryCacheStats readFilterCacheStats(StreamInput in) throws IOException {
QueryCacheStats stats = new QueryCacheStats();
stats.readFrom(in);
return stats;
}

@Override
public void readFrom(StreamInput in) throws IOException {
memorySize = in.readVLong();
evictions = in.readVLong();
hitCount = in.readVLong();
missCount = in.readVLong();
ramBytesUsed = in.readLong();
hitCount = in.readLong();
missCount = in.readLong();
cacheCount = in.readLong();
cacheSize = in.readLong();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(memorySize);
out.writeVLong(evictions);
out.writeVLong(hitCount);
out.writeVLong(missCount);
out.writeLong(ramBytesUsed);
out.writeLong(hitCount);
out.writeLong(missCount);
out.writeLong(cacheCount);
out.writeLong(cacheSize);
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.QUERY_CACHE_STATS);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
builder.field(Fields.EVICTIONS, getEvictions());
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.QUERY_CACHE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed);
builder.field(Fields.TOTAL_COUNT, getTotalCount());
builder.field(Fields.HIT_COUNT, getHitCount());
builder.field(Fields.MISS_COUNT, getMissCount());
builder.field(Fields.CACHE_SIZE, getCacheSize());
builder.field(Fields.CACHE_COUNT, getCacheCount());
builder.field(Fields.EVICTIONS, getEvictions());
builder.endObject();
return builder;
}

static final class Fields {
static final XContentBuilderString QUERY_CACHE_STATS = new XContentBuilderString("query_cache");
static final XContentBuilderString QUERY_CACHE = new XContentBuilderString("query_cache");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count");
static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count");
static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size");
static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count");
static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
}

}
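Because the stats now track hit, miss, cache-count and cache-size counters directly instead of a pre-computed eviction counter, derived numbers come from the getters. A small sketch using only methods defined above:

    static double hitRate(QueryCacheStats stats) {
        long total = stats.getTotalCount();              // hitCount + missCount
        return total == 0 ? 0.0 : (double) stats.getHitCount() / total;
    }
    // evictions are likewise derived: stats.getEvictions() == getCacheCount() - getCacheSize()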
@@ -17,7 +17,7 @@
* under the License.
*/

package org.elasticsearch.index.cache.filter.index;
package org.elasticsearch.index.cache.query.index;

import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.Weight;

@@ -26,20 +26,20 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;

/**
* The index-level filter cache. This class mostly delegates to the node-level
* filter cache: {@link IndicesFilterCache}.
* The index-level query cache. This class mostly delegates to the node-level
* query cache: {@link IndicesQueryCache}.
*/
public class IndexFilterCache extends AbstractIndexComponent implements FilterCache {
public class IndexQueryCache extends AbstractIndexComponent implements QueryCache {

final IndicesFilterCache indicesFilterCache;
final IndicesQueryCache indicesFilterCache;

@Inject
public IndexFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) {
public IndexQueryCache(Index index, @IndexSettings Settings indexSettings, IndicesQueryCache indicesFilterCache) {
super(index, indexSettings);
this.indicesFilterCache = indicesFilterCache;
}
@@ -17,7 +17,7 @@
* under the License.
*/

package org.elasticsearch.index.cache.filter.none;
package org.elasticsearch.index.cache.query.none;

import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.Weight;

@@ -25,18 +25,18 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.settings.IndexSettings;

/**
*
*/
public class NoneFilterCache extends AbstractIndexComponent implements FilterCache {
public class NoneQueryCache extends AbstractIndexComponent implements QueryCache {

@Inject
public NoneFilterCache(Index index, @IndexSettings Settings indexSettings) {
public NoneQueryCache(Index index, @IndexSettings Settings indexSettings) {
super(index, indexSettings);
logger.debug("Using no filter cache");
logger.debug("Using no query cache");
}

@Override
113
core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java
vendored
Normal file
113
core/src/main/java/org/elasticsearch/index/cache/request/RequestCacheStats.java
vendored
Normal file
@ -0,0 +1,113 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.cache.request;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;

/**
 */
public class RequestCacheStats implements Streamable, ToXContent {

    long memorySize;
    long evictions;
    long hitCount;
    long missCount;

    public RequestCacheStats() {
    }

    public RequestCacheStats(long memorySize, long evictions, long hitCount, long missCount) {
        this.memorySize = memorySize;
        this.evictions = evictions;
        this.hitCount = hitCount;
        this.missCount = missCount;
    }

    public void add(RequestCacheStats stats) {
        this.memorySize += stats.memorySize;
        this.evictions += stats.evictions;
        this.hitCount += stats.hitCount;
        this.missCount += stats.missCount;
    }

    public long getMemorySizeInBytes() {
        return this.memorySize;
    }

    public ByteSizeValue getMemorySize() {
        return new ByteSizeValue(memorySize);
    }

    public long getEvictions() {
        return this.evictions;
    }

    public long getHitCount() {
        return this.hitCount;
    }

    public long getMissCount() {
        return this.missCount;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        memorySize = in.readVLong();
        evictions = in.readVLong();
        hitCount = in.readVLong();
        missCount = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(memorySize);
        out.writeVLong(evictions);
        out.writeVLong(hitCount);
        out.writeVLong(missCount);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.QUERY_CACHE_STATS);
        builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
        builder.field(Fields.EVICTIONS, getEvictions());
        builder.field(Fields.HIT_COUNT, getHitCount());
        builder.field(Fields.MISS_COUNT, getMissCount());
        builder.endObject();
        return builder;
    }

    static final class Fields {
        static final XContentBuilderString QUERY_CACHE_STATS = new XContentBuilderString("request_cache");
        static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
        static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
        static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
        static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count");
        static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count");
    }
}
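The class above only accumulates counters; combining them is left to callers. A minimal sketch (a hypothetical helper, not part of this commit) of rolling per-shard stats up into a total and deriving a hit ratio, using only the public API shown above:

    // Hypothetical helper built on the RequestCacheStats API above; not part of this change.
    public final class RequestCacheStatsSummary {

        // Sum per-shard stats into one total via RequestCacheStats#add.
        public static RequestCacheStats total(Iterable<RequestCacheStats> perShard) {
            RequestCacheStats total = new RequestCacheStats();
            for (RequestCacheStats stats : perShard) {
                total.add(stats);
            }
            return total;
        }

        // Hits divided by total lookups; 0 when the cache has not been queried yet.
        public static double hitRatio(RequestCacheStats stats) {
            long lookups = stats.getHitCount() + stats.getMissCount();
            return lookups == 0 ? 0d : (double) stats.getHitCount() / lookups;
        }
    }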
@ -17,7 +17,7 @@
 * under the License.
 */

-package org.elasticsearch.index.cache.query;
+package org.elasticsearch.index.cache.request;

import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
@ -28,23 +28,23 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.indices.cache.request.IndicesRequestCache;

/**
 */
-public class ShardQueryCache extends AbstractIndexShardComponent implements RemovalListener<IndicesQueryCache.Key, IndicesQueryCache.Value> {
+public class ShardRequestCache extends AbstractIndexShardComponent implements RemovalListener<IndicesRequestCache.Key, IndicesRequestCache.Value> {

    final CounterMetric evictionsMetric = new CounterMetric();
    final CounterMetric totalMetric = new CounterMetric();
    final CounterMetric hitCount = new CounterMetric();
    final CounterMetric missCount = new CounterMetric();

-    public ShardQueryCache(ShardId shardId, @IndexSettings Settings indexSettings) {
+    public ShardRequestCache(ShardId shardId, @IndexSettings Settings indexSettings) {
        super(shardId, indexSettings);
    }

-    public QueryCacheStats stats() {
-        return new QueryCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count());
+    public RequestCacheStats stats() {
+        return new RequestCacheStats(totalMetric.count(), evictionsMetric.count(), hitCount.count(), missCount.count());
    }

    public void onHit() {
@ -55,12 +55,12 @@ public class ShardQueryCache extends AbstractIndexShardComponent implements Remo
        missCount.inc();
    }

-    public void onCached(IndicesQueryCache.Key key, IndicesQueryCache.Value value) {
+    public void onCached(IndicesRequestCache.Key key, IndicesRequestCache.Value value) {
        totalMetric.inc(key.ramBytesUsed() + value.ramBytesUsed());
    }

    @Override
-    public void onRemoval(RemovalNotification<IndicesQueryCache.Key, IndicesQueryCache.Value> removalNotification) {
+    public void onRemoval(RemovalNotification<IndicesRequestCache.Key, IndicesRequestCache.Value> removalNotification) {
        if (removalNotification.wasEvicted()) {
            evictionsMetric.inc();
        }
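For context, the listener above is driven by the node-level cache it is registered with. A minimal sketch of that wiring, assuming a Guava cache like the one shown for the old IndicesQueryCache later in this diff; sizeInBytes and requestCache are placeholders for the node-level size limit and the IndicesRequestCache instance acting as the RemovalListener:

    // Sketch only: weight-based Guava cache whose evictions reach ShardRequestCache via onRemoval.
    Cache<IndicesRequestCache.Key, IndicesRequestCache.Value> cache = CacheBuilder.newBuilder()
            .maximumWeight(sizeInBytes)
            .weigher(new Weigher<IndicesRequestCache.Key, IndicesRequestCache.Value>() {
                @Override
                public int weigh(IndicesRequestCache.Key key, IndicesRequestCache.Value value) {
                    return (int) (key.ramBytesUsed() + value.ramBytesUsed());
                }
            })
            .removalListener(requestCache)  // forwards notifications to the owning shard's ShardRequestCache
            .build();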
@ -35,7 +35,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.store.Store;
@ -76,8 +75,8 @@ public final class EngineConfig {
    private final CodecService codecService;
    private final Engine.FailedEngineListener failedEngineListener;
    private final boolean forceNewTranslog;
-    private final QueryCache filterCache;
-    private final QueryCachingPolicy filterCachingPolicy;
+    private final QueryCache queryCache;
+    private final QueryCachingPolicy queryCachingPolicy;

    /**
     * Index setting for index concurrency / number of threadstates in the indexwriter.
@ -144,7 +143,7 @@ public final class EngineConfig {
                Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
                MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
                Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
-               TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy, TranslogConfig translogConfig) {
+               TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig) {
        this.shardId = shardId;
        this.indexSettings = indexSettings;
        this.threadPool = threadPool;
@ -168,8 +167,8 @@ public final class EngineConfig {
        updateVersionMapSize();
        this.translogRecoveryPerformer = translogRecoveryPerformer;
        this.forceNewTranslog = indexSettings.getAsBoolean(INDEX_FORCE_NEW_TRANSLOG, false);
-        this.filterCache = filterCache;
-        this.filterCachingPolicy = filterCachingPolicy;
+        this.queryCache = queryCache;
+        this.queryCachingPolicy = queryCachingPolicy;
        this.translogConfig = translogConfig;
    }

@ -409,17 +408,17 @@ public final class EngineConfig {
    }

    /**
-     * Return the cache to use for filters.
+     * Return the cache to use for queries.
     */
-    public QueryCache getFilterCache() {
-        return filterCache;
+    public QueryCache getQueryCache() {
+        return queryCache;
    }

    /**
-     * Return the policy to use when caching filters.
+     * Return the policy to use when caching queries.
     */
-    public QueryCachingPolicy getFilterCachingPolicy() {
-        return filterCachingPolicy;
+    public QueryCachingPolicy getQueryCachingPolicy() {
+        return queryCachingPolicy;
    }

    /**
@ -41,8 +41,8 @@ public class EngineSearcherFactory extends SearcherFactory {
    @Override
    public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
        IndexSearcher searcher = super.newSearcher(reader, previousReader);
-        searcher.setQueryCache(engineConfig.getFilterCache());
-        searcher.setQueryCachingPolicy(engineConfig.getFilterCachingPolicy());
+        searcher.setQueryCache(engineConfig.getQueryCache());
+        searcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
        searcher.setSimilarity(engineConfig.getSimilarity());
        return searcher;
    }
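The two setters above are plain Lucene API. A short illustrative sketch outside of Elasticsearch (reader and similarity are placeholders, and the cache sizes are made up for the example):

    // Lucene-level view of what EngineSearcherFactory configures on every new searcher.
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(new LRUQueryCache(1000, 250 * 1024 * 1024));     // at most 1000 queries / 250 MB
    searcher.setQueryCachingPolicy(new UsageTrackingQueryCachingPolicy());  // only cache queries that get reused
    searcher.setSimilarity(similarity);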
@ -20,6 +20,7 @@
package org.elasticsearch.index.query;

import com.google.common.collect.Lists;

import org.apache.lucene.index.Term;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause.Occur;
@ -38,7 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.indices.cache.filter.terms.TermsLookup;
+import org.elasticsearch.indices.cache.query.terms.TermsLookup;
import org.elasticsearch.search.internal.SearchContext;

import java.io.IOException;
@ -39,7 +39,7 @@ import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogService;
import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.ttl.IndicesTTLService;

/**
@ -110,7 +110,8 @@ public class IndexDynamicSettingsModule extends AbstractModule {
        indexDynamicSettings.addDynamicSetting(TranslogService.INDEX_TRANSLOG_DISABLE_FLUSH);
        indexDynamicSettings.addDynamicSetting(TranslogConfig.INDEX_TRANSLOG_DURABILITY);
        indexDynamicSettings.addDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED);
-        indexDynamicSettings.addDynamicSetting(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, Validator.BOOLEAN);
+        indexDynamicSettings.addDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
+        indexDynamicSettings.addDynamicSetting(IndicesRequestCache.DEPRECATED_INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);
        indexDynamicSettings.addDynamicSetting(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, Validator.TIME);
    }

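Both the new and the deprecated enable settings stay registered so existing indices keep working. A sketch of the fallback pattern this change uses elsewhere (see the IndicesQueryCache constructor further below); the key names here are placeholders, the real constants live in IndicesRequestCache:

    // Prefer the new key, fall back to the deprecated one, and warn when the old key is still used.
    String enabled = settings.get(NEW_REQUEST_CACHE_SETTING);
    if (enabled == null) {
        enabled = settings.get(DEPRECATED_REQUEST_CACHE_SETTING);
        if (enabled != null) {
            deprecationLogger.deprecated("The [" + DEPRECATED_REQUEST_CACHE_SETTING
                    + "] setting is now deprecated, use [" + NEW_REQUEST_CACHE_SETTING + "] instead");
        }
    }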
@ -21,6 +21,7 @@ package org.elasticsearch.index.shard;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
import com.google.common.base.Preconditions;
|
||||
|
||||
import org.apache.lucene.codecs.PostingsFormat;
|
||||
import org.apache.lucene.index.CheckIndex;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
@ -58,8 +59,8 @@ import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.aliases.IndexAliasesService;
|
||||
import org.elasticsearch.index.cache.IndexCache;
|
||||
import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheStats;
|
||||
import org.elasticsearch.index.cache.query.ShardQueryCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheStats;
|
||||
import org.elasticsearch.index.cache.request.ShardRequestCache;
|
||||
import org.elasticsearch.index.codec.CodecService;
|
||||
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
|
||||
import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
|
||||
@ -99,7 +100,7 @@ import org.elasticsearch.index.warmer.WarmerStats;
|
||||
import org.elasticsearch.indices.IndicesLifecycle;
|
||||
import org.elasticsearch.indices.IndicesWarmer;
|
||||
import org.elasticsearch.indices.InternalIndicesLifecycle;
|
||||
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat;
|
||||
import org.elasticsearch.search.suggest.completion.CompletionStats;
|
||||
@ -134,7 +135,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
private final ShardSearchStats searchService;
|
||||
private final ShardGetService getService;
|
||||
private final ShardIndexWarmerService shardWarmerService;
|
||||
private final ShardQueryCache shardQueryCache;
|
||||
private final ShardRequestCache shardQueryCache;
|
||||
private final ShardFieldData shardFieldData;
|
||||
private final PercolatorQueriesRegistry percolatorQueriesRegistry;
|
||||
private final ShardPercolateService shardPercolateService;
|
||||
@ -154,7 +155,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
private final EngineConfig engineConfig;
|
||||
private final TranslogConfig translogConfig;
|
||||
private final MergePolicyConfig mergePolicyConfig;
|
||||
private final IndicesFilterCache indicesFilterCache;
|
||||
private final IndicesQueryCache indicesFilterCache;
|
||||
private final StoreRecoveryService storeRecoveryService;
|
||||
|
||||
private TimeValue refreshInterval;
|
||||
@ -191,7 +192,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
@Inject
|
||||
public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService,
|
||||
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService,
|
||||
IndicesFilterCache indicesFilterCache, ShardPercolateService shardPercolateService, CodecService codecService,
|
||||
IndicesQueryCache indicesFilterCache, ShardPercolateService shardPercolateService, CodecService codecService,
|
||||
ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, IndexService indexService,
|
||||
@Nullable IndicesWarmer warmer, SnapshotDeletionPolicy deletionPolicy, SimilarityService similarityService, EngineFactory factory,
|
||||
ClusterService clusterService, ShardPath path, BigArrays bigArrays) {
|
||||
@ -219,7 +220,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
this.searchService = new ShardSearchStats(indexSettings);
|
||||
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
|
||||
this.indicesFilterCache = indicesFilterCache;
|
||||
this.shardQueryCache = new ShardQueryCache(shardId, indexSettings);
|
||||
this.shardQueryCache = new ShardRequestCache(shardId, indexSettings);
|
||||
this.shardFieldData = new ShardFieldData();
|
||||
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, indicesLifecycle, mapperService, indexFieldDataService, shardPercolateService);
|
||||
this.shardPercolateService = shardPercolateService;
|
||||
@ -296,7 +297,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
return this.shardWarmerService;
|
||||
}
|
||||
|
||||
public ShardQueryCache queryCache() {
|
||||
public ShardRequestCache requestCache() {
|
||||
return this.shardQueryCache;
|
||||
}
|
||||
|
||||
@ -614,7 +615,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
return shardWarmerService.stats();
|
||||
}
|
||||
|
||||
public FilterCacheStats filterCacheStats() {
|
||||
public QueryCacheStats queryCacheStats() {
|
||||
return indicesFilterCache.getStats(shardId);
|
||||
}
|
||||
|
||||
@ -1345,7 +1346,7 @@ public class IndexShard extends AbstractIndexShardComponent {
|
||||
};
|
||||
return new EngineConfig(shardId,
|
||||
threadPool, indexingService, indexSettingsService.indexSettings(), warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
|
||||
mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy(), translogConfig);
|
||||
mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), indexCache.queryPolicy(), translogConfig);
|
||||
}
|
||||
|
||||
private static class IndexShardOperationCounter extends AbstractRefCounted {
|
||||
|
@ -42,7 +42,7 @@ import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.termvectors.ShardTermVectorsService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
@ -60,7 +60,7 @@ public final class ShadowIndexShard extends IndexShard {
                IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService,
                ThreadPool threadPool, MapperService mapperService,
                IndexQueryParserService queryParserService, IndexCache indexCache,
-               IndexAliasesService indexAliasesService, IndicesFilterCache indicesFilterCache,
+               IndexAliasesService indexAliasesService, IndicesQueryCache indicesFilterCache,
                ShardPercolateService shardPercolateService, CodecService codecService,
                ShardTermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
                IndexService indexService, @Nullable IndicesWarmer warmer,
@ -20,6 +20,7 @@
package org.elasticsearch.indices;

import com.google.common.collect.ImmutableList;

import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.common.inject.AbstractModule;
@ -27,8 +28,8 @@ import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.analysis.IndicesAnalysisModule;
-import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
@ -71,8 +72,8 @@ public class IndicesModule extends AbstractModule implements SpawnModules {
        bind(IndicesClusterStateService.class).asEagerSingleton();
        bind(IndexingMemoryController.class).asEagerSingleton();
        bind(SyncedFlushService.class).asEagerSingleton();
-        bind(IndicesFilterCache.class).asEagerSingleton();
        bind(IndicesQueryCache.class).asEagerSingleton();
+        bind(IndicesRequestCache.class).asEagerSingleton();
        bind(IndicesFieldDataCache.class).asEagerSingleton();
        bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton();
        bind(IndicesTTLService.class).asEagerSingleton();
@ -33,8 +33,8 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.Index;
-import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
+import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
@ -132,13 +132,13 @@ public class NodeIndicesStats implements Streamable, Serializable, ToXContent {
    }

    @Nullable
-    public FilterCacheStats getFilterCache() {
-        return stats.getFilterCache();
+    public QueryCacheStats getFilterCache() {
+        return stats.getQueryCache();
    }

    @Nullable
-    public QueryCacheStats getQueryCache() {
-        return stats.getQueryCache();
+    public RequestCacheStats getQueryCache() {
+        return stats.getRequestCache();
    }

    @Nullable
@ -1,308 +0,0 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.cache.filter;
|
||||
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Explanation;
|
||||
import org.apache.lucene.search.LRUQueryCache;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.QueryCache;
|
||||
import org.apache.lucene.search.QueryCachingPolicy;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.lucene.ShardCoreKeyMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.MemorySizeValue;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheStats;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
public class IndicesFilterCache extends AbstractComponent implements QueryCache, Closeable {
|
||||
|
||||
public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size";
|
||||
public static final String INDICES_CACHE_QUERY_COUNT = "indices.cache.filter.count";
|
||||
|
||||
private final LRUQueryCache cache;
|
||||
private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
|
||||
private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
|
||||
private volatile long sharedRamBytesUsed;
|
||||
|
||||
// This is a hack for the fact that the close listener for the
|
||||
// ShardCoreKeyMap will be called before onDocIdSetEviction
|
||||
// See onDocIdSetEviction for more info
|
||||
private final Map<Object, StatsAndCount> stats2 = new IdentityHashMap<>();
|
||||
|
||||
@Inject
|
||||
public IndicesFilterCache(Settings settings) {
|
||||
super(settings);
|
||||
final String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE, "10%");
|
||||
final ByteSizeValue size = MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString, INDICES_CACHE_QUERY_SIZE);
|
||||
final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 1000);
|
||||
logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], max filter count [{}]",
|
||||
sizeString, size, count);
|
||||
cache = new LRUQueryCache(count, size.bytes()) {
|
||||
|
||||
private Stats getStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
if (shardId == null) {
|
||||
return null;
|
||||
}
|
||||
return shardStats.get(shardId);
|
||||
}
|
||||
|
||||
private Stats getOrCreateStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
Stats stats = shardStats.get(shardId);
|
||||
if (stats == null) {
|
||||
stats = new Stats();
|
||||
shardStats.put(shardId, stats);
|
||||
}
|
||||
return stats;
|
||||
}
|
||||
|
||||
// It's ok to not protect these callbacks by a lock since it is
|
||||
// done in LRUQueryCache
|
||||
@Override
|
||||
protected void onClear() {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onClear();
|
||||
for (Stats stats : shardStats.values()) {
|
||||
// don't throw away hit/miss
|
||||
stats.cacheSize = 0;
|
||||
stats.ramBytesUsed = 0;
|
||||
}
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryCache(Query filter, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onQueryCache(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed += ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryEviction(Query filter, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onQueryEviction(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed -= ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.cacheSize += 1;
|
||||
shardStats.cacheCount += 1;
|
||||
shardStats.ramBytesUsed += ramBytesUsed;
|
||||
|
||||
StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
if (statsAndCount == null) {
|
||||
statsAndCount = new StatsAndCount(shardStats);
|
||||
stats2.put(readerCoreKey, statsAndCount);
|
||||
}
|
||||
statsAndCount.count += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
|
||||
// We can't use ShardCoreKeyMap here because its core closed
|
||||
// listener is called before the listener of the cache which
|
||||
// triggers this eviction. So instead we use use stats2 that
|
||||
// we only evict when nothing is cached anymore on the segment
|
||||
// instead of relying on close listeners
|
||||
final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
final Stats shardStats = statsAndCount.stats;
|
||||
shardStats.cacheSize -= numEntries;
|
||||
shardStats.ramBytesUsed -= sumRamBytesUsed;
|
||||
statsAndCount.count -= numEntries;
|
||||
if (statsAndCount.count == 0) {
|
||||
stats2.remove(readerCoreKey);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onHit(Object readerCoreKey, Query filter) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onHit(readerCoreKey, filter);
|
||||
final Stats shardStats = getStats(readerCoreKey);
|
||||
shardStats.hitCount += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onMiss(Object readerCoreKey, Query filter) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onMiss(readerCoreKey, filter);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.missCount += 1;
|
||||
}
|
||||
};
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
/** Get usage statistics for the given shard. */
|
||||
public FilterCacheStats getStats(ShardId shard) {
|
||||
final Map<ShardId, FilterCacheStats> stats = new HashMap<>();
|
||||
for (Map.Entry<ShardId, Stats> entry : shardStats.entrySet()) {
|
||||
stats.put(entry.getKey(), entry.getValue().toQueryCacheStats());
|
||||
}
|
||||
FilterCacheStats shardStats = new FilterCacheStats();
|
||||
FilterCacheStats info = stats.get(shard);
|
||||
if (info == null) {
|
||||
info = new FilterCacheStats();
|
||||
}
|
||||
shardStats.add(info);
|
||||
|
||||
// We also have some shared ram usage that we try to distribute to
|
||||
// proportionally to their number of cache entries of each shard
|
||||
long totalSize = 0;
|
||||
for (FilterCacheStats s : stats.values()) {
|
||||
totalSize += s.getCacheSize();
|
||||
}
|
||||
final double weight = totalSize == 0
|
||||
? 1d / stats.size()
|
||||
: shardStats.getCacheSize() / totalSize;
|
||||
final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed);
|
||||
shardStats.add(new FilterCacheStats(additionalRamBytesUsed, 0, 0, 0, 0));
|
||||
return shardStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Weight doCache(Weight weight, QueryCachingPolicy policy) {
|
||||
while (weight instanceof CachingWeightWrapper) {
|
||||
weight = ((CachingWeightWrapper) weight).in;
|
||||
}
|
||||
final Weight in = cache.doCache(weight, policy);
|
||||
// We wrap the weight to track the readers it sees and map them with
|
||||
// the shards they belong to
|
||||
return new CachingWeightWrapper(in);
|
||||
}
|
||||
|
||||
private class CachingWeightWrapper extends Weight {
|
||||
|
||||
private final Weight in;
|
||||
|
||||
protected CachingWeightWrapper(Weight in) {
|
||||
super(in.getQuery());
|
||||
this.in = in;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void extractTerms(Set<Term> terms) {
|
||||
in.extractTerms(terms);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
|
||||
shardKeyMap.add(context.reader());
|
||||
return in.explain(context, doc);
|
||||
}
|
||||
|
||||
@Override
|
||||
public float getValueForNormalization() throws IOException {
|
||||
return in.getValueForNormalization();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void normalize(float norm, float topLevelBoost) {
|
||||
in.normalize(norm, topLevelBoost);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
|
||||
shardKeyMap.add(context.reader());
|
||||
return in.scorer(context, acceptDocs);
|
||||
}
|
||||
}
|
||||
|
||||
/** Clear all entries that belong to the given index. */
|
||||
public void clearIndex(String index) {
|
||||
final Set<Object> coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index);
|
||||
for (Object coreKey : coreCacheKeys) {
|
||||
cache.clearCoreCacheKey(coreKey);
|
||||
}
|
||||
|
||||
// This cache stores two things: filters, and doc id sets. Calling
|
||||
// clear only removes the doc id sets, but if we reach the situation
|
||||
// that the cache does not contain any DocIdSet anymore, then it
|
||||
// probably means that the user wanted to remove everything.
|
||||
if (cache.getCacheSize() == 0) {
|
||||
cache.clear();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
assert shardKeyMap.size() == 0 : shardKeyMap.size();
|
||||
assert shardStats.isEmpty() : shardStats.keySet();
|
||||
assert stats2.isEmpty() : stats2;
|
||||
cache.clear();
|
||||
}
|
||||
|
||||
private static class Stats implements Cloneable {
|
||||
|
||||
volatile long ramBytesUsed;
|
||||
volatile long hitCount;
|
||||
volatile long missCount;
|
||||
volatile long cacheCount;
|
||||
volatile long cacheSize;
|
||||
|
||||
FilterCacheStats toQueryCacheStats() {
|
||||
return new FilterCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize);
|
||||
}
|
||||
}
|
||||
|
||||
private static class StatsAndCount {
|
||||
int count;
|
||||
final Stats stats;
|
||||
|
||||
StatsAndCount(Stats stats) {
|
||||
this.stats = stats;
|
||||
this.count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
private boolean empty(Stats stats) {
|
||||
if (stats == null) {
|
||||
return true;
|
||||
}
|
||||
return stats.cacheSize == 0 && stats.ramBytesUsed == 0;
|
||||
}
|
||||
|
||||
public void onClose(ShardId shardId) {
|
||||
assert empty(shardStats.get(shardId));
|
||||
shardStats.remove(shardId);
|
||||
}
|
||||
}
|
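The getStats method of the deleted class above attributes the cache-wide sharedRamBytesUsed to shards in proportion to their cache entry counts, and splits it evenly when nothing is cached. A small worked sketch with made-up numbers; note that `shardStats.getCacheSize() / totalSize` in the original divides two longs, so the fraction is truncated before it is widened to double, while the sketch casts explicitly to show the intended proportion:

    // Illustrative numbers only; not taken from the commit.
    long sharedRamBytesUsed = 1_000_000;     // RAM used by cached queries, shared across shards
    long shardCacheSize = 30;                // doc id sets cached for this shard
    long totalCacheSize = 120;               // doc id sets cached across all shards
    int numShardsWithStats = 4;
    double weight = totalCacheSize == 0 ? 1d / numShardsWithStats : (double) shardCacheSize / totalCacheSize;
    long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed);   // 0.25 * 1_000_000 = 250_000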
@ -19,453 +19,302 @@
|
||||
|
||||
package org.elasticsearch.indices.cache.query;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectHashSet;
|
||||
import com.carrotsearch.hppc.ObjectSet;
|
||||
import com.google.common.cache.Cache;
|
||||
import com.google.common.cache.CacheBuilder;
|
||||
import com.google.common.cache.RemovalListener;
|
||||
import com.google.common.cache.RemovalNotification;
|
||||
import com.google.common.cache.Weigher;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Explanation;
|
||||
import org.apache.lucene.search.LRUQueryCache;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.QueryCache;
|
||||
import org.apache.lucene.search.QueryCachingPolicy;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.lucene.ShardCoreKeyMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.MemorySizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardState;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.search.query.QueryPhase;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheStats;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.Iterator;
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.IdentityHashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
import static org.elasticsearch.common.Strings.hasLength;
|
||||
public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable {
|
||||
|
||||
/**
|
||||
* The indices query cache allows to cache a shard level query stage responses, helping with improving
|
||||
* similar requests that are potentially expensive (because of aggs for example). The cache is fully coherent
|
||||
* with the semantics of NRT (the index reader version is part of the cache key), and relies on size based
|
||||
* eviction to evict old reader associated cache entries as well as scheduler reaper to clean readers that
|
||||
* are no longer used or closed shards.
|
||||
* <p/>
|
||||
* Currently, the cache is only enabled for {@link SearchType#COUNT}, and can only be opted in on an index
|
||||
* level setting that can be dynamically changed and defaults to false.
|
||||
* <p/>
|
||||
* There are still several TODOs left in this class, some easily addressable, some more complex, but the support
|
||||
* is functional.
|
||||
*/
|
||||
public class IndicesQueryCache extends AbstractComponent implements RemovalListener<IndicesQueryCache.Key, IndicesQueryCache.Value> {
|
||||
public static final String INDICES_CACHE_QUERY_SIZE = "indices.queries.cache.size";
|
||||
@Deprecated
|
||||
public static final String DEPRECATED_INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size";
|
||||
public static final String INDICES_CACHE_QUERY_COUNT = "indices.queries.cache.count";
|
||||
|
||||
/**
|
||||
* A setting to enable or disable query caching on an index level. Its dynamic by default
|
||||
* since we are checking on the cluster state IndexMetaData always.
|
||||
*/
|
||||
public static final String INDEX_CACHE_QUERY_ENABLED = "index.cache.query.enable";
|
||||
public static final String INDICES_CACHE_QUERY_CLEAN_INTERVAL = "indices.cache.query.clean_interval";
|
||||
private final LRUQueryCache cache;
|
||||
private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
|
||||
private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
|
||||
private volatile long sharedRamBytesUsed;
|
||||
|
||||
public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.query.size";
|
||||
public static final String INDICES_CACHE_QUERY_EXPIRE = "indices.cache.query.expire";
|
||||
public static final String INDICES_CACHE_QUERY_CONCURRENCY_LEVEL = "indices.cache.query.concurrency_level";
|
||||
|
||||
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
private final ClusterService clusterService;
|
||||
|
||||
private final TimeValue cleanInterval;
|
||||
private final Reaper reaper;
|
||||
|
||||
final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
|
||||
final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
|
||||
|
||||
|
||||
//TODO make these changes configurable on the cluster level
|
||||
private final String size;
|
||||
private final TimeValue expire;
|
||||
private final int concurrencyLevel;
|
||||
|
||||
private volatile Cache<Key, Value> cache;
|
||||
// This is a hack for the fact that the close listener for the
|
||||
// ShardCoreKeyMap will be called before onDocIdSetEviction
|
||||
// See onDocIdSetEviction for more info
|
||||
private final Map<Object, StatsAndCount> stats2 = new IdentityHashMap<>();
|
||||
|
||||
@Inject
|
||||
public IndicesQueryCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) {
|
||||
public IndicesQueryCache(Settings settings) {
|
||||
super(settings);
|
||||
this.clusterService = clusterService;
|
||||
this.threadPool = threadPool;
|
||||
this.cleanInterval = settings.getAsTime(INDICES_CACHE_QUERY_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));
|
||||
// this cache can be very small yet still be very effective
|
||||
this.size = settings.get(INDICES_CACHE_QUERY_SIZE, "1%");
|
||||
this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
|
||||
// defaults to 4, but this is a busy map for all indices, increase it a bit by default
|
||||
this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
|
||||
if (concurrencyLevel <= 0) {
|
||||
throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
|
||||
String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE);
|
||||
if (sizeString == null) {
|
||||
sizeString = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE);
|
||||
if (sizeString != null) {
|
||||
deprecationLogger.deprecated("The [" + DEPRECATED_INDICES_CACHE_QUERY_SIZE
|
||||
+ "] settings is now deprecated, use [" + INDICES_CACHE_QUERY_SIZE + "] instead");
|
||||
}
|
||||
}
|
||||
buildCache();
|
||||
if (sizeString == null) {
|
||||
sizeString = "10%";
|
||||
}
|
||||
final ByteSizeValue size = MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString, INDICES_CACHE_QUERY_SIZE);
|
||||
final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 1000);
|
||||
logger.debug("using [node] query cache with size [{}], actual_size [{}], max filter count [{}]",
|
||||
sizeString, size, count);
|
||||
cache = new LRUQueryCache(count, size.bytes()) {
|
||||
|
||||
this.reaper = new Reaper();
|
||||
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
|
||||
private Stats getStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
if (shardId == null) {
|
||||
return null;
|
||||
}
|
||||
return shardStats.get(shardId);
|
||||
}
|
||||
|
||||
private Stats getOrCreateStats(Object coreKey) {
|
||||
final ShardId shardId = shardKeyMap.getShardId(coreKey);
|
||||
Stats stats = shardStats.get(shardId);
|
||||
if (stats == null) {
|
||||
stats = new Stats();
|
||||
shardStats.put(shardId, stats);
|
||||
}
|
||||
return stats;
|
||||
}
|
||||
|
||||
// It's ok to not protect these callbacks by a lock since it is
|
||||
// done in LRUQueryCache
|
||||
@Override
|
||||
protected void onClear() {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onClear();
|
||||
for (Stats stats : shardStats.values()) {
|
||||
// don't throw away hit/miss
|
||||
stats.cacheSize = 0;
|
||||
stats.ramBytesUsed = 0;
|
||||
}
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryCache(Query filter, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onQueryCache(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed += ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onQueryEviction(Query filter, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onQueryEviction(filter, ramBytesUsed);
|
||||
sharedRamBytesUsed -= ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.cacheSize += 1;
|
||||
shardStats.cacheCount += 1;
|
||||
shardStats.ramBytesUsed += ramBytesUsed;
|
||||
|
||||
StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
if (statsAndCount == null) {
|
||||
statsAndCount = new StatsAndCount(shardStats);
|
||||
stats2.put(readerCoreKey, statsAndCount);
|
||||
}
|
||||
statsAndCount.count += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
|
||||
// We can't use ShardCoreKeyMap here because its core closed
|
||||
// listener is called before the listener of the cache which
|
||||
// triggers this eviction. So instead we use use stats2 that
|
||||
// we only evict when nothing is cached anymore on the segment
|
||||
// instead of relying on close listeners
|
||||
final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
|
||||
final Stats shardStats = statsAndCount.stats;
|
||||
shardStats.cacheSize -= numEntries;
|
||||
shardStats.ramBytesUsed -= sumRamBytesUsed;
|
||||
statsAndCount.count -= numEntries;
|
||||
if (statsAndCount.count == 0) {
|
||||
stats2.remove(readerCoreKey);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onHit(Object readerCoreKey, Query filter) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onHit(readerCoreKey, filter);
|
||||
final Stats shardStats = getStats(readerCoreKey);
|
||||
shardStats.hitCount += 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onMiss(Object readerCoreKey, Query filter) {
|
||||
assert Thread.holdsLock(this);
|
||||
super.onMiss(readerCoreKey, filter);
|
||||
final Stats shardStats = getOrCreateStats(readerCoreKey);
|
||||
shardStats.missCount += 1;
|
||||
}
|
||||
};
|
||||
sharedRamBytesUsed = 0;
|
||||
}
|
||||
|
||||
private void buildCache() {
|
||||
long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes();
|
||||
|
||||
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.newBuilder()
|
||||
.maximumWeight(sizeInBytes).weigher(new QueryCacheWeigher()).removalListener(this);
|
||||
cacheBuilder.concurrencyLevel(concurrencyLevel);
|
||||
|
||||
if (expire != null) {
|
||||
cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
|
||||
/** Get usage statistics for the given shard. */
|
||||
public QueryCacheStats getStats(ShardId shard) {
|
||||
final Map<ShardId, QueryCacheStats> stats = new HashMap<>();
|
||||
for (Map.Entry<ShardId, Stats> entry : shardStats.entrySet()) {
|
||||
stats.put(entry.getKey(), entry.getValue().toQueryCacheStats());
|
||||
}
|
||||
|
||||
cache = cacheBuilder.build();
|
||||
}
|
||||
|
||||
private static class QueryCacheWeigher implements Weigher<Key, Value> {
|
||||
|
||||
@Override
|
||||
public int weigh(Key key, Value value) {
|
||||
return (int) (key.ramBytesUsed() + value.ramBytesUsed());
|
||||
QueryCacheStats shardStats = new QueryCacheStats();
|
||||
QueryCacheStats info = stats.get(shard);
|
||||
if (info == null) {
|
||||
info = new QueryCacheStats();
|
||||
}
|
||||
}
|
||||
shardStats.add(info);
|
||||
|
||||
public void close() {
|
||||
reaper.close();
|
||||
cache.invalidateAll();
|
||||
}
|
||||
|
||||
public void clear(IndexShard shard) {
|
||||
if (shard == null) {
|
||||
return;
|
||||
// We also have some shared ram usage that we try to distribute to
|
||||
// proportionally to their number of cache entries of each shard
|
||||
long totalSize = 0;
|
||||
for (QueryCacheStats s : stats.values()) {
|
||||
totalSize += s.getCacheSize();
|
||||
}
|
||||
keysToClean.add(new CleanupKey(shard, -1));
|
||||
logger.trace("{} explicit cache clear", shard.shardId());
|
||||
reaper.reap();
|
||||
final double weight = totalSize == 0
|
||||
? 1d / stats.size()
|
||||
: shardStats.getCacheSize() / totalSize;
|
||||
final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed);
|
||||
shardStats.add(new QueryCacheStats(additionalRamBytesUsed, 0, 0, 0, 0));
|
||||
return shardStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(RemovalNotification<Key, Value> notification) {
|
||||
if (notification.getKey() == null) {
|
||||
return;
|
||||
public Weight doCache(Weight weight, QueryCachingPolicy policy) {
|
||||
while (weight instanceof CachingWeightWrapper) {
|
||||
weight = ((CachingWeightWrapper) weight).in;
|
||||
}
|
||||
notification.getKey().shard.queryCache().onRemoval(notification);
|
||||
final Weight in = cache.doCache(weight, policy);
|
||||
// We wrap the weight to track the readers it sees and map them with
|
||||
// the shards they belong to
|
||||
return new CachingWeightWrapper(in);
|
||||
}
|
||||
|
||||
/**
|
||||
* Can the shard request be cached at all?
|
||||
*/
|
||||
public boolean canCache(ShardSearchRequest request, SearchContext context) {
|
||||
// TODO: for now, template is not supported, though we could use the generated bytes as the key
|
||||
if (hasLength(request.templateSource())) {
|
||||
return false;
|
||||
private class CachingWeightWrapper extends Weight {
|
||||
|
||||
private final Weight in;
|
||||
|
||||
protected CachingWeightWrapper(Weight in) {
|
||||
super(in.getQuery());
|
||||
this.in = in;
|
||||
}
|
||||
|
||||
// for now, only enable it for requests with no hits
|
||||
if (context.size() != 0) {
|
||||
return false;
|
||||
@Override
|
||||
public void extractTerms(Set<Term> terms) {
|
||||
in.extractTerms(terms);
|
||||
}
|
||||
|
||||
// We cannot cache with DFS because results depend not only on the content of the index but also
|
||||
// on the overridden statistics. So if you ran two queries on the same index with different stats
|
||||
// (because an other shard was updated) you would get wrong results because of the scores
|
||||
// (think about top_hits aggs or scripts using the score)
|
||||
if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) {
|
||||
return false;
|
||||
@Override
|
||||
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
|
||||
shardKeyMap.add(context.reader());
|
||||
return in.explain(context, doc);
|
||||
}
|
||||
|
||||
IndexMetaData index = clusterService.state().getMetaData().index(request.index());
|
||||
if (index == null) { // in case we didn't yet have the cluster state, or it just got deleted
|
||||
return false;
|
||||
@Override
|
||||
public float getValueForNormalization() throws IOException {
|
||||
return in.getValueForNormalization();
|
||||
}
|
||||
// if not explicitly set in the request, use the index setting, if not, use the request
|
||||
if (request.queryCache() == null) {
|
||||
if (!index.settings().getAsBoolean(INDEX_CACHE_QUERY_ENABLED, Boolean.FALSE)) {
|
||||
return false;
|
||||
}
|
||||
} else if (!request.queryCache()) {
|
||||
return false;
|
||||
}
|
||||
// if the reader is not a directory reader, we can't get the version from it
|
||||
if (!(context.searcher().getIndexReader() instanceof DirectoryReader)) {
|
||||
return false;
|
||||
}
|
||||
// if now in millis is used (or in the future, a more generic "isDeterministic" flag
|
||||
// then we can't cache based on "now" key within the search request, as it is not deterministic
|
||||
if (context.nowInMillisUsed()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
|
||||
* value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
|
||||
* to have a single load operation that will cause other requests with the same key to wait till its loaded an reuse
|
||||
* the same cache.
|
||||
*/
|
||||
public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception {
|
||||
assert canCache(request, context);
|
||||
Key key = buildKey(request, context);
|
||||
Loader loader = new Loader(queryPhase, context, key);
|
||||
Value value = cache.get(key, loader);
|
||||
if (loader.isLoaded()) {
|
||||
key.shard.queryCache().onMiss();
|
||||
// see if its the first time we see this reader, and make sure to register a cleanup key
|
||||
CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
|
||||
if (!registeredClosedListeners.containsKey(cleanupKey)) {
|
||||
Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
|
||||
if (previous == null) {
|
||||
context.searcher().getIndexReader().addReaderClosedListener(cleanupKey);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
key.shard.queryCache().onHit();
|
||||
// restore the cached query result into the context
|
||||
final QuerySearchResult result = context.queryResult();
|
||||
result.readFromWithId(context.id(), value.reference.streamInput());
|
||||
result.shardTarget(context.shardTarget());
|
||||
@Override
|
||||
public void normalize(float norm, float topLevelBoost) {
|
||||
in.normalize(norm, topLevelBoost);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
|
||||
shardKeyMap.add(context.reader());
|
||||
return in.scorer(context, acceptDocs);
|
||||
}
|
||||
}
|
||||
|
||||
private static class Loader implements Callable<Value> {
|
||||
|
||||
private final QueryPhase queryPhase;
|
||||
private final SearchContext context;
|
||||
private final IndicesQueryCache.Key key;
|
||||
private boolean loaded;
|
||||
|
||||
Loader(QueryPhase queryPhase, SearchContext context, IndicesQueryCache.Key key) {
|
||||
this.queryPhase = queryPhase;
|
||||
this.context = context;
|
||||
this.key = key;
|
||||
/** Clear all entries that belong to the given index. */
|
||||
public void clearIndex(String index) {
|
||||
final Set<Object> coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index);
|
||||
for (Object coreKey : coreCacheKeys) {
|
||||
cache.clearCoreCacheKey(coreKey);
|
||||
}
|
||||
|
||||
public boolean isLoaded() {
|
||||
return this.loaded;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Value call() throws Exception {
|
||||
queryPhase.execute(context);
|
||||
|
||||
/* BytesStreamOutput allows to pass the expected size but by default uses
|
||||
* BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie.
|
||||
* a date histogram with 3 buckets is ~100byte so 16k might be very wasteful
|
||||
* since we don't shrink to the actual size once we are done serializing.
|
||||
* By passing 512 as the expected size we will resize the byte array in the stream
|
||||
* slowly until we hit the page size and don't waste too much memory for small query
|
||||
* results.*/
|
||||
final int expectedSizeInBytes = 512;
|
||||
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
|
||||
context.queryResult().writeToNoId(out);
|
||||
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
|
||||
// the memory properly paged instead of having varied sized bytes
|
||||
final BytesReference reference = out.bytes();
|
||||
loaded = true;
|
||||
Value value = new Value(reference, out.ramBytesUsed());
|
||||
key.shard.queryCache().onCached(key, value);
|
||||
return value;
|
||||
}
|
||||
// This cache stores two things: filters, and doc id sets. Calling
|
||||
// clear only removes the doc id sets, but if we reach the situation
|
||||
// that the cache does not contain any DocIdSet anymore, then it
|
||||
// probably means that the user wanted to remove everything.
|
||||
if (cache.getCacheSize() == 0) {
|
||||
cache.clear();
|
||||
}
|
||||
}
|
||||
|
||||
public static class Value implements Accountable {
|
||||
final BytesReference reference;
|
||||
final long ramBytesUsed;
|
||||
@Override
|
||||
public void close() {
|
||||
assert shardKeyMap.size() == 0 : shardKeyMap.size();
|
||||
assert shardStats.isEmpty() : shardStats.keySet();
|
||||
assert stats2.isEmpty() : stats2;
|
||||
cache.clear();
|
||||
}
|
||||
|
||||
public Value(BytesReference reference, long ramBytesUsed) {
|
||||
this.reference = reference;
|
||||
this.ramBytesUsed = ramBytesUsed;
|
||||
}
|
||||
private static class Stats implements Cloneable {
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return ramBytesUsed;
|
||||
}
|
||||
volatile long ramBytesUsed;
|
||||
volatile long hitCount;
|
||||
volatile long missCount;
|
||||
volatile long cacheCount;
|
||||
volatile long cacheSize;
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
return Collections.emptyList();
|
||||
QueryCacheStats toQueryCacheStats() {
|
||||
return new QueryCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize);
|
||||
}
|
||||
}
|
||||
|
||||
public static class Key implements Accountable {
|
||||
public final IndexShard shard; // use as identity equality
|
||||
public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped
|
||||
public final BytesReference value;
|
||||
private static class StatsAndCount {
|
||||
int count;
|
||||
final Stats stats;
|
||||
|
||||
Key(IndexShard shard, long readerVersion, BytesReference value) {
|
||||
this.shard = shard;
|
||||
this.readerVersion = readerVersion;
|
||||
this.value = value;
|
||||
StatsAndCount(Stats stats) {
|
||||
this.stats = stats;
|
||||
this.count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
// TODO: more detailed ram usage?
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
Key key = (Key) o;
|
||||
if (readerVersion != key.readerVersion) return false;
|
||||
if (!shard.equals(key.shard)) return false;
|
||||
if (!value.equals(key.value)) return false;
|
||||
private boolean empty(Stats stats) {
|
||||
if (stats == null) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = shard.hashCode();
|
||||
result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32));
|
||||
result = 31 * result + value.hashCode();
|
||||
return result;
|
||||
}
|
||||
return stats.cacheSize == 0 && stats.ramBytesUsed == 0;
|
||||
}
|
||||
|
||||
private class CleanupKey implements IndexReader.ReaderClosedListener {
|
||||
IndexShard indexShard;
|
||||
long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped
|
||||
|
||||
private CleanupKey(IndexShard indexShard, long readerVersion) {
|
||||
this.indexShard = indexShard;
|
||||
this.readerVersion = readerVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClose(IndexReader reader) {
|
||||
Boolean remove = registeredClosedListeners.remove(this);
|
||||
if (remove != null) {
|
||||
keysToClean.add(this);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
CleanupKey that = (CleanupKey) o;
|
||||
if (readerVersion != that.readerVersion) return false;
|
||||
if (!indexShard.equals(that.indexShard)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = indexShard.hashCode();
|
||||
result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32));
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private class Reaper implements Runnable {
|
||||
|
||||
private final ObjectSet<CleanupKey> currentKeysToClean = new ObjectHashSet<>();
|
||||
private final ObjectSet<IndexShard> currentFullClean = new ObjectHashSet<>();
|
||||
|
||||
private volatile boolean closed;
|
||||
|
||||
void close() {
|
||||
closed = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
if (keysToClean.isEmpty()) {
|
||||
schedule();
|
||||
return;
|
||||
}
|
||||
try {
|
||||
threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
reap();
|
||||
schedule();
|
||||
}
|
||||
});
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
logger.debug("Can not run ReaderCleaner - execution rejected", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private void schedule() {
|
||||
try {
|
||||
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this);
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
logger.debug("Can not schedule ReaderCleaner - execution rejected", ex);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized void reap() {
|
||||
currentKeysToClean.clear();
|
||||
currentFullClean.clear();
|
||||
for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext(); ) {
|
||||
CleanupKey cleanupKey = iterator.next();
|
||||
iterator.remove();
|
||||
if (cleanupKey.readerVersion == -1 || cleanupKey.indexShard.state() == IndexShardState.CLOSED) {
|
||||
// -1 indicates full cleanup, as does a closed shard
|
||||
currentFullClean.add(cleanupKey.indexShard);
|
||||
} else {
|
||||
currentKeysToClean.add(cleanupKey);
|
||||
}
|
||||
}
|
||||
|
||||
if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) {
|
||||
CleanupKey lookupKey = new CleanupKey(null, -1);
|
||||
for (Iterator<Key> iterator = cache.asMap().keySet().iterator(); iterator.hasNext(); ) {
|
||||
Key key = iterator.next();
|
||||
if (currentFullClean.contains(key.shard)) {
|
||||
iterator.remove();
|
||||
} else {
|
||||
lookupKey.indexShard = key.shard;
|
||||
lookupKey.readerVersion = key.readerVersion;
|
||||
if (currentKeysToClean.contains(lookupKey)) {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cache.cleanUp();
|
||||
currentKeysToClean.clear();
|
||||
currentFullClean.clear();
|
||||
}
|
||||
}
|
||||
|
||||
    private static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception {
        // TODO: for now, this will create different keys for different JSON order
        // TODO: tricky to get around this, need to parse and order all, which can be expensive
        return new Key(context.indexShard(),
                ((DirectoryReader) context.searcher().getIndexReader()).getVersion(),
                request.cacheKey());
    }

    public void onClose(ShardId shardId) {
        assert empty(shardStats.get(shardId));
        shardStats.remove(shardId);
    }
}
@ -17,7 +17,7 @@
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.cache.filter.terms;
|
||||
package org.elasticsearch.indices.cache.query.terms;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.index.query.QueryParseContext;
|
core/src/main/java/org/elasticsearch/indices/cache/request/IndicesRequestCache.java (new file, 503 lines, vendored)
@ -0,0 +1,503 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.cache.request;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectHashSet;
|
||||
import com.carrotsearch.hppc.ObjectSet;
|
||||
import com.google.common.cache.Cache;
|
||||
import com.google.common.cache.CacheBuilder;
|
||||
import com.google.common.cache.RemovalListener;
|
||||
import com.google.common.cache.RemovalNotification;
|
||||
import com.google.common.cache.Weigher;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.apache.lucene.util.RamUsageEstimator;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.MemorySizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardState;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.ShardSearchRequest;
|
||||
import org.elasticsearch.search.query.QueryPhase;
|
||||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.common.Strings.hasLength;
|
||||
|
||||
/**
|
||||
* The indices request cache caches shard-level, request-stage responses, which helps speed up
* similar requests that are potentially expensive (because of aggs, for example). The cache is fully coherent
* with the semantics of NRT (the index reader version is part of the cache key), and relies on size-based
* eviction to evict cache entries associated with old readers, as well as a scheduled reaper to clean up
* entries for readers that are no longer used and for shards that have been closed.
* <p/>
* Currently, the cache is only used for requests that return no hits (size 0, the old {@link SearchType#COUNT}
* behaviour), and can only be opted into via an index-level setting that can be changed dynamically and
* defaults to false.
* <p/>
* There are still several TODOs left in this class, some easily addressable, some more complex, but the support
* is functional.
*/
|
||||
public class IndicesRequestCache extends AbstractComponent implements RemovalListener<IndicesRequestCache.Key, IndicesRequestCache.Value> {
|
||||
|
||||
/**
|
||||
* A setting to enable or disable request caching on an index level. It is dynamic by default,
* since we always check the IndexMetaData from the current cluster state.
|
||||
*/
|
||||
public static final String INDEX_CACHE_REQUEST_ENABLED = "index.requests.cache.enable";
|
||||
@Deprecated
|
||||
public static final String DEPRECATED_INDEX_CACHE_REQUEST_ENABLED = "index.cache.query.enable";
|
||||
public static final String INDICES_CACHE_REQUEST_CLEAN_INTERVAL = "indices.requests.cache.clean_interval";
|
||||
|
||||
public static final String INDICES_CACHE_QUERY_SIZE = "indices.requests.cache.size";
|
||||
@Deprecated
|
||||
public static final String DEPRECATED_INDICES_CACHE_QUERY_SIZE = "indices.cache.query.size";
|
||||
public static final String INDICES_CACHE_QUERY_EXPIRE = "indices.requests.cache.expire";
|
||||
public static final String INDICES_CACHE_QUERY_CONCURRENCY_LEVEL = "indices.requests.cache.concurrency_level";
|
||||
|
||||
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
private final ClusterService clusterService;
|
||||
|
||||
private final TimeValue cleanInterval;
|
||||
private final Reaper reaper;
|
||||
|
||||
final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
|
||||
final Set<CleanupKey> keysToClean = ConcurrentCollections.newConcurrentSet();
|
||||
|
||||
|
||||
//TODO make these changes configurable on the cluster level
|
||||
private final String size;
|
||||
private final TimeValue expire;
|
||||
private final int concurrencyLevel;
|
||||
|
||||
private volatile Cache<Key, Value> cache;
|
||||
|
||||
@Inject
|
||||
public IndicesRequestCache(Settings settings, ClusterService clusterService, ThreadPool threadPool) {
|
||||
super(settings);
|
||||
this.clusterService = clusterService;
|
||||
this.threadPool = threadPool;
|
||||
this.cleanInterval = settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));
|
||||
|
||||
String size = settings.get(INDICES_CACHE_QUERY_SIZE);
|
||||
if (size == null) {
|
||||
size = settings.get(DEPRECATED_INDICES_CACHE_QUERY_SIZE);
|
||||
if (size != null) {
|
||||
deprecationLogger.deprecated("The [" + DEPRECATED_INDICES_CACHE_QUERY_SIZE
|
||||
+ "] settings is now deprecated, use [" + INDICES_CACHE_QUERY_SIZE + "] instead");
|
||||
}
|
||||
}
|
||||
if (size == null) {
|
||||
// this cache can be very small yet still be very effective
|
||||
size = "1%";
|
||||
}
|
||||
this.size = size;
|
||||
|
||||
this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
|
||||
// defaults to 4, but this is a busy map for all indices, increase it a bit by default
|
||||
this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
|
||||
if (concurrencyLevel <= 0) {
|
||||
throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
|
||||
}
|
||||
buildCache();
|
||||
|
||||
this.reaper = new Reaper();
|
||||
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, reaper);
|
||||
}
|
||||
|
||||
private boolean isCacheEnabled(Settings settings, boolean defaultEnable) {
|
||||
Boolean enable = settings.getAsBoolean(INDEX_CACHE_REQUEST_ENABLED, null);
|
||||
if (enable == null) {
|
||||
enable = settings.getAsBoolean(DEPRECATED_INDEX_CACHE_REQUEST_ENABLED, null);
|
||||
if (enable != null) {
|
||||
deprecationLogger.deprecated("The [" + DEPRECATED_INDEX_CACHE_REQUEST_ENABLED
|
||||
+ "] settings is now deprecated, use [" + INDEX_CACHE_REQUEST_ENABLED + "] instead");
|
||||
}
|
||||
}
|
||||
if (enable == null) {
|
||||
enable = defaultEnable;
|
||||
}
|
||||
return enable;
|
||||
}
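    /*
     * A minimal sketch of the fallback pattern used by isCacheEnabled() above and by the size lookup in the
     * constructor: prefer the new setting key, fall back to the deprecated key, then to the supplied default.
     * The helper name and signature are illustrative assumptions, not members of this class.
     */
    private static boolean readBooleanWithFallback(Settings settings, String key, String deprecatedKey, boolean defaultValue) {
        Boolean value = settings.getAsBoolean(key, null);
        if (value == null) {
            value = settings.getAsBoolean(deprecatedKey, null); // the deprecated key is still honored
        }
        return value == null ? defaultValue : value;
    }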
|
||||
|
||||
private void buildCache() {
|
||||
long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes();
|
||||
|
||||
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.newBuilder()
|
||||
.maximumWeight(sizeInBytes).weigher(new QueryCacheWeigher()).removalListener(this);
|
||||
cacheBuilder.concurrencyLevel(concurrencyLevel);
|
||||
|
||||
if (expire != null) {
|
||||
cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
|
||||
}
|
||||
|
||||
cache = cacheBuilder.build();
|
||||
}
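    /*
     * A standalone sketch of the eviction budget set up by buildCache() above, under the assumption of the
     * default "1%" size: maximumWeight is a byte budget, each entry weighs key.ramBytesUsed() plus
     * value.ramBytesUsed(), and Guava starts evicting once the summed size of cached responses exceeds it.
     * The method name is illustrative only.
     */
    static Cache<Key, Value> exampleCache(long heapSizeInBytes) {
        long maxWeightInBytes = heapSizeInBytes / 100; // roughly what parseBytesSizeValueOrHeapRatio("1%") resolves to
        return CacheBuilder.newBuilder()
                .maximumWeight(maxWeightInBytes)
                .weigher(new Weigher<Key, Value>() {
                    @Override
                    public int weigh(Key key, Value value) {
                        return (int) (key.ramBytesUsed() + value.ramBytesUsed()); // same rule as QueryCacheWeigher below
                    }
                })
                .build();
    }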
|
||||
|
||||
private static class QueryCacheWeigher implements Weigher<Key, Value> {
|
||||
|
||||
@Override
|
||||
public int weigh(Key key, Value value) {
|
||||
return (int) (key.ramBytesUsed() + value.ramBytesUsed());
|
||||
}
|
||||
}
|
||||
|
||||
public void close() {
|
||||
reaper.close();
|
||||
cache.invalidateAll();
|
||||
}
|
||||
|
||||
public void clear(IndexShard shard) {
|
||||
if (shard == null) {
|
||||
return;
|
||||
}
|
||||
keysToClean.add(new CleanupKey(shard, -1));
|
||||
logger.trace("{} explicit cache clear", shard.shardId());
|
||||
reaper.reap();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRemoval(RemovalNotification<Key, Value> notification) {
|
||||
if (notification.getKey() == null) {
|
||||
return;
|
||||
}
|
||||
notification.getKey().shard.requestCache().onRemoval(notification);
|
||||
}
|
||||
|
||||
/**
|
||||
* Can the shard request be cached at all?
|
||||
*/
|
||||
public boolean canCache(ShardSearchRequest request, SearchContext context) {
|
||||
// TODO: for now, template is not supported, though we could use the generated bytes as the key
|
||||
if (hasLength(request.templateSource())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// for now, only enable it for requests with no hits
|
||||
if (context.size() != 0) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// We cannot cache with DFS because results depend not only on the content of the index but also
|
||||
// on the overridden statistics. So if you ran two queries on the same index with different stats
|
||||
// (because another shard was updated) you would get wrong results because of the scores
|
||||
// (think about top_hits aggs or scripts using the score)
|
||||
if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
IndexMetaData index = clusterService.state().getMetaData().index(request.index());
|
||||
if (index == null) { // in case we didn't yet have the cluster state, or it just got deleted
|
||||
return false;
|
||||
}
|
||||
// if not explicitly set on the request, fall back to the index setting; otherwise honor the request flag
|
||||
if (request.requestCache() == null) {
|
||||
if (!isCacheEnabled(index.settings(), Boolean.FALSE)) {
|
||||
return false;
|
||||
}
|
||||
} else if (!request.requestCache()) {
|
||||
return false;
|
||||
}
|
||||
// if the reader is not a directory reader, we can't get the version from it
|
||||
if (!(context.searcher().getIndexReader() instanceof DirectoryReader)) {
|
||||
return false;
|
||||
}
|
||||
// if "now in millis" is used (or, in the future, a more generic "isDeterministic" flag),
// then we can't cache, because the result depends on "now" within the search request and is not deterministic
|
||||
if (context.nowInMillisUsed()) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the cache result, computing it if needed by executing the query phase, and otherwise deserializing the cached
* value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
* for a single load operation: other requests with the same key wait until the value is loaded and then reuse
* the same cached entry.
|
||||
*/
|
||||
public void loadIntoContext(final ShardSearchRequest request, final SearchContext context, final QueryPhase queryPhase) throws Exception {
|
||||
assert canCache(request, context);
|
||||
Key key = buildKey(request, context);
|
||||
Loader loader = new Loader(queryPhase, context, key);
|
||||
Value value = cache.get(key, loader);
|
||||
if (loader.isLoaded()) {
|
||||
key.shard.requestCache().onMiss();
|
||||
// see if it is the first time we see this reader, and make sure to register a cleanup key
|
||||
CleanupKey cleanupKey = new CleanupKey(context.indexShard(), ((DirectoryReader) context.searcher().getIndexReader()).getVersion());
|
||||
if (!registeredClosedListeners.containsKey(cleanupKey)) {
|
||||
Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE);
|
||||
if (previous == null) {
|
||||
context.searcher().getIndexReader().addReaderClosedListener(cleanupKey);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
key.shard.requestCache().onHit();
|
||||
// restore the cached query result into the context
|
||||
final QuerySearchResult result = context.queryResult();
|
||||
result.readFromWithId(context.id(), value.reference.streamInput());
|
||||
result.shardTarget(context.shardTarget());
|
||||
}
|
||||
}
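    /*
     * A reduced sketch of the load-through behaviour relied on in loadIntoContext() above, assuming a Guava
     * cache like the one built in buildCache(): Cache.get(key, loader) runs loader.call() at most once per
     * key, and concurrent requests for the same key block on and then reuse that single result, which is why
     * Loader.isLoaded() can distinguish a miss (this thread computed the value) from a hit.
     * The method name is illustrative only.
     */
    static Value getOrCompute(Cache<Key, Value> cache, Key key, Loader loader) throws Exception {
        Value value = cache.get(key, loader); // at most one computation per key
        if (loader.isLoaded()) {
            // this thread produced the value -> a miss was recorded for the shard
        } else {
            // an earlier or concurrent request produced it -> a hit
        }
        return value;
    }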
|
||||
|
||||
private static class Loader implements Callable<Value> {
|
||||
|
||||
private final QueryPhase queryPhase;
|
||||
private final SearchContext context;
|
||||
private final IndicesRequestCache.Key key;
|
||||
private boolean loaded;
|
||||
|
||||
Loader(QueryPhase queryPhase, SearchContext context, IndicesRequestCache.Key key) {
|
||||
this.queryPhase = queryPhase;
|
||||
this.context = context;
|
||||
this.key = key;
|
||||
}
|
||||
|
||||
public boolean isLoaded() {
|
||||
return this.loaded;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Value call() throws Exception {
|
||||
queryPhase.execute(context);
|
||||
|
||||
/* BytesStreamOutput allows passing the expected size, but by default it uses
 * BigArrays.PAGE_SIZE_IN_BYTES, which is 16k. A common cached result, i.e.
 * a date histogram with 3 buckets, is ~100 bytes, so 16k would be very wasteful,
 * since we don't shrink to the actual size once we are done serializing.
 * By passing 512 as the expected size we resize the byte array in the stream
 * slowly until we hit the page size, and don't waste too much memory for small query
 * results. */
|
||||
final int expectedSizeInBytes = 512;
|
||||
try (BytesStreamOutput out = new BytesStreamOutput(expectedSizeInBytes)) {
|
||||
context.queryResult().writeToNoId(out);
|
||||
// for now, keep the paged data structure, which might have unused bytes to fill a page, but better to keep
|
||||
// the memory properly paged instead of having varied sized bytes
|
||||
final BytesReference reference = out.bytes();
|
||||
loaded = true;
|
||||
Value value = new Value(reference, out.ramBytesUsed());
|
||||
key.shard.requestCache().onCached(key, value);
|
||||
return value;
|
||||
}
|
||||
}
|
||||
}
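    /*
     * A small sketch of the serialization choice discussed in Loader.call() above: starting the stream at an
     * expected size of 512 bytes lets tiny responses (e.g. a date histogram with a few buckets) avoid paying
     * for a full 16k page up front, while larger responses still grow into paged buffers. The method name and
     * the raw QuerySearchResult parameter are illustrative assumptions.
     */
    static Value serialize(QuerySearchResult result) throws Exception {
        try (BytesStreamOutput out = new BytesStreamOutput(512)) {
            result.writeToNoId(out);
            // account for the paged buffer actually held, not just the payload length
            return new Value(out.bytes(), out.ramBytesUsed());
        }
    }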
|
||||
|
||||
public static class Value implements Accountable {
|
||||
final BytesReference reference;
|
||||
final long ramBytesUsed;
|
||||
|
||||
public Value(BytesReference reference, long ramBytesUsed) {
|
||||
this.reference = reference;
|
||||
this.ramBytesUsed = ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return ramBytesUsed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
}
|
||||
|
||||
public static class Key implements Accountable {
|
||||
public final IndexShard shard; // use as identity equality
|
||||
public final long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped
|
||||
public final BytesReference value;
|
||||
|
||||
Key(IndexShard shard, long readerVersion, BytesReference value) {
|
||||
this.shard = shard;
|
||||
this.readerVersion = readerVersion;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long ramBytesUsed() {
|
||||
return RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_LONG + value.length();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Collection<Accountable> getChildResources() {
|
||||
// TODO: more detailed ram usage?
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
Key key = (Key) o;
|
||||
if (readerVersion != key.readerVersion) return false;
|
||||
if (!shard.equals(key.shard)) return false;
|
||||
if (!value.equals(key.value)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = shard.hashCode();
|
||||
result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32));
|
||||
result = 31 * result + value.hashCode();
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private class CleanupKey implements IndexReader.ReaderClosedListener {
|
||||
IndexShard indexShard;
|
||||
long readerVersion; // use the reader version to now keep a reference to a "short" lived reader until its reaped
|
||||
|
||||
private CleanupKey(IndexShard indexShard, long readerVersion) {
|
||||
this.indexShard = indexShard;
|
||||
this.readerVersion = readerVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onClose(IndexReader reader) {
|
||||
Boolean remove = registeredClosedListeners.remove(this);
|
||||
if (remove != null) {
|
||||
keysToClean.add(this);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
CleanupKey that = (CleanupKey) o;
|
||||
if (readerVersion != that.readerVersion) return false;
|
||||
if (!indexShard.equals(that.indexShard)) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = indexShard.hashCode();
|
||||
result = 31 * result + (int) (readerVersion ^ (readerVersion >>> 32));
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
private class Reaper implements Runnable {
|
||||
|
||||
private final ObjectSet<CleanupKey> currentKeysToClean = new ObjectHashSet<>();
|
||||
private final ObjectSet<IndexShard> currentFullClean = new ObjectHashSet<>();
|
||||
|
||||
private volatile boolean closed;
|
||||
|
||||
void close() {
|
||||
closed = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
if (keysToClean.isEmpty()) {
|
||||
schedule();
|
||||
return;
|
||||
}
|
||||
try {
|
||||
threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
reap();
|
||||
schedule();
|
||||
}
|
||||
});
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
logger.debug("Can not run ReaderCleaner - execution rejected", ex);
|
||||
}
|
||||
}
|
||||
|
||||
private void schedule() {
|
||||
try {
|
||||
threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this);
|
||||
} catch (EsRejectedExecutionException ex) {
|
||||
logger.debug("Can not schedule ReaderCleaner - execution rejected", ex);
|
||||
}
|
||||
}
|
||||
|
||||
synchronized void reap() {
|
||||
currentKeysToClean.clear();
|
||||
currentFullClean.clear();
|
||||
for (Iterator<CleanupKey> iterator = keysToClean.iterator(); iterator.hasNext(); ) {
|
||||
CleanupKey cleanupKey = iterator.next();
|
||||
iterator.remove();
|
||||
if (cleanupKey.readerVersion == -1 || cleanupKey.indexShard.state() == IndexShardState.CLOSED) {
|
||||
// -1 indicates full cleanup, as does a closed shard
|
||||
currentFullClean.add(cleanupKey.indexShard);
|
||||
} else {
|
||||
currentKeysToClean.add(cleanupKey);
|
||||
}
|
||||
}
|
||||
|
||||
if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) {
|
||||
CleanupKey lookupKey = new CleanupKey(null, -1);
|
||||
for (Iterator<Key> iterator = cache.asMap().keySet().iterator(); iterator.hasNext(); ) {
|
||||
Key key = iterator.next();
|
||||
if (currentFullClean.contains(key.shard)) {
|
||||
iterator.remove();
|
||||
} else {
|
||||
lookupKey.indexShard = key.shard;
|
||||
lookupKey.readerVersion = key.readerVersion;
|
||||
if (currentKeysToClean.contains(lookupKey)) {
|
||||
iterator.remove();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cache.cleanUp();
|
||||
currentKeysToClean.clear();
|
||||
currentFullClean.clear();
|
||||
}
|
||||
}
|
||||
|
||||
private static Key buildKey(ShardSearchRequest request, SearchContext context) throws Exception {
|
||||
// TODO: for now, this will create different keys for different JSON order
|
||||
// TODO: tricky to get around this, need to parse and order all, which can be expensive
|
||||
return new Key(context.indexShard(),
|
||||
((DirectoryReader) context.searcher().getIndexReader()).getVersion(),
|
||||
request.cacheKey());
|
||||
}
|
||||
}
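The class above is exercised end to end by the integration test further down in this commit (IndicesRequestCacheTests). A condensed, illustrative sketch of that flow, assuming the ElasticsearchIntegrationTest client() helper and its static assertion imports, with placeholder index and type names (the test additionally attaches a date_histogram aggregation):

    assertAcked(client().admin().indices().prepareCreate("index")
            .addMapping("type", "f", "type=date")
            .setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true).get());
    // a hit-less (size 0) QUERY_THEN_FETCH request is the only shape canCache() accepts
    SearchResponse r1 = client().prepareSearch("index").setSize(0)
            .setSearchType(SearchType.QUERY_THEN_FETCH).get();
    assertSearchResponse(r1);
    // the serialized QuerySearchResult is now resident in the request cache
    assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get()
            .getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));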
|
@ -62,7 +62,7 @@ import org.elasticsearch.index.search.shape.ShapeModule;
|
||||
import org.elasticsearch.indices.IndicesModule;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerModule;
|
||||
import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.memory.IndexingMemoryController;
|
||||
@ -348,7 +348,7 @@ public class Node implements Releasable {
|
||||
injector.getInstance(IndicesTTLService.class).close();
|
||||
injector.getInstance(IndicesService.class).close();
|
||||
// close filter/fielddata caches after indices
|
||||
injector.getInstance(IndicesFilterCache.class).close();
|
||||
injector.getInstance(IndicesQueryCache.class).close();
|
||||
injector.getInstance(IndicesFieldDataCache.class).close();
|
||||
injector.getInstance(IndicesStore.class).close();
|
||||
stopWatch.stop().start("routing");
|
||||
|
@ -73,8 +73,8 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
|
||||
|
||||
for (Map.Entry<String, String> entry : request.params().entrySet()) {
|
||||
|
||||
if (Fields.FILTER.match(entry.getKey())) {
|
||||
clearIndicesCacheRequest.filterCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.filterCache()));
|
||||
if (Fields.QUERY.match(entry.getKey())) {
|
||||
clearIndicesCacheRequest.queryCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.queryCache()));
|
||||
}
|
||||
if (Fields.FIELD_DATA.match(entry.getKey())) {
|
||||
clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.fieldDataCache()));
|
||||
@ -91,7 +91,7 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
|
||||
}
|
||||
|
||||
public static class Fields {
|
||||
public static final ParseField FILTER = new ParseField("filter", "filter_cache");
|
||||
public static final ParseField QUERY = new ParseField("query", "filter", "filter_cache");
|
||||
public static final ParseField FIELD_DATA = new ParseField("field_data", "fielddata");
|
||||
public static final ParseField RECYCLER = new ParseField("recycler");
|
||||
public static final ParseField FIELDS = new ParseField("fields");
|
||||
|
@ -72,13 +72,13 @@ public class RestIndicesStatsAction extends BaseRestHandler {
|
||||
indicesStatsRequest.refresh(metrics.contains("refresh"));
|
||||
indicesStatsRequest.flush(metrics.contains("flush"));
|
||||
indicesStatsRequest.warmer(metrics.contains("warmer"));
|
||||
indicesStatsRequest.filterCache(metrics.contains("filter_cache"));
|
||||
indicesStatsRequest.queryCache(metrics.contains("query_cache"));
|
||||
indicesStatsRequest.percolate(metrics.contains("percolate"));
|
||||
indicesStatsRequest.segments(metrics.contains("segments"));
|
||||
indicesStatsRequest.fieldData(metrics.contains("fielddata"));
|
||||
indicesStatsRequest.completion(metrics.contains("completion"));
|
||||
indicesStatsRequest.suggest(metrics.contains("suggest"));
|
||||
indicesStatsRequest.queryCache(metrics.contains("query_cache"));
|
||||
indicesStatsRequest.requestCache(metrics.contains("request_cache"));
|
||||
indicesStatsRequest.recovery(metrics.contains("recovery"));
|
||||
indicesStatsRequest.translog(metrics.contains("translog"));
|
||||
}
|
||||
|
@ -118,23 +118,23 @@ public class RestIndicesAction extends AbstractCatAction {
|
||||
table.addCell("fielddata.evictions", "sibling:pri;alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
|
||||
table.addCell("pri.fielddata.evictions", "default:false;text-align:right;desc:fielddata evictions");
|
||||
|
||||
table.addCell("filter_cache.memory_size", "sibling:pri;alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
|
||||
table.addCell("pri.filter_cache.memory_size", "default:false;text-align:right;desc:used filter cache");
|
||||
|
||||
table.addCell("filter_cache.evictions", "sibling:pri;alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
|
||||
table.addCell("pri.filter_cache.evictions", "default:false;text-align:right;desc:filter cache evictions");
|
||||
|
||||
table.addCell("query_cache.memory_size", "sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
|
||||
table.addCell("query_cache.memory_size", "sibling:pri;alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
|
||||
table.addCell("pri.query_cache.memory_size", "default:false;text-align:right;desc:used query cache");
|
||||
|
||||
table.addCell("query_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
|
||||
table.addCell("query_cache.evictions", "sibling:pri;alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
|
||||
table.addCell("pri.query_cache.evictions", "default:false;text-align:right;desc:query cache evictions");
|
||||
|
||||
table.addCell("query_cache.hit_count", "sibling:pri;alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit count");
|
||||
table.addCell("pri.query_cache.hit_count", "default:false;text-align:right;desc:query cache hit count");
|
||||
table.addCell("request_cache.memory_size", "sibling:pri;alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used request cache");
|
||||
table.addCell("pri.request_cache.memory_size", "default:false;text-align:right;desc:used request cache");
|
||||
|
||||
table.addCell("query_cache.miss_count", "sibling:pri;alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss count");
|
||||
table.addCell("pri.query_cache.miss_count", "default:false;text-align:right;desc:query cache miss count");
|
||||
table.addCell("request_cache.evictions", "sibling:pri;alias:qce,queryCacheEvictions;default:false;text-align:right;desc:request cache evictions");
|
||||
table.addCell("pri.request_cache.evictions", "default:false;text-align:right;desc:request cache evictions");
|
||||
|
||||
table.addCell("request_cache.hit_count", "sibling:pri;alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:request cache hit count");
|
||||
table.addCell("pri.request_cache.hit_count", "default:false;text-align:right;desc:request cache hit count");
|
||||
|
||||
table.addCell("request_cache.miss_count", "sibling:pri;alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:request cache miss count");
|
||||
table.addCell("pri.request_cache.miss_count", "default:false;text-align:right;desc:request cache miss count");
|
||||
|
||||
table.addCell("flush.total", "sibling:pri;alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
|
||||
table.addCell("pri.flush.total", "default:false;text-align:right;desc:number of flushes");
|
||||
@ -317,23 +317,23 @@ public class RestIndicesAction extends AbstractCatAction {
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getFieldData().getEvictions());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFieldData().getEvictions());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getFilterCache().getMemorySize());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFilterCache().getMemorySize());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getFilterCache().getEvictions());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFilterCache().getEvictions());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getMemorySize());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getMemorySize());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getEvictions());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getEvictions());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getHitCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getHitCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getMemorySize());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getMemorySize());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getQueryCache().getMissCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getQueryCache().getMissCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getEvictions());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getEvictions());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getHitCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getHitCount());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getRequestCache().getMissCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getRequestCache().getMissCount());
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getFlush().getTotal());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getFlush().getTotal());
|
||||
|
@ -35,8 +35,8 @@ import org.elasticsearch.common.Table;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.InetSocketTransportAddress;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheStats;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheStats;
|
||||
import org.elasticsearch.index.cache.request.RequestCacheStats;
|
||||
import org.elasticsearch.index.engine.SegmentsStats;
|
||||
import org.elasticsearch.index.fielddata.FieldDataStats;
|
||||
import org.elasticsearch.index.flush.FlushStats;
|
||||
@ -142,13 +142,13 @@ public class RestNodesAction extends AbstractCatAction {
|
||||
table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
|
||||
table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
|
||||
|
||||
table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
|
||||
table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
|
||||
table.addCell("query_cache.memory_size", "alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
|
||||
table.addCell("query_cache.evictions", "alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
|
||||
|
||||
table.addCell("query_cache.memory_size", "alias:qcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
|
||||
table.addCell("query_cache.evictions", "alias:qce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
|
||||
table.addCell("query_cache.hit_count", "alias:qchc,queryCacheHitCount;default:false;text-align:right;desc:query cache hit counts");
|
||||
table.addCell("query_cache.miss_count", "alias:qcmc,queryCacheMissCount;default:false;text-align:right;desc:query cache miss counts");
|
||||
table.addCell("request_cache.memory_size", "alias:qcm,requestCacheMemory;default:false;text-align:right;desc:used request cache");
|
||||
table.addCell("request_cache.evictions", "alias:qce,requestCacheEvictions;default:false;text-align:right;desc:request cache evictions");
|
||||
table.addCell("request_cache.hit_count", "alias:qchc,requestCacheHitCount;default:false;text-align:right;desc:request cache hit counts");
|
||||
table.addCell("request_cache.miss_count", "alias:qcmc,requestCacheMissCount;default:false;text-align:right;desc:request cache miss counts");
|
||||
|
||||
table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
|
||||
table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
|
||||
@ -269,11 +269,11 @@ public class RestNodesAction extends AbstractCatAction {
|
||||
table.addCell(fdStats == null ? null : fdStats.getMemorySize());
|
||||
table.addCell(fdStats == null ? null : fdStats.getEvictions());
|
||||
|
||||
FilterCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache();
|
||||
QueryCacheStats fcStats = indicesStats == null ? null : indicesStats.getFilterCache();
|
||||
table.addCell(fcStats == null ? null : fcStats.getMemorySize());
|
||||
table.addCell(fcStats == null ? null : fcStats.getEvictions());
|
||||
|
||||
QueryCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache();
|
||||
RequestCacheStats qcStats = indicesStats == null ? null : indicesStats.getQueryCache();
|
||||
table.addCell(qcStats == null ? null : qcStats.getMemorySize());
|
||||
table.addCell(qcStats == null ? null : qcStats.getEvictions());
|
||||
table.addCell(qcStats == null ? null : qcStats.getHitCount());
|
||||
|
@ -101,8 +101,8 @@ public class RestShardsAction extends AbstractCatAction {
|
||||
table.addCell("fielddata.memory_size", "alias:fm,fielddataMemory;default:false;text-align:right;desc:used fielddata cache");
|
||||
table.addCell("fielddata.evictions", "alias:fe,fielddataEvictions;default:false;text-align:right;desc:fielddata evictions");
|
||||
|
||||
table.addCell("filter_cache.memory_size", "alias:fcm,filterCacheMemory;default:false;text-align:right;desc:used filter cache");
|
||||
table.addCell("filter_cache.evictions", "alias:fce,filterCacheEvictions;default:false;text-align:right;desc:filter cache evictions");
|
||||
table.addCell("query_cache.memory_size", "alias:fcm,queryCacheMemory;default:false;text-align:right;desc:used query cache");
|
||||
table.addCell("query_cache.evictions", "alias:fce,queryCacheEvictions;default:false;text-align:right;desc:query cache evictions");
|
||||
|
||||
table.addCell("flush.total", "alias:ft,flushTotal;default:false;text-align:right;desc:number of flushes");
|
||||
table.addCell("flush.total_time", "alias:ftt,flushTotalTime;default:false;text-align:right;desc:time spent in flush");
|
||||
@ -232,8 +232,8 @@ public class RestShardsAction extends AbstractCatAction {
|
||||
table.addCell(shardStats == null ? null : shardStats.getFieldData().getMemorySize());
|
||||
table.addCell(shardStats == null ? null : shardStats.getFieldData().getEvictions());
|
||||
|
||||
table.addCell(shardStats == null ? null : shardStats.getFilterCache().getMemorySize());
|
||||
table.addCell(shardStats == null ? null : shardStats.getFilterCache().getEvictions());
|
||||
table.addCell(shardStats == null ? null : shardStats.getQueryCache().getMemorySize());
|
||||
table.addCell(shardStats == null ? null : shardStats.getQueryCache().getEvictions());
|
||||
|
||||
table.addCell(shardStats == null ? null : shardStats.getFlush().getTotal());
|
||||
table.addCell(shardStats == null ? null : shardStats.getFlush().getTotalTime());
|
||||
|
@ -72,7 +72,7 @@ import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.IndicesWarmer;
|
||||
import org.elasticsearch.indices.IndicesWarmer.TerminationHandle;
|
||||
import org.elasticsearch.indices.IndicesWarmer.WarmerContext;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script.ScriptParseException;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
@ -144,7 +144,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
||||
|
||||
private final FetchPhase fetchPhase;
|
||||
|
||||
private final IndicesQueryCache indicesQueryCache;
|
||||
private final IndicesRequestCache indicesQueryCache;
|
||||
|
||||
private final long defaultKeepAlive;
|
||||
|
||||
@ -159,7 +159,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
||||
@Inject
|
||||
public SearchService(Settings settings, ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool,
|
||||
ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase,
|
||||
IndicesQueryCache indicesQueryCache) {
|
||||
IndicesRequestCache indicesQueryCache) {
|
||||
super(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.clusterService = clusterService;
|
||||
@ -374,7 +374,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
||||
try {
|
||||
final IndexCache indexCache = context.indexShard().indexService().cache();
|
||||
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
|
||||
indexCache.filter(), indexCache.filterPolicy()));
|
||||
indexCache.query(), indexCache.queryPolicy()));
|
||||
} catch (Throwable e) {
|
||||
processFailure(context, e);
|
||||
cleanContext(context);
|
||||
@ -448,7 +448,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
||||
try {
|
||||
final IndexCache indexCache = context.indexShard().indexService().cache();
|
||||
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
|
||||
indexCache.filter(), indexCache.filterPolicy()));
|
||||
indexCache.query(), indexCache.queryPolicy()));
|
||||
} catch (Throwable e) {
|
||||
freeContext(context.id());
|
||||
cleanContext(context);
|
||||
@ -1060,7 +1060,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
||||
try {
|
||||
long now = System.nanoTime();
|
||||
ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.numberOfShards(),
|
||||
SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.queryCache());
|
||||
SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.requestCache());
|
||||
context = createContext(request, warmerContext.searcher());
|
||||
// if we use sort, we need to do query to sort on it and load relevant field data
|
||||
// if not, we might as well set size=0 (and cache if needed)
|
||||
|
@ -70,7 +70,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
|
||||
private BytesReference extraSource;
|
||||
private BytesReference templateSource;
|
||||
private Template template;
|
||||
private Boolean queryCache;
|
||||
private Boolean requestCache;
|
||||
private long nowInMillis;
|
||||
|
||||
ShardSearchLocalRequest() {
|
||||
@ -79,7 +79,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
|
||||
ShardSearchLocalRequest(SearchRequest searchRequest, ShardRouting shardRouting, int numberOfShards,
|
||||
String[] filteringAliases, long nowInMillis) {
|
||||
this(shardRouting.shardId(), numberOfShards, searchRequest.searchType(),
|
||||
searchRequest.source(), searchRequest.types(), searchRequest.queryCache());
|
||||
searchRequest.source(), searchRequest.types(), searchRequest.requestCache());
|
||||
this.extraSource = searchRequest.extraSource();
|
||||
this.templateSource = searchRequest.templateSource();
|
||||
this.template = searchRequest.template();
|
||||
@ -100,14 +100,14 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
|
||||
}
|
||||
|
||||
public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType,
|
||||
BytesReference source, String[] types, Boolean queryCache) {
|
||||
BytesReference source, String[] types, Boolean requestCache) {
|
||||
this.index = shardId.getIndex();
|
||||
this.shardId = shardId.id();
|
||||
this.numberOfShards = numberOfShards;
|
||||
this.searchType = searchType;
|
||||
this.source = source;
|
||||
this.types = types;
|
||||
this.queryCache = queryCache;
|
||||
this.requestCache = requestCache;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -171,8 +171,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean queryCache() {
|
||||
return queryCache;
|
||||
public Boolean requestCache() {
|
||||
return requestCache;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -201,7 +201,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
|
||||
if (in.readBoolean()) {
|
||||
template = Template.readTemplate(in);
|
||||
}
|
||||
queryCache = in.readOptionalBoolean();
|
||||
requestCache = in.readOptionalBoolean();
|
||||
}
|
||||
|
||||
protected void innerWriteTo(StreamOutput out, boolean asKey) throws IOException {
|
||||
@ -231,7 +231,7 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
|
||||
if (hasTemplate) {
|
||||
template.writeTo(out);
|
||||
}
|
||||
out.writeOptionalBoolean(queryCache);
|
||||
out.writeOptionalBoolean(requestCache);
|
||||
}
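    /*
     * A minimal round-trip sketch of the tri-state flag serialized above: writeOptionalBoolean /
     * readOptionalBoolean preserve null (meaning "defer to the index setting") as well as explicit true and
     * false, which is why requestCache is a Boolean rather than a boolean. The helper name and the use of
     * BytesStreamOutput here are illustrative assumptions.
     */
    static Boolean roundTripRequestCacheFlag(Boolean requestCache) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeOptionalBoolean(requestCache);                 // null, TRUE and FALSE are all preserved
        return out.bytes().streamInput().readOptionalBoolean(); // null means "use the index-level setting"
    }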
|
||||
|
||||
@Override
|
||||
|
@ -60,7 +60,7 @@ public interface ShardSearchRequest extends HasContextAndHeaders {
|
||||
|
||||
BytesReference templateSource();
|
||||
|
||||
Boolean queryCache();
|
||||
Boolean requestCache();
|
||||
|
||||
Scroll scroll();
|
||||
|
||||
|
@ -132,8 +132,8 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
|
||||
}
|
||||
|
||||
@Override
|
||||
public Boolean queryCache() {
|
||||
return shardSearchLocalRequest.queryCache();
|
||||
public Boolean requestCache() {
|
||||
return shardSearchLocalRequest.requestCache();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -19,12 +19,12 @@
|
||||
|
||||
package org.elasticsearch.search.warmer;
|
||||
|
||||
import com.google.common.base.Objects;
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import com.google.common.collect.Lists;
|
||||
import org.elasticsearch.Version;
|
||||
|
||||
import org.elasticsearch.cluster.AbstractDiffable;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
@ -67,13 +67,13 @@ public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom>
|
||||
private final String name;
|
||||
private final String[] types;
|
||||
private final BytesReference source;
|
||||
private final Boolean queryCache;
|
||||
private final Boolean requestCache;
|
||||
|
||||
public Entry(String name, String[] types, Boolean queryCache, BytesReference source) {
|
||||
public Entry(String name, String[] types, Boolean requestCache, BytesReference source) {
|
||||
this.name = name;
|
||||
this.types = types == null ? Strings.EMPTY_ARRAY : types;
|
||||
this.source = source;
|
||||
this.queryCache = queryCache;
|
||||
this.requestCache = requestCache;
|
||||
}
|
||||
|
||||
public String name() {
|
||||
@ -90,8 +90,8 @@ public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom>
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public Boolean queryCache() {
|
||||
return this.queryCache;
|
||||
public Boolean requestCache() {
|
||||
return this.requestCache;
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -104,7 +104,7 @@ public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom>
|
||||
if (!name.equals(entry.name)) return false;
|
||||
if (!Arrays.equals(types, entry.types)) return false;
|
||||
if (!source.equals(entry.source)) return false;
|
||||
return !(queryCache != null ? !queryCache.equals(entry.queryCache) : entry.queryCache != null);
|
||||
return Objects.equal(requestCache, entry.requestCache);
|
||||
|
||||
}
|
||||
|
||||
@ -113,7 +113,7 @@ public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom>
|
||||
int result = name.hashCode();
|
||||
result = 31 * result + Arrays.hashCode(types);
|
||||
result = 31 * result + source.hashCode();
|
||||
result = 31 * result + (queryCache != null ? queryCache.hashCode() : 0);
|
||||
result = 31 * result + (requestCache != null ? requestCache.hashCode() : 0);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
@ -163,7 +163,7 @@ public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom>
|
||||
out.writeBoolean(true);
|
||||
out.writeBytesReference(entry.source());
|
||||
}
|
||||
out.writeOptionalBoolean(entry.queryCache());
|
||||
out.writeOptionalBoolean(entry.requestCache());
|
||||
}
|
||||
}
|
||||
|
||||
@ -241,8 +241,8 @@ public class IndexWarmersMetaData extends AbstractDiffable<IndexMetaData.Custom>
|
||||
boolean binary = params.paramAsBoolean("binary", false);
|
||||
builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.field("types", entry.types());
|
||||
if (entry.queryCache() != null) {
|
||||
builder.field("queryCache", entry.queryCache());
|
||||
if (entry.requestCache() != null) {
|
||||
builder.field("queryCache", entry.requestCache());
|
||||
}
|
||||
builder.field("source");
|
||||
if (binary) {
|
||||
|
@ -44,7 +44,7 @@ public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest {
|
||||
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
|
||||
try {
|
||||
enableIndexBlock("test", blockSetting);
|
||||
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setFieldDataCache(true).execute().actionGet();
|
||||
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true).execute().actionGet();
|
||||
assertNoFailures(clearIndicesCacheResponse);
|
||||
assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
} finally {
|
||||
@ -55,7 +55,7 @@ public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest {
|
||||
for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
|
||||
try {
|
||||
enableIndexBlock("test", blockSetting);
|
||||
assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setFieldDataCache(true));
|
||||
assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setQueryCache(true).setFieldDataCache(true));
|
||||
} finally {
|
||||
disableIndexBlock("test", blockSetting);
|
||||
}
|
||||
|
@ -106,12 +106,12 @@ public class MultiSearchRequestTests extends ElasticsearchTestCase {
|
||||
assertThat(request.requests().size(), equalTo(3));
|
||||
assertThat(request.requests().get(0).indices()[0], equalTo("test0"));
|
||||
assertThat(request.requests().get(0).indices()[1], equalTo("test1"));
|
||||
assertThat(request.requests().get(0).queryCache(), equalTo(true));
|
||||
assertThat(request.requests().get(0).requestCache(), equalTo(true));
|
||||
assertThat(request.requests().get(0).preference(), nullValue());
|
||||
assertThat(request.requests().get(1).indices()[0], equalTo("test2"));
|
||||
assertThat(request.requests().get(1).indices()[1], equalTo("test3"));
|
||||
assertThat(request.requests().get(1).types()[0], equalTo("type1"));
|
||||
assertThat(request.requests().get(1).queryCache(), nullValue());
|
||||
assertThat(request.requests().get(1).requestCache(), nullValue());
|
||||
assertThat(request.requests().get(1).preference(), equalTo("_local"));
|
||||
assertThat(request.requests().get(2).indices()[0], equalTo("test4"));
|
||||
assertThat(request.requests().get(2).indices()[1], equalTo("test1"));
|
||||
|
@ -78,7 +78,7 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest {
|
||||
assertThat(indexExists("test1234565"), equalTo(false));
|
||||
|
||||
logger.info("Clearing cache");
|
||||
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true)).actionGet();
|
||||
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).queryCache(true)).actionGet();
|
||||
assertNoFailures(clearIndicesCacheResponse);
|
||||
assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
|
||||
|
||||
|
@ -21,7 +21,7 @@ package org.elasticsearch.indices.cache.query;
|
||||
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket;
|
||||
@ -34,13 +34,13 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
|
||||
public class IndicesQueryCacheTests extends ElasticsearchIntegrationTest {
|
||||
public class IndicesRequestCacheTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
// One of the primary purposes of the query cache is to cache aggs results
|
||||
public void testCacheAggs() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("index")
|
||||
.addMapping("type", "f", "type=date")
|
||||
.setSettings(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true).get());
|
||||
.setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true).get());
|
||||
indexRandom(true,
|
||||
client().prepareIndex("index", "type").setSource("f", "2014-03-10T00:00:00.000Z"),
|
||||
client().prepareIndex("index", "type").setSource("f", "2014-05-13T00:00:00.000Z"));
|
||||
@ -54,7 +54,7 @@ public class IndicesQueryCacheTests extends ElasticsearchIntegrationTest {
|
||||
assertSearchResponse(r1);
|
||||
|
||||
// The cached is actually used
|
||||
assertThat(client().admin().indices().prepareStats("index").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("index").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
final SearchResponse r2 = client().prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH)
|
@ -39,14 +39,14 @@ import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheStats;
|
||||
import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheStats;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.shard.MergePolicyConfig;
|
||||
import org.elasticsearch.index.store.IndexStore;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest;
|
||||
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
|
||||
@ -78,9 +78,9 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
//Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad
|
||||
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
|
||||
.put(IndicesQueryCache.INDICES_CACHE_QUERY_CLEAN_INTERVAL, "1ms")
|
||||
.put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, "1ms")
|
||||
.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
|
||||
.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class)
|
||||
.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class)
|
||||
.build();
|
||||
}
|
||||
|
||||
@ -146,10 +146,10 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test")
|
||||
.clear().setFieldData(true).setFilterCache(true)
|
||||
.clear().setFieldData(true).setQueryCache(true)
|
||||
.execute().actionGet();
|
||||
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
// sort to load it to field data and filter to load filter cache
|
||||
client().prepareSearch()
|
||||
@ -167,10 +167,10 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
indicesStats = client().admin().indices().prepareStats("test")
|
||||
.clear().setFieldData(true).setFilterCache(true)
|
||||
.clear().setFieldData(true).setQueryCache(true)
|
||||
.execute().actionGet();
|
||||
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
client().admin().indices().prepareClearCache().execute().actionGet();
|
||||
Thread.sleep(100); // Make sure the filter cache entries have been removed...
|
||||
@ -180,15 +180,15 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
indicesStats = client().admin().indices().prepareStats("test")
|
||||
.clear().setFieldData(true).setFilterCache(true)
|
||||
.clear().setFieldData(true).setQueryCache(true)
|
||||
.execute().actionGet();
|
||||
assertThat(indicesStats.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(indicesStats.getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testQueryCache() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("idx").setSettings(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true).get());
|
||||
assertAcked(client().admin().indices().prepareCreate("idx").setSettings(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true).get());
|
||||
ensureGreen();
|
||||
|
||||
// index docs until we have at least one doc on each shard, otherwise, our tests will not work
|
||||
@ -221,15 +221,15 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
}
|
||||
}
|
||||
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), equalTo(0l));
|
||||
for (int i = 0; i < 10; i++) {
|
||||
assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
}
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getHitCount(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMissCount(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getHitCount(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMissCount(), greaterThan(0l));
|
||||
|
||||
// index the data again...
|
||||
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
|
||||
@ -245,36 +245,36 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
assertBusy(new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
}
|
||||
});
|
||||
|
||||
for (int i = 0; i < 10; i++) {
|
||||
assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
}
|
||||
|
||||
client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
// test explicit request parameter
|
||||
|
||||
assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(false).get().getHits().getTotalHits(), equalTo((long) numDocs));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(true).get().getHits().getTotalHits(), equalTo((long) numDocs));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
// set the index level setting to false, and see that the reverse works
|
||||
|
||||
client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("idx").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, false)));
|
||||
client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("idx").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false)));
|
||||
|
||||
assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
assertThat(client().prepareSearch("idx").setSearchType(SearchType.QUERY_THEN_FETCH).setSize(0).setQueryCache(true).get().getHits().getTotalHits(), equalTo((long) numDocs));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("idx").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
}
|
||||
|
||||
|
||||
@ -638,8 +638,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
@Test
|
||||
public void testFlagOrdinalOrder() {
|
||||
Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh,
|
||||
Flag.FilterCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments,
|
||||
Flag.Translog, Flag.Suggest, Flag.QueryCache, Flag.Recovery};
|
||||
Flag.QueryCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments,
|
||||
Flag.Translog, Flag.Suggest, Flag.RequestCache, Flag.Recovery};
|
||||
|
||||
assertThat(flags.length, equalTo(Flag.values().length));
|
||||
for (int i = 0; i < flags.length; i++) {
|
||||
@ -862,8 +862,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
case FieldData:
|
||||
builder.setFieldData(set);
|
||||
break;
|
||||
case FilterCache:
|
||||
builder.setFilterCache(set);
|
||||
case QueryCache:
|
||||
builder.setQueryCache(set);
|
||||
break;
|
||||
case Flush:
|
||||
builder.setFlush(set);
|
||||
@ -904,8 +904,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
case Suggest:
|
||||
builder.setSuggest(set);
|
||||
break;
|
||||
case QueryCache:
|
||||
builder.setQueryCache(set);
|
||||
case RequestCache:
|
||||
builder.setRequestCache(set);
|
||||
break;
|
||||
case Recovery:
|
||||
builder.setRecovery(set);
|
||||
@ -922,8 +922,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
return response.getDocs() != null;
|
||||
case FieldData:
|
||||
return response.getFieldData() != null;
|
||||
case FilterCache:
|
||||
return response.getFilterCache() != null;
|
||||
case QueryCache:
|
||||
return response.getQueryCache() != null;
|
||||
case Flush:
|
||||
return response.getFlush() != null;
|
||||
case Get:
|
||||
@ -950,8 +950,8 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
return response.getTranslog() != null;
|
||||
case Suggest:
|
||||
return response.getSuggest() != null;
|
||||
case QueryCache:
|
||||
return response.getQueryCache() != null;
|
||||
case RequestCache:
|
||||
return response.getRequestCache() != null;
|
||||
case Recovery:
|
||||
return response.getRecoveryStats() != null;
|
||||
default:
|
||||
@ -960,7 +960,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
}
|
||||
}
|
||||
|
||||
private void assertEquals(FilterCacheStats stats1, FilterCacheStats stats2) {
|
||||
private void assertEquals(QueryCacheStats stats1, QueryCacheStats stats2) {
|
||||
assertEquals(stats1.getCacheCount(), stats2.getCacheCount());
|
||||
assertEquals(stats1.getCacheSize(), stats2.getCacheSize());
|
||||
assertEquals(stats1.getEvictions(), stats2.getEvictions());
|
||||
@ -972,13 +972,13 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) {
|
||||
assertAllSuccessful(response);
|
||||
FilterCacheStats total = response.getTotal().filterCache;
|
||||
FilterCacheStats indexTotal = new FilterCacheStats();
|
||||
FilterCacheStats shardTotal = new FilterCacheStats();
|
||||
QueryCacheStats total = response.getTotal().queryCache;
|
||||
QueryCacheStats indexTotal = new QueryCacheStats();
|
||||
QueryCacheStats shardTotal = new QueryCacheStats();
|
||||
for (IndexStats indexStats : response.getIndices().values()) {
|
||||
indexTotal.add(indexStats.getTotal().filterCache);
|
||||
indexTotal.add(indexStats.getTotal().queryCache);
|
||||
for (ShardStats shardStats : response.getShards()) {
|
||||
shardTotal.add(shardStats.getStats().filterCache);
|
||||
shardTotal.add(shardStats.getStats().queryCache);
|
||||
}
|
||||
}
|
||||
assertEquals(total, indexTotal);
|
||||
@ -992,58 +992,58 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
|
||||
client().prepareIndex("index", "type", "2").setSource("foo", "baz"));
|
||||
ensureGreen();
|
||||
|
||||
IndicesStatsResponse response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
|
||||
IndicesStatsResponse response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
|
||||
assertCumulativeFilterCacheStats(response);
|
||||
assertEquals(0, response.getTotal().filterCache.getCacheSize());
|
||||
assertEquals(0, response.getTotal().queryCache.getCacheSize());
|
||||
|
||||
SearchResponse r;
|
||||
assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
|
||||
response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
|
||||
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
|
||||
assertCumulativeFilterCacheStats(response);
|
||||
assertThat(response.getTotal().filterCache.getHitCount(), equalTo(0L));
|
||||
assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L));
|
||||
assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getHitCount(), equalTo(0L));
|
||||
assertThat(response.getTotal().queryCache.getEvictions(), equalTo(0L));
|
||||
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getCacheSize(), greaterThan(0L));
|
||||
|
||||
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
|
||||
response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
|
||||
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
|
||||
assertCumulativeFilterCacheStats(response);
|
||||
assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L));
|
||||
assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getEvictions(), equalTo(0L));
|
||||
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getCacheSize(), greaterThan(0L));
|
||||
|
||||
assertTrue(client().prepareDelete("index", "type", "1").get().isFound());
|
||||
assertTrue(client().prepareDelete("index", "type", "2").get().isFound());
|
||||
refresh();
|
||||
response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
|
||||
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
|
||||
assertCumulativeFilterCacheStats(response);
|
||||
assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L));
|
||||
assertThat(response.getTotal().filterCache.getCacheCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L));
|
||||
assertThat(response.getTotal().queryCache.getCacheCount(), greaterThan(0L));
|
||||
|
||||
indexRandom(true,
|
||||
client().prepareIndex("index", "type", "1").setSource("foo", "bar"),
|
||||
client().prepareIndex("index", "type", "2").setSource("foo", "baz"));
|
||||
assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get());
|
||||
|
||||
response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
|
||||
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
|
||||
assertCumulativeFilterCacheStats(response);
|
||||
assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getCacheSize(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), greaterThan(0L));
|
||||
|
||||
assertAllSuccessful(client().admin().indices().prepareClearCache("index").setFilterCache(true).get());
|
||||
response = client().admin().indices().prepareStats("index").setFilterCache(true).get();
|
||||
assertAllSuccessful(client().admin().indices().prepareClearCache("index").setQueryCache(true).get());
|
||||
response = client().admin().indices().prepareStats("index").setQueryCache(true).get();
|
||||
assertCumulativeFilterCacheStats(response);
|
||||
assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L));
|
||||
assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), equalTo(0L));
|
||||
assertThat(response.getTotal().queryCache.getHitCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getEvictions(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getMissCount(), greaterThan(0L));
|
||||
assertThat(response.getTotal().queryCache.getCacheSize(), equalTo(0L));
|
||||
assertThat(response.getTotal().queryCache.getMemorySizeInBytes(), equalTo(0L));
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.index.engine.Segment;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType.Loading;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.warmer.IndexWarmerMissingException;
|
||||
import org.elasticsearch.search.warmer.IndexWarmersMetaData;
|
||||
@ -345,14 +345,14 @@ public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, false)));
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, false)));
|
||||
logger.info("register warmer with no query cache, validate no cache is used");
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
|
||||
.get());
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
logger.info("register warmer with query cache, validate caching happened");
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
@ -361,13 +361,13 @@ public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
// index again, to make sure it gets refreshed
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
|
||||
client().admin().indices().prepareClearCache().setQueryCache(true).get(); // clean the cache
|
||||
assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
client().admin().indices().prepareClearCache().setRequestCache(true).get(); // clean the cache
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), equalTo(0l));
|
||||
|
||||
logger.info("enable default query caching on the index level, and test that no flag on warmer still caches");
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, true)));
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, true)));
|
||||
|
||||
assertAcked(client().admin().indices().preparePutWarmer("warmer_1")
|
||||
.setSearchRequest(client().prepareSearch("test").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))
|
||||
@ -375,6 +375,6 @@ public class SimpleIndicesWarmerTests extends ElasticsearchIntegrationTest {
|
||||
|
||||
// index again, to make sure it gets refreshed
|
||||
client().prepareIndex("test", "type1", "1").setSource("field", "value1").setRefresh(true).execute().actionGet();
|
||||
assertThat(client().admin().indices().prepareStats("test").setQueryCache(true).get().getTotal().getQueryCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
assertThat(client().admin().indices().prepareStats("test").setRequestCache(true).get().getTotal().getRequestCache().getMemorySizeInBytes(), greaterThan(0l));
|
||||
}
|
||||
}
|
||||
|
@ -30,9 +30,9 @@ import org.elasticsearch.action.search.SearchType;
|
||||
import org.elasticsearch.common.lucene.search.function.CombineFunction;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
|
||||
import org.elasticsearch.index.mapper.MergeMappingException;
|
||||
import org.elasticsearch.index.query.HasChildQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
@ -75,7 +75,7 @@ public class ChildQuerySearchTests extends ElasticsearchIntegrationTest {
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
|
||||
// aggressive filter caching so that we can assert on the filter cache size
|
||||
.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class)
|
||||
.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class)
|
||||
.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
|
||||
.build();
|
||||
}
|
||||
|
@ -21,9 +21,9 @@ package org.elasticsearch.search.scriptfilter;
|
||||
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService.ScriptType;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
@ -51,7 +51,7 @@ public class ScriptQuerySearchTests extends ElasticsearchIntegrationTest {
|
||||
protected Settings nodeSettings(int nodeOrdinal) {
|
||||
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
|
||||
// aggressive filter caching so that we can assert on the number of iterations of the script filters
|
||||
.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class)
|
||||
.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexQueryCache.class)
|
||||
.put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true)
|
||||
.build();
|
||||
}
|
||||
|
@ -27,6 +27,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks;
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.common.base.Predicate;
|
||||
import com.google.common.collect.Lists;
|
||||
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.http.impl.client.HttpClients;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
@ -108,7 +109,7 @@ import org.elasticsearch.index.translog.TranslogConfig;
|
||||
import org.elasticsearch.index.translog.TranslogService;
|
||||
import org.elasticsearch.index.translog.TranslogWriter;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.indices.flush.IndicesSyncedFlushResult;
|
||||
import org.elasticsearch.indices.flush.SyncedFlushService;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
@ -463,7 +464,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
|
||||
}
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
builder.put(IndicesQueryCache.INDEX_CACHE_QUERY_ENABLED, random.nextBoolean());
|
||||
builder.put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, random.nextBoolean());
|
||||
}
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
|
@ -222,7 +222,7 @@ public abstract class ElasticsearchSingleNodeTest extends ElasticsearchTestCase
|
||||
BigArrays bigArrays = indexService.injector().getInstance(BigArrays.class);
|
||||
ThreadPool threadPool = indexService.injector().getInstance(ThreadPool.class);
|
||||
PageCacheRecycler pageCacheRecycler = indexService.injector().getInstance(PageCacheRecycler.class);
|
||||
return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, indexService, indexService.cache().filter(), indexService.fieldData());
|
||||
return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, indexService, indexService.cache().query(), indexService.fieldData());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -30,6 +30,7 @@ import com.google.common.collect.*;
|
||||
import com.google.common.util.concurrent.Futures;
|
||||
import com.google.common.util.concurrent.ListenableFuture;
|
||||
import com.google.common.util.concurrent.SettableFuture;
|
||||
|
||||
import org.apache.lucene.store.StoreRateLimiting;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
@ -71,10 +72,10 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.http.HttpServerTransport;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule;
|
||||
import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
|
||||
import org.elasticsearch.index.cache.filter.none.NoneFilterCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule;
|
||||
import org.elasticsearch.index.cache.query.QueryCacheModule.FilterCacheSettings;
|
||||
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
|
||||
import org.elasticsearch.index.cache.query.none.NoneQueryCache;
|
||||
import org.elasticsearch.index.engine.CommitStats;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.EngineClosedException;
|
||||
@ -85,7 +86,7 @@ import org.elasticsearch.index.store.IndexStoreModule;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
@ -437,7 +438,7 @@ public final class InternalTestCluster extends TestCluster {
|
||||
}
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexFilterCache.class : NoneFilterCache.class);
|
||||
builder.put(QueryCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexQueryCache.class : NoneQueryCache.class);
|
||||
}
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
@ -468,7 +469,7 @@ public final class InternalTestCluster extends TestCluster {
|
||||
}
|
||||
|
||||
if (random.nextBoolean()) {
|
||||
builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32));
|
||||
builder.put(IndicesRequestCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32));
|
||||
builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32));
|
||||
}
|
||||
if (random.nextBoolean()) {
|
||||
|
@ -19,6 +19,7 @@
|
||||
package org.elasticsearch.test;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
|
||||
|
||||
import org.apache.lucene.search.Filter;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
@ -34,7 +35,7 @@ import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.analysis.AnalysisService;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
import org.elasticsearch.index.cache.filter.FilterCache;
|
||||
import org.elasticsearch.index.cache.query.QueryCache;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataService;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
@ -83,7 +84,7 @@ public class TestSearchContext extends SearchContext {
|
||||
private String[] types;
|
||||
private SearchContextAggregations aggregations;
|
||||
|
||||
public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService, FilterCache filterCache, IndexFieldDataService indexFieldDataService) {
|
||||
public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService, QueryCache filterCache, IndexFieldDataService indexFieldDataService) {
|
||||
this.pageCacheRecycler = pageCacheRecycler;
|
||||
this.bigArrays = bigArrays.withCircuitBreaking();
|
||||
this.indexService = indexService;
|
||||
|
@ -82,8 +82,8 @@ public final class MockEngineSupport {
|
||||
public MockEngineSupport(EngineConfig config) {
|
||||
Settings indexSettings = config.getIndexSettings();
|
||||
shardId = config.getShardId();
|
||||
filterCache = config.getFilterCache();
|
||||
filterCachingPolicy = config.getFilterCachingPolicy();
|
||||
filterCache = config.getQueryCache();
|
||||
filterCachingPolicy = config.getQueryCachingPolicy();
|
||||
final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l);
|
||||
Random random = new Random(seed);
|
||||
final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow
|
||||
|
@ -26,7 +26,7 @@ import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.IndicesWarmer;
|
||||
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.search.dfs.DfsPhase;
|
||||
@ -54,7 +54,7 @@ public class MockSearchService extends SearchService {
|
||||
@Inject
|
||||
public MockSearchService(Settings settings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer,
|
||||
ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
|
||||
DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesQueryCache indicesQueryCache) {
|
||||
DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) {
|
||||
super(settings, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase,
|
||||
queryPhase, fetchPhase, indicesQueryCache);
|
||||
}
|
||||
|
@ -71,7 +71,7 @@ client.update(updateRequest).get();
|
||||
[[java-update-api-upsert]]
|
||||
=== Upsert
|
||||
|
||||
There is also support for `upsert`. If the document does not already exists, the content of the `upsert`
|
||||
There is also support for `upsert`. If the document already exists, the content of the `upsert`
|
||||
element will be used to index the fresh doc:
|
||||
|
||||
[source,java]
|
||||
|
@ -7,7 +7,7 @@ can be cached for faster responses. These cached results are the same results
|
||||
that would be returned by an uncached aggregation -- you will never get stale
|
||||
results.
|
||||
|
||||
See <<shard-query-cache>> for more details.
|
||||
See <<shard-request-cache>> for more details.
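As an editor's illustration (not part of this change), a minimal request of the kind this cache is designed for, a `size=0` search that returns only aggregation results; the index `my_index`, the aggregation name and the `colors` field are hypothetical, and the `request_cache` flag is described in <<shard-request-cache>>:

[source,json]
--------------------------------------------------
curl 'localhost:9200/my_index/_search?request_cache=true' -d'
{
  "size": 0,
  "aggs": {
    "popular_colors": {
      "terms": { "field": "colors" }
    }
  }
}'
--------------------------------------------------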
|
||||
|
||||
[[returning-only-agg-results]]
|
||||
== Returning only aggregation results
|
||||
@ -73,4 +73,4 @@ Then that piece of metadata will be returned in place for our `titles` terms agg
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
--------------------------------------------------
|
||||
|
@ -10,7 +10,7 @@ $ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
|
||||
--------------------------------------------------
|
||||
|
||||
The API, by default, will clear all caches. Specific caches can be cleaned
|
||||
explicitly by setting `filter`, `fielddata` or `query_cache`.
|
||||
explicitly by setting `query`, `fielddata` or `request`.
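For illustration (an editor's sketch, not part of this change), clearing just the request cache for the `twitter` index, assuming the `request` flag above is passed as a boolean query-string parameter like the others:

[source,json]
--------------------------------------------------
$ curl -XPOST 'http://localhost:9200/twitter/_cache/clear?request=true'
--------------------------------------------------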
|
||||
|
||||
All caches relating to a specific field(s) can also be cleared by
|
||||
specifying `fields` parameter with a comma delimited list of the
|
||||
@ -29,5 +29,3 @@ $ curl -XPOST 'http://localhost:9200/kimchy,elasticsearch/_cache/clear'
|
||||
$ curl -XPOST 'http://localhost:9200/_cache/clear'
|
||||
--------------------------------------------------
|
||||
|
||||
NOTE: The `filter` cache is not cleared immediately but is scheduled to be
|
||||
cleared within 60 seconds.
|
||||
|
@ -43,7 +43,7 @@ specified as well in the URI. Those stats can be any of:
|
||||
`fielddata`:: Fielddata statistics.
|
||||
`flush`:: Flush statistics.
|
||||
`merge`:: Merge statistics.
|
||||
`query_cache`:: <<shard-query-cache,Shard query cache>> statistics.
|
||||
`request_cache`:: <<shard-request-cache,Shard request cache>> statistics.
|
||||
`refresh`:: Refresh statistics.
|
||||
`suggest`:: Suggest statistics.
|
||||
`warmer`:: Warmer statistics.
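For example (an editor's illustration that simply mirrors the monitoring command shown later in the shard request cache docs), restricting the stats output to a single metric:

[source,json]
--------------------------------------------------
curl 'localhost:9200/_stats/request_cache?pretty&human'
--------------------------------------------------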
|
||||
|
@ -14,17 +14,17 @@ Available settings include:
|
||||
|
||||
Set limits on the amount of heap used by the in-memory fielddata cache.
|
||||
|
||||
<<filter-cache,Node filter cache>>::
|
||||
<<query-cache,Node query cache>>::
|
||||
|
||||
Configure the amount heap used to cache filter results.
|
||||
Configure the amount of heap used to cache query results.
|
||||
|
||||
<<indexing-buffer,Indexing buffer>>::
|
||||
|
||||
Control the size of the buffer allocated to the indexing process.
|
||||
|
||||
<<shard-query-cache,Shard query cache>>::
|
||||
<<shard-request-cache,Shard request cache>>::
|
||||
|
||||
Control the behaviour of the shard-level query cache.
|
||||
Control the behaviour of the shard-level request cache.
|
||||
|
||||
<<recovery,Recovery>>::
|
||||
|
||||
@ -38,11 +38,11 @@ include::indices/circuit_breaker.asciidoc[]
|
||||
|
||||
include::indices/fielddata.asciidoc[]
|
||||
|
||||
include::indices/filter_cache.asciidoc[]
|
||||
include::indices/query_cache.asciidoc[]
|
||||
|
||||
include::indices/indexing_buffer.asciidoc[]
|
||||
|
||||
include::indices/query-cache.asciidoc[]
|
||||
include::indices/request_cache.asciidoc[]
|
||||
|
||||
include::indices/recovery.asciidoc[]
|
||||
|
||||
|
@ -1,16 +0,0 @@
|
||||
[[filter-cache]]
|
||||
=== Node Filter Cache
|
||||
|
||||
The filter cache is responsible for caching the results of filters (used in
|
||||
the query). There is one filter cache per node that is shared by all shards.
|
||||
The cache implements an LRU eviction policy: when a cache becomes full, the
|
||||
least recently used data is evicted to make way for new data.
|
||||
|
||||
The following setting is _static_ and must be configured on every data node in
|
||||
the cluster:
|
||||
|
||||
`indices.cache.filter.size`::
|
||||
|
||||
Controls the memory size for the filter cache , defaults to `10%`. Accepts
|
||||
either a percentage value, like `30%`, or an exact value, like `512mb`.
|
||||
|
18
docs/reference/modules/indices/query_cache.asciidoc
Normal file
@ -0,0 +1,18 @@
|
||||
[[query-cache]]
|
||||
=== Node Query Cache
|
||||
|
||||
The query cache is responsible for caching the results of queries.
|
||||
There is one query cache per node that is shared by all shards.
|
||||
The cache implements an LRU eviction policy: when a cache becomes full, the
|
||||
least recently used data is evicted to make way for new data.
|
||||
|
||||
The query cache only caches queries which are being used in a filter context.
|
||||
|
||||
The following setting is _static_ and must be configured on every data node in
|
||||
the cluster:
|
||||
|
||||
`indices.queries.cache.size`::
|
||||
|
||||
Controls the memory size for the query cache, defaults to `10%`. Accepts
|
||||
either a percentage value, like `5%`, or an exact value, like `512mb`.
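A minimal sketch of how this static setting is typically applied (an editor's illustration, not part of the change), in `config/elasticsearch.yml` on each data node:

[source,yaml]
--------------------------------
indices.queries.cache.size: 5%
--------------------------------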
|
||||
|
@ -1,21 +1,21 @@
|
||||
[[shard-query-cache]]
|
||||
=== Shard query cache
|
||||
[[shard-request-cache]]
|
||||
=== Shard request cache
|
||||
|
||||
When a search request is run against an index or against many indices, each
|
||||
involved shard executes the search locally and returns its local results to
|
||||
the _coordinating node_, which combines these shard-level results into a
|
||||
``global'' result set.
|
||||
|
||||
The shard-level query cache module caches the local results on each shard.
|
||||
The shard-level request cache module caches the local results on each shard.
|
||||
This allows frequently used (and potentially heavy) search requests to return
|
||||
results almost instantly. The query cache is a very good fit for the logging
|
||||
results almost instantly. The request cache is a very good fit for the logging
|
||||
use case, where only the most recent index is being actively updated --
|
||||
results from older indices will be served directly from the cache.
|
||||
|
||||
[IMPORTANT]
|
||||
===================================
|
||||
|
||||
For now, the query cache will only cache the results of search requests
|
||||
For now, the request cache will only cache the results of search requests
|
||||
where `size=0`, so it will not cache `hits`,
|
||||
but it will cache `hits.total`, <<search-aggregations,aggregations>>, and
|
||||
<<search-suggesters,suggestions>>.
|
||||
@ -42,7 +42,7 @@ The cache can be expired manually with the <<indices-clearcache,`clear-cache` AP
|
||||
|
||||
[source,json]
|
||||
------------------------
|
||||
curl -XPOST 'localhost:9200/kimchy,elasticsearch/_cache/clear?query_cache=true'
|
||||
curl -XPOST 'localhost:9200/kimchy,elasticsearch/_cache/clear?request_cache=true'
|
||||
------------------------
|
||||
|
||||
[float]
|
||||
@ -56,7 +56,7 @@ index as follows:
|
||||
curl -XPUT localhost:9200/my_index -d'
|
||||
{
|
||||
"settings": {
|
||||
"index.cache.query.enable": true
|
||||
"index.requests.cache.enable": true
|
||||
}
|
||||
}
|
||||
'
|
||||
@ -68,7 +68,7 @@ It can also be enabled or disabled dynamically on an existing index with the
|
||||
[source,json]
|
||||
-----------------------------
|
||||
curl -XPUT localhost:9200/my_index/_settings -d'
|
||||
{ "index.cache.query.enable": true }
|
||||
{ "index.requests.cache.enable": true }
|
||||
'
|
||||
-----------------------------
|
||||
|
||||
@ -76,11 +76,11 @@ curl -XPUT localhost:9200/my_index/_settings -d'
|
||||
==== Enabling caching per request
|
||||
|
||||
The `request_cache` query-string parameter can be used to enable or disable
|
||||
caching on a *per-query* basis. If set, it overrides the index-level setting:
|
||||
caching on a *per-request* basis. If set, it overrides the index-level setting:
|
||||
|
||||
[source,json]
|
||||
-----------------------------
|
||||
curl 'localhost:9200/my_index/_search?query_cache=true' -d'
|
||||
curl 'localhost:9200/my_index/_search?request_cache=true' -d'
|
||||
{
|
||||
"size": 0,
|
||||
"aggs": {
|
||||
@ -96,7 +96,7 @@ curl 'localhost:9200/my_index/_search?query_cache=true' -d'
|
||||
|
||||
IMPORTANT: If your query uses a script whose result is not deterministic (e.g.
|
||||
it uses a random function or references the current time) you should set the
|
||||
`query_cache` flag to `false` to disable caching for that request.
|
||||
`request_cache` flag to `false` to disable caching for that request.
|
||||
|
||||
[float]
|
||||
==== Cache key
|
||||
@ -117,10 +117,10 @@ of the heap. This can be changed in the `config/elasticsearch.yml` file with:
|
||||
|
||||
[source,yaml]
|
||||
--------------------------------
|
||||
indices.cache.query.size: 2%
|
||||
indices.requests.cache.size: 2%
|
||||
--------------------------------
|
||||
|
||||
Also, you can use the +indices.cache.query.expire+ setting to specify a TTL
|
||||
Also, you can use the +indices.requests.cache.expire+ setting to specify a TTL
|
||||
for cached results, but there should be no reason to do so. Remember that
|
||||
stale results are automatically invalidated when the index is refreshed. This
|
||||
setting is provided for completeness' sake only.
|
||||
@ -133,12 +133,12 @@ by index, with the <<indices-stats,`indices-stats`>> API:
|
||||
|
||||
[source,json]
|
||||
------------------------
|
||||
curl 'localhost:9200/_stats/query_cache?pretty&human'
|
||||
curl 'localhost:9200/_stats/request_cache?pretty&human'
|
||||
------------------------
|
||||
|
||||
or by node with the <<cluster-nodes-stats,`nodes-stats`>> API:
|
||||
|
||||
[source,json]
|
||||
------------------------
|
||||
curl 'localhost:9200/_nodes/stats/indices/query_cache?pretty&human'
|
||||
curl 'localhost:9200/_nodes/stats/indices/request_cache?pretty&human'
|
||||
------------------------
|
@ -17,10 +17,11 @@ by the query.
|
||||
"function_score": {
|
||||
"query": {},
|
||||
"boost": "boost for the whole query",
|
||||
"FUNCTION": {},
|
||||
"FUNCTION": {}, <1>
|
||||
"boost_mode":"(multiply|replace|...)"
|
||||
}
|
||||
--------------------------------------------------
|
||||
<1> See <<score-functions>> for a list of supported functions.
|
||||
|
||||
Furthermore, several functions can be combined. In this case one can
|
||||
optionally choose to apply the function only if a document matches a
|
||||
@ -34,11 +35,11 @@ given filtering query
|
||||
"functions": [
|
||||
{
|
||||
"filter": {},
|
||||
"FUNCTION": {},
|
||||
"FUNCTION": {}, <1>
|
||||
"weight": number
|
||||
},
|
||||
{
|
||||
"FUNCTION": {}
|
||||
"FUNCTION": {} <1>
|
||||
},
|
||||
{
|
||||
"filter": {},
|
||||
@ -51,6 +52,7 @@ given filtering query
|
||||
"min_score" : number
|
||||
}
|
||||
--------------------------------------------------
|
||||
<1> See <<score-functions>> for a list of supported functions.
|
||||
|
||||
NOTE: The scores produced by the filtering query of each function do not matter.
|
||||
|
||||
@ -89,8 +91,17 @@ query. The parameter `boost_mode` defines how:
|
||||
By default, modifying the score does not change which documents match. To exclude
|
||||
documents that do not meet a certain score threshold the `min_score` parameter can be set to the desired score threshold.
|
||||
|
||||
[[score-functions]]
|
||||
|
||||
The `function_score` query provides several types of score functions.
|
||||
|
||||
* <<function-script-score,`script_score`>>
|
||||
* <<function-weight,`weight`>>
|
||||
* <<function-random,`random_score`>>
|
||||
* <<function-field-value-factor,`field_value_factor`>>
|
||||
* <<function-decay,decay functions>>: `gauss`, `linear`, `exp`
|
||||
|
||||
[[function-script-score]]
|
||||
==== Script score
|
||||
|
||||
The `script_score` function allows you to wrap another query and customize
|
||||
@ -131,6 +142,7 @@ Note that unlike the `custom_score` query, the
|
||||
score of the query is multiplied with the result of the script scoring. If
|
||||
you wish to inhibit this, set `"boost_mode": "replace"`
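A minimal sketch of that combination (an editor's illustration; the `properties`/`balcony` match and the `price` field reuse the hotel example below, and the plain-string script form is assumed):

[source,js]
--------------------------------------------------
{
  "query": {
    "function_score": {
      "query": { "match": { "properties": "balcony" } },
      "script_score": {
        "script": "doc['price'].value"
      },
      "boost_mode": "replace"
    }
  }
}
--------------------------------------------------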
|
||||
|
||||
[[function-weight]]
|
||||
==== Weight
|
||||
|
||||
The `weight` score allows you to multiply the score by the provided
|
||||
@ -143,6 +155,7 @@ not.
|
||||
"weight" : number
|
||||
--------------------------------------------------
|
||||
|
||||
[[function-random]]
|
||||
==== Random
|
||||
|
||||
The `random_score` generates scores using a hash of the `_uid` field,
|
||||
@ -159,6 +172,7 @@ be a memory intensive operation since the values are unique.
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
||||
[[function-field-value-factor]]
|
||||
==== Field Value factor
|
||||
|
||||
The `field_value_factor` function allows you to use a field from a document to
|
||||
@ -186,23 +200,33 @@ Which will translate into the following formula for scoring:
|
||||
|
||||
There are a number of options for the `field_value_factor` function:
|
||||
|
||||
[cols="<,<",options="header",]
|
||||
|=======================================================================
|
||||
| Parameter |Description
|
||||
|`field` |Field to be extracted from the document.
|
||||
|`factor` |Optional factor to multiply the field value with, defaults to 1.
|
||||
|`modifier` |Modifier to apply to the field value, can be one of: `none`, `log`,
|
||||
`log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`.
|
||||
Defaults to `none`.
|
||||
|`missing` |Value used if the document doesn't have that field. The modifier
|
||||
and factor are still applied to it as though it were read from the document.
|
||||
|=======================================================================
|
||||
[horizontal]
|
||||
`field`::
|
||||
|
||||
Keep in mind that taking the log() of 0, or the square root of a negative number
|
||||
is an illegal operation, and an exception will be thrown. Be sure to limit the
|
||||
values of the field with a range filter to avoid this, or use `log1p` and
|
||||
`ln1p`.
|
||||
Field to be extracted from the document.
|
||||
|
||||
`factor`::
|
||||
|
||||
Optional factor to multiply the field value with, defaults to `1`.
|
||||
|
||||
`modifier`::
|
||||
|
||||
Modifier to apply to the field value, can be one of: `none`, `log`,
|
||||
`log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`.
|
||||
Defaults to `none`.
|
||||
|
||||
`missing`::
|
||||
|
||||
Value used if the document doesn't have that field. The modifier
|
||||
and factor are still applied to it as though it were read from the document.
|
||||
|
||||
|
||||
Keep in mind that taking the log() of 0, or the square root of a negative number
|
||||
is an illegal operation, and an exception will be thrown. Be sure to limit the
|
||||
values of the field with a range filter to avoid this, or use `log1p` and
|
||||
`ln1p`.
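A short sketch tying these parameters together (an editor's illustration; the `likes` field is hypothetical):

[source,js]
--------------------------------------------------
"field_value_factor": {
  "field": "likes",
  "factor": 1.2,
  "modifier": "sqrt",
  "missing": 1
}
--------------------------------------------------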
|
||||
|
||||
[[function-decay]]
|
||||
==== Decay functions
|
||||
|
||||
Decay functions score a document with a function that decays depending
|
||||
@ -218,8 +242,8 @@ decay function is specified as
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
"DECAY_FUNCTION": {
|
||||
"FIELD_NAME": {
|
||||
"DECAY_FUNCTION": { <1>
|
||||
"FIELD_NAME": { <2>
|
||||
"origin": "11, 12",
|
||||
"scale": "2km",
|
||||
"offset": "0km",
|
||||
@ -227,26 +251,26 @@ decay function is specified as
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
<1> The `DECAY_FUNCTION` should be one of `linear`, `exp`, or `gauss`.
|
||||
<2> The specified field must be a numeric, date, or geo-point field.
|
||||
|
||||
In the above example, the field is a <<mapping-geo-point-type>> and origin can be provided in geo format. `scale` and `offset` must be given with a unit in this case. If your field is a date field, you can set `scale` and `offset` as days, weeks, and so on. Example:
|
||||
|
||||
where `DECAY_FUNCTION` can be "linear", "exp" and "gauss" (see below). The specified field must be a numeric field. In the above example, the field is a <<mapping-geo-point-type>> and origin can be provided in geo format. `scale` and `offset` must be given with a unit in this case. If your field is a date field, you can set `scale` and `offset` as days, weeks, and so on. Example:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
"DECAY_FUNCTION": {
|
||||
"FIELD_NAME": {
|
||||
"origin": "2013-09-17",
|
||||
"gauss": {
|
||||
"date": {
|
||||
"origin": "2013-09-17", <1>
|
||||
"scale": "10d",
|
||||
"offset": "5d",
|
||||
"decay" : 0.5
|
||||
"offset": "5d", <2>
|
||||
"decay" : 0.5 <2>
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
The format of the origin depends on the <<mapping-date-format>> defined in your mapping. If you do not define the origin, the current time is used.
|
||||
|
||||
|
||||
The `offset` and `decay` parameters are optional.
|
||||
<1> The date format of the origin depends on the <<mapping-date-format>> defined in
|
||||
your mapping. If you do not define the origin, the current time is used.
|
||||
<2> The `offset` and `decay` parameters are optional.
|
||||
|
||||
[horizontal]
|
||||
`origin`::
|
||||
@ -284,37 +308,45 @@ from the desired location.
|
||||
|
||||
In the second example, documents with a field value between 2013-09-12 and 2013-09-22 would get a weight of 1.0 and documents which are 15 days from that date a weight of 0.5.
|
||||
|
||||
|
||||
===== Supported decay functions
|
||||
|
||||
The `DECAY_FUNCTION` determines the shape of the decay:
|
||||
|
||||
[horizontal]
|
||||
`gauss`::
|
||||
|
||||
Normal decay, computed as:
|
||||
+
|
||||
--
|
||||
Normal decay, computed as:
|
||||
|
||||
image:images/Gaussian.png[]
|
||||
|
||||
where image:images/sigma.png[] is computed to assure that the score takes the value `decay` at distance `scale` from `origin`+-`offset`
|
||||
|
||||
image:images/sigma_calc.png[]
|
||||
|
||||
[horizontal]
|
||||
`exp`::
|
||||
See <<gauss-decay>> for graphs demonstrating the curve generated by the `gauss` function.
|
||||
|
||||
Exponential decay, computed as:
|
||||
--
|
||||
|
||||
`exp`::
|
||||
+
|
||||
--
|
||||
Exponential decay, computed as:
|
||||
|
||||
image:images/Exponential.png[]
|
||||
|
||||
where again the parameter image:images/lambda.png[] is computed to assure that the score takes the value `decay` at distance `scale` from `origin`+-`offset`
|
||||
|
||||
image:images/lambda_calc.png[]
|
||||
|
||||
[horizontal]
|
||||
`linear`::
|
||||
See <<exp-decay>> for graphs demonstrating the curve generated by the `exp` function.
|
||||
|
||||
Linear decay, computed as:
|
||||
--
|
||||
|
||||
`linear`::
|
||||
+
|
||||
--
|
||||
Linear decay, computed as:
|
||||
|
||||
image:images/Linear.png[].
|
||||
|
||||
|
||||
@ -325,12 +357,13 @@ image:images/s_calc.png[]
|
||||
In contrast to the normal and exponential decay, this function actually
|
||||
sets the score to 0 if the field value exceeds twice the user given
|
||||
scale value.
|
||||
--
|
||||
|
||||
For single functions, the three decay functions together with their parameters can be visualized like this (the field in this example is called "age"):
|
||||
|
||||
image:images/decay_2d.png[width=600]
|
||||
|
||||
===== Multiple values:
|
||||
===== Multi-values fields
|
||||
|
||||
If a field used for computing the decay contains multiple values, per default the value closest to the origin is chosen for determining the distance.
|
||||
This can be changed by setting `multi_value_mode`.
|
||||
@ -355,8 +388,7 @@ Example:
|
||||
--------------------------------------------------
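
The example body itself lies outside this hunk. Purely as an illustration (not the file's original example), a decay clause that averages over all values of a multi-valued date field might look like the sketch below; the `avg` mode name is an assumption here.

[source,js]
--------------------------------------------------
"gauss": {
    "date": {
          "origin": "2013-09-17",
          "scale": "10d"
    },
    "multi_value_mode": "avg" <1>
}
--------------------------------------------------
<1> Hypothetical illustration: use the average of the field's values as the distance instead of the closest one.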

==== Detailed example

Suppose you are searching for a hotel in a certain town. Your budget is
limited. Also, you would like the hotel to be close to the town center,
@ -383,69 +415,72 @@ The function for `price` in this case would be

[source,js]
--------------------------------------------------
"gauss": { <1>
    "price": {
          "origin": "0",
          "scale": "20"
    }
}
--------------------------------------------------
<1> This decay function could also be `linear` or `exp`.

and for `location`:

[source,js]
--------------------------------------------------
"gauss": { <1>
    "location": {
          "origin": "11, 12",
          "scale": "2km"
    }
}
--------------------------------------------------
<1> This decay function could also be `linear` or `exp`.
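
Since `location` is a geo-point field, the `origin` above is given as a `"lat, lon"` string. As a side note (a sketch of the usual geo-point notations, not taken from this page), the same origin could presumably also be written in the other common formats:

[source,js]
--------------------------------------------------
"origin": "11, 12"                   // "lat, lon" string, as used above
"origin": [12, 11]                   // [lon, lat] array (GeoJSON order)
"origin": { "lat": 11, "lon": 12 }   // object form
--------------------------------------------------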

Suppose you want to multiply these two functions with the original score; the request would then look like this:

[source,js]
--------------------------------------------------
GET /hotels/_search/
{
    "query": {
        "function_score": {
            "functions": [
                {
                    "gauss": {
                        "price": {
                            "origin": "0",
                            "scale": "20"
                        }
                    }
                },
                {
                    "gauss": {
                        "location": {
                            "origin": "11, 12",
                            "scale": "2km"
                        }
                    }
                }
            ],
            "query": {
                "match": {
                    "properties": "balcony"
                }
            },
            "score_mode": "multiply"
        }
    }
}
--------------------------------------------------
// AUTOSENSE

Next, we show what the computed score looks like for each of the three
possible decay functions.
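
Because the request above sets `"score_mode": "multiply"`, the two decay values are multiplied with each other, and (assuming the default `boost_mode`, which is not spelled out in the snippet) the result is then multiplied with the score of the `match` query, roughly:

[source,latex]
--------------------------------------------------
\mathit{score}_{final} \;\approx\; \mathit{score}_{match} \times S_{price}(doc) \times S_{location}(doc)
--------------------------------------------------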

[[gauss-decay]]
===== Normal decay, keyword `gauss`

When choosing `gauss` as the decay function in the above example, the
@ -467,6 +502,7 @@ of 0.56. "BnB Bellevue" and "Backback Nap" are both pretty close to the
defined location but "BnB Bellevue" is cheaper, so it gets a multiplier
of 0.86 whereas "Backpack Nap" gets a value of 0.66.

[[exp-decay]]
===== Exponential decay, keyword `exp`

When choosing `exp` as the decay function in the above example, the
@ -476,6 +512,7 @@ image::https://f.cloud.github.com/assets/4320215/768161/082975c0-e899-11e2-86f7-

image::https://f.cloud.github.com/assets/4320215/768162/0b606884-e899-11e2-907b-aefc77eefef6.png[width="700px"]

[[linear-decay]]
===== Linear decay, keyword `linear`

When choosing `linear` as the decay function in the above example, the
@ -487,8 +524,7 @@ image::https://f.cloud.github.com/assets/4320215/768165/19d8b1aa-e899-11e2-91bc-

==== Supported fields for decay functions

Only numeric, date, and geo-point fields are supported.

==== What if a field is missing?

@ -370,11 +370,14 @@ or the <<search-aggregations-metrics-extendedstats-aggregation,`extended_stats`

Facets have been removed. Use <<search-aggregations>> instead.

// CACHES

[role="exclude",id="shard-query-cache"]
=== Shard request cache

The shard query cache has been renamed <<shard-request-cache>>.

[role="exclude",id="filter-cache"]
=== Query cache

The filter cache has been renamed <<query-cache>>.

@ -73,7 +73,7 @@ And here is a sample response:

Set to `true` or `false` to enable or disable the caching
of search results for requests where `size` is 0, i.e.
aggregations and suggestions (no top hits returned).
See <<shard-request-cache>>.

`terminate_after`::

@ -551,6 +551,7 @@ keep in mind that scoring more phrases consumes more time and memory.

If using `matched_fields` keep in mind that `phrase_limit` phrases per
matched field are considered.

[float]
[[explicit-field-order]]
=== Field Highlight Order
Elasticsearch highlights the fields in the order that they are sent. Per the

@ -24,13 +24,9 @@
        "type" : "list",
        "description" : "A comma-separated list of fields to clear when using the `field_data` parameter (default: all)"
      },
      "query": {
        "type" : "boolean",
        "description" : "Clear query caches"
      },
      "ignore_unavailable": {
        "type" : "boolean",
@ -54,9 +50,9 @@
        "type" : "boolean",
        "description" : "Clear the recycler cache"
      },
      "request": {
        "type" : "boolean",
        "description" : "Clear request cache"
      }
    }
  },
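
Going by the parameter names in this spec, clearing both of these caches for a single index might look like the sketch below (`my_index` is a made-up name; the endpoint is the usual clear-cache API):

[source,js]
--------------------------------------------------
POST /my_index/_cache/clear?query=true&request=true
--------------------------------------------------
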
@ -17,7 +17,7 @@
      },
      "metric" : {
        "type" : "list",
        "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "percolate", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
        "description" : "Limit the information returned to the specific metrics."
      }
    },
@ -20,7 +20,7 @@
      },
      "index_metric" : {
        "type" : "list",
        "options" : ["_all", "completion", "docs", "fielddata", "query_cache", "flush", "get", "indexing", "merge", "percolate", "request_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
        "description" : "Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified."
      },
      "node_id": {
@ -151,9 +151,9 @@
        "type" : "boolean",
        "description" : "Specify whether to return document version as part of a hit"
      },
      "request_cache": {
        "type" : "boolean",
        "description" : "Specify if request cache should be used for this request or not, defaults to index level setting"
      }
    }
  },
@ -22,8 +22,8 @@
completion.size .+ \n
fielddata.memory_size .+ \n
fielddata.evictions .+ \n
query_cache.memory_size .+ \n
query_cache.evictions .+ \n
flush.total .+ \n
flush.total_time .+ \n
get.current .+ \n
@ -29,7 +29,7 @@ setup:
- is_true: _all.total.refresh
- is_true: _all.total.flush
- is_true: _all.total.warmer
- is_true: _all.total.query_cache
- is_true: _all.total.fielddata
- is_true: _all.total.percolate
- is_true: _all.total.completion
@ -52,7 +52,7 @@ setup:
- is_true: _all.total.refresh
- is_true: _all.total.flush
- is_true: _all.total.warmer
- is_true: _all.total.query_cache
- is_true: _all.total.fielddata
- is_true: _all.total.percolate
- is_true: _all.total.completion
@ -75,7 +75,7 @@ setup:
- is_false: _all.total.refresh
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.query_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion
@ -98,7 +98,7 @@ setup:
- is_false: _all.total.refresh
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.query_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion
@ -122,7 +122,7 @@ setup:
- is_false: _all.total.refresh
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.query_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion