Removed `id_cache` from stats and cat apis.

Also removed the `id_cache` option from the clear cache api.

Closes #5269
Martijn van Groningen 2015-05-15 14:06:18 +02:00
parent df59288b72
commit ece18f162e
38 changed files with 70 additions and 304 deletions

View File

@ -131,8 +131,6 @@ get operations |2
gets |0s
|`get.missing_total` |`gmto`, `getMissingTotal` |No |Number of failed
get operations |1
|`id_cache.memory_size` |`im`, `idCacheMemory` |No |Used ID cache
memory |216b
|`indexing.delete_current` |`idc`, `indexingDeleteCurrent` |No |Number
of current deletion operations |0
|`indexing.delete_time` |`idti`, `indexingDeleteTime` |No |Time spent in

View File

@ -61,10 +61,6 @@ Will return, for example:
"memory_size_in_bytes": 0,
"evictions": 0
},
"id_cache": {
"memory_size": "0b",
"memory_size_in_bytes": 0
},
"completion": {
"size": "0b",
"size_in_bytes": 0

View File

@ -10,8 +10,7 @@ $ curl -XPOST 'http://localhost:9200/twitter/_cache/clear'
--------------------------------------------------
The API, by default, will clear all caches. Specific caches can be cleaned
explicitly by setting `filter`, `fielddata`, `query_cache`,
or `id_cache` to `true`.
explicitly by setting `filter`, `fielddata` or `query_cache`.
All caches relating to a specific field(s) can also be cleared by
specifying `fields` parameter with a comma delimited list of the

View File

@ -560,3 +560,29 @@ same search request will likely be off if `top_children` was used.
=== Removed file based index templates
Index templates can no longer be configured on disk. Use the `_template` API instead.
[float]
=== Removed `id_cache` from stats apis
Removed the `id_cache` metric from the nodes stats, indices stats and cluster stats apis. This metric has also been
removed from the shards cat, indices cat and nodes cat apis. Parent/child memory is now reported under fielddata,
because parent/child has internally been using fielddata for a while now.
To see how much memory the parent/child related field data is using, the `fielddata_fields` option can be used on the
stats apis. Indices stats example:
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent"
--------------------------------------------------
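The response then breaks fielddata memory down per field. A rough sketch of the relevant fragment is shown below; the
byte values are illustrative:
[source,js]
--------------------------------------------------
"fielddata": {
  "memory_size": "2.1kb",
  "memory_size_in_bytes": 2244,
  "evictions": 0,
  "fields": {
    "_parent": {
      "memory_size": "2.1kb",
      "memory_size_in_bytes": 2244
    }
  }
}
--------------------------------------------------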
Parent/child has been using field data for the `_parent` field since version `1.1.0`, but the memory stats for the `_parent`
field were still shown under the `id_cache` metric in the stats apis for backwards compatibility reasons between 1.x versions.
Before version `1.1.0`, parent/child had its own in-memory data structures for id values in the `_parent` field.
[float]
=== Removed `id_cache` from clear cache api
Removed the `id_cache` option from the clear cache apis. The `fielddata` option should be used instead to clear the
`_parent` field from the fielddata cache.
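For example, the `_parent` entries can be dropped from the fielddata cache with something like the following (a sketch
based on the clear cache docs; `twitter` is the example index used there):
[source,js]
--------------------------------------------------
curl -XPOST 'http://localhost:9200/twitter/_cache/clear?fielddata=true&fields=_parent'
--------------------------------------------------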

View File

@ -82,11 +82,11 @@ Additionally, every child document is mapped to its parent using a long
value (approximately). It is advisable to keep the string parent ID short
in order to reduce memory usage.
You can check how much memory is being used by the ID cache using the
<<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
You can check how much memory is being used by the `_parent` field in the fielddata cache
using the <<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
APIs, eg:
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human"
curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent"
--------------------------------------------------

View File

@ -57,13 +57,13 @@ Additionally, every child document is mapped to its parent using a long
value (approximately). It is advisable to keep the string parent ID short
in order to reduce memory usage.
You can check how much memory is being used by the ID cache using the
<<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
You can check how much memory is being used by the `_parent` field in the fielddata cache
using the <<indices-stats,indices stats>> or <<cluster-nodes-stats,nodes stats>>
APIs, eg:
[source,js]
--------------------------------------------------
curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human"
curl -XGET "http://localhost:9200/_stats/fielddata?pretty&human&fielddata_fields=_parent"
--------------------------------------------------

View File

@ -32,14 +32,6 @@
"type" : "boolean",
"description" : "Clear filter caches"
},
"id": {
"type" : "boolean",
"description" : "Clear ID caches for parent/child"
},
"id_cache": {
"type" : "boolean",
"description" : "Clear ID caches for parent/child"
},
"ignore_unavailable": {
"type" : "boolean",
"description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)"

View File

@ -17,7 +17,7 @@
},
"metric" : {
"type" : "list",
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "id_cache", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"description" : "Limit the information returned the specific metrics."
}
},

View File

@ -20,7 +20,7 @@
},
"index_metric" : {
"type" : "list",
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "id_cache", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"options" : ["_all", "completion", "docs", "fielddata", "filter_cache", "flush", "get", "indexing", "merge", "percolate", "query_cache", "refresh", "search", "segments", "store", "warmer", "suggest"],
"description" : "Limit the information returned for `indices` metric to the specific index metrics. Isn't used if `indices` (or `all`) metric isn't specified."
},
"node_id": {

View File

@ -29,7 +29,6 @@
get.exists_total .+ \n
get.missing_time .+ \n
get.missing_total .+ \n
id_cache.memory_size .+ \n
indexing.delete_current .+ \n
indexing.delete_time .+ \n
indexing.delete_total .+ \n

View File

@ -30,7 +30,6 @@ setup:
- is_true: _all.total.flush
- is_true: _all.total.warmer
- is_true: _all.total.filter_cache
- is_true: _all.total.id_cache
- is_true: _all.total.fielddata
- is_true: _all.total.percolate
- is_true: _all.total.completion
@ -54,7 +53,6 @@ setup:
- is_true: _all.total.flush
- is_true: _all.total.warmer
- is_true: _all.total.filter_cache
- is_true: _all.total.id_cache
- is_true: _all.total.fielddata
- is_true: _all.total.percolate
- is_true: _all.total.completion
@ -78,7 +76,6 @@ setup:
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.filter_cache
- is_false: _all.total.id_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion
@ -102,7 +99,6 @@ setup:
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.filter_cache
- is_false: _all.total.id_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion
@ -127,7 +123,6 @@ setup:
- is_false: _all.total.flush
- is_false: _all.total.warmer
- is_false: _all.total.filter_cache
- is_false: _all.total.id_cache
- is_false: _all.total.fielddata
- is_false: _all.total.percolate
- is_false: _all.total.completion

View File

@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.stats.PercolateStats;
@ -48,7 +47,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
private StoreStats store;
private FieldDataStats fieldData;
private FilterCacheStats filterCache;
private IdCacheStats idCache;
private CompletionStats completion;
private SegmentsStats segments;
private PercolateStats percolate;
@ -63,7 +61,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
this.store = new StoreStats();
this.fieldData = new FieldDataStats();
this.filterCache = new FilterCacheStats();
this.idCache = new IdCacheStats();
this.completion = new CompletionStats();
this.segments = new SegmentsStats();
this.percolate = new PercolateStats();
@ -87,7 +84,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store.add(shardCommonStats.store);
fieldData.add(shardCommonStats.fieldData);
filterCache.add(shardCommonStats.filterCache);
idCache.add(shardCommonStats.idCache);
completion.add(shardCommonStats.completion);
segments.add(shardCommonStats.segments);
percolate.add(shardCommonStats.percolate);
@ -125,10 +121,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
return filterCache;
}
public IdCacheStats getIdCache() {
return idCache;
}
public CompletionStats getCompletion() {
return completion;
}
@ -149,7 +141,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store = StoreStats.readStoreStats(in);
fieldData = FieldDataStats.readFieldDataStats(in);
filterCache = FilterCacheStats.readFilterCacheStats(in);
idCache = IdCacheStats.readIdCacheStats(in);
completion = CompletionStats.readCompletionStats(in);
segments = SegmentsStats.readSegmentsStats(in);
percolate = PercolateStats.readPercolateStats(in);
@ -163,7 +154,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store.writeTo(out);
fieldData.writeTo(out);
filterCache.writeTo(out);
idCache.writeTo(out);
completion.writeTo(out);
segments.writeTo(out);
percolate.writeTo(out);
@ -187,7 +177,6 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
store.toXContent(builder, params);
fieldData.toXContent(builder, params);
filterCache.toXContent(builder, params);
idCache.toXContent(builder, params);
completion.toXContent(builder, params);
segments.toXContent(builder, params);
percolate.toXContent(builder, params);

View File

@ -56,8 +56,8 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction<C
TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {
private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.IdCache,
CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, CommonStatsFlags.Flag.Percolate);
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
CommonStatsFlags.Flag.Percolate);
private final NodeService nodeService;
private final IndicesService indicesService;

View File

@ -33,7 +33,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
private boolean filterCache = false;
private boolean fieldDataCache = false;
private boolean idCache = false;
private boolean recycler = false;
private boolean queryCache = false;
private String[] fields = null;
@ -82,10 +81,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
return this.fields;
}
public boolean idCache() {
return this.idCache;
}
public ClearIndicesCacheRequest recycler(boolean recycler) {
this.recycler = recycler;
return this;
@ -95,17 +90,11 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
return this.recycler;
}
public ClearIndicesCacheRequest idCache(boolean idCache) {
this.idCache = idCache;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
fieldDataCache = in.readBoolean();
idCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
queryCache = in.readBoolean();
@ -116,7 +105,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest<ClearInd
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(idCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeBoolean(queryCache);

View File

@ -51,8 +51,4 @@ public class ClearIndicesCacheRequestBuilder extends BroadcastOperationRequestBu
return this;
}
public ClearIndicesCacheRequestBuilder setIdCache(boolean idCache) {
request.idCache(idCache);
return this;
}
}

View File

@ -34,7 +34,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
private boolean filterCache = false;
private boolean fieldDataCache = false;
private boolean idCache = false;
private boolean recycler;
private boolean queryCache = false;
@ -47,7 +46,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
super(shardId, request);
filterCache = request.filterCache();
fieldDataCache = request.fieldDataCache();
idCache = request.idCache();
fields = request.fields();
recycler = request.recycler();
queryCache = request.queryCache();
@ -65,10 +63,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
return this.fieldDataCache;
}
public boolean idCache() {
return this.idCache;
}
public boolean recycler() {
return this.recycler;
}
@ -77,17 +71,11 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
return this.fields;
}
public ShardClearIndicesCacheRequest waitForOperations(boolean waitForOperations) {
this.filterCache = waitForOperations;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
filterCache = in.readBoolean();
fieldDataCache = in.readBoolean();
idCache = in.readBoolean();
recycler = in.readBoolean();
fields = in.readStringArray();
queryCache = in.readBoolean();
@ -98,7 +86,6 @@ class ShardClearIndicesCacheRequest extends BroadcastShardOperationRequest {
super.writeTo(out);
out.writeBoolean(filterCache);
out.writeBoolean(fieldDataCache);
out.writeBoolean(idCache);
out.writeBoolean(recycler);
out.writeStringArrayNullable(fields);
out.writeBoolean(queryCache);

View File

@ -125,10 +125,6 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio
clearedAtLeastOne = true;
// cacheRecycler.clear();
}
if (request.idCache()) {
clearedAtLeastOne = true;
service.fieldData().clearField(ParentFieldMapper.NAME);
}
if (!clearedAtLeastOne) {
if (request.fields() != null && request.fields().length > 0) {
// only clear caches relating to the specified fields

View File

@ -27,7 +27,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
@ -92,9 +91,6 @@ public class CommonStats implements Streamable, ToXContent {
case FilterCache:
filterCache = new FilterCacheStats();
break;
case IdCache:
idCache = new IdCacheStats();
break;
case FieldData:
fieldData = new FieldDataStats();
break;
@ -161,9 +157,6 @@ public class CommonStats implements Streamable, ToXContent {
case FilterCache:
filterCache = indexShard.filterCacheStats();
break;
case IdCache:
idCache = indexShard.idCacheStats();
break;
case FieldData:
fieldData = indexShard.fieldDataStats(flags.fieldDataFields());
break;
@ -224,9 +217,6 @@ public class CommonStats implements Streamable, ToXContent {
@Nullable
public FilterCacheStats filterCache;
@Nullable
public IdCacheStats idCache;
@Nullable
public FieldDataStats fieldData;
@ -333,15 +323,6 @@ public class CommonStats implements Streamable, ToXContent {
filterCache.add(stats.getFilterCache());
}
if (idCache == null) {
if (stats.getIdCache() != null) {
idCache = new IdCacheStats();
idCache.add(stats.getIdCache());
}
} else {
idCache.add(stats.getIdCache());
}
if (fieldData == null) {
if (stats.getFieldData() != null) {
fieldData = new FieldDataStats();
@ -458,11 +439,6 @@ public class CommonStats implements Streamable, ToXContent {
return this.filterCache;
}
@Nullable
public IdCacheStats getIdCache() {
return this.idCache;
}
@Nullable
public FieldDataStats getFieldData() {
return this.fieldData;
@ -511,7 +487,7 @@ public class CommonStats implements Streamable, ToXContent {
/**
* Utility method which computes total memory by adding
* FieldData, IdCache, Percolate, Segments (memory, index writer, version map)
* FieldData, Percolate, Segments (memory, index writer, version map)
*/
public ByteSizeValue getTotalMemory() {
long size = 0;
@ -521,9 +497,6 @@ public class CommonStats implements Streamable, ToXContent {
if (this.getFilterCache() != null) {
size += this.getFilterCache().getMemorySizeInBytes();
}
if (this.getIdCache() != null) {
size += this.getIdCache().getMemorySizeInBytes();
}
if (this.getPercolate() != null) {
size += this.getPercolate().getMemorySizeInBytes();
}
@ -568,9 +541,6 @@ public class CommonStats implements Streamable, ToXContent {
if (in.readBoolean()) {
filterCache = FilterCacheStats.readFilterCacheStats(in);
}
if (in.readBoolean()) {
idCache = IdCacheStats.readIdCacheStats(in);
}
if (in.readBoolean()) {
fieldData = FieldDataStats.readFieldDataStats(in);
}
@ -651,12 +621,6 @@ public class CommonStats implements Streamable, ToXContent {
out.writeBoolean(true);
filterCache.writeTo(out);
}
if (idCache == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
idCache.writeTo(out);
}
if (fieldData == null) {
out.writeBoolean(false);
} else {
@ -720,9 +684,6 @@ public class CommonStats implements Streamable, ToXContent {
if (filterCache != null) {
filterCache.toXContent(builder, params);
}
if (idCache != null) {
idCache.toXContent(builder, params);
}
if (fieldData != null) {
fieldData.toXContent(builder, params);
}

View File

@ -216,7 +216,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
Flush("flush"),
Refresh("refresh"),
FilterCache("filter_cache"),
IdCache("id_cache"),
FieldData("fielddata"),
Docs("docs"),
Warmer("warmer"),

View File

@ -175,15 +175,6 @@ public class IndicesStatsRequest extends BroadcastOperationRequest<IndicesStatsR
return flags.isSet(Flag.FilterCache);
}
public IndicesStatsRequest idCache(boolean idCache) {
flags.set(Flag.IdCache, idCache);
return this;
}
public boolean idCache() {
return flags.isSet(Flag.IdCache);
}
public IndicesStatsRequest fieldData(boolean fieldData) {
flags.set(Flag.FieldData, fieldData);
return this;

View File

@ -117,11 +117,6 @@ public class IndicesStatsRequestBuilder extends BroadcastOperationRequestBuilder
return this;
}
public IndicesStatsRequestBuilder setIdCache(boolean idCache) {
request.idCache(idCache);
return this;
}
public IndicesStatsRequestBuilder setFieldData(boolean fieldData) {
request.fieldData(fieldData);
return this;

View File

@ -161,9 +161,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi
if (request.request.filterCache()) {
flags.set(CommonStatsFlags.Flag.FilterCache);
}
if (request.request.idCache()) {
flags.set(CommonStatsFlags.Flag.IdCache);
}
if (request.request.fieldData()) {
flags.set(CommonStatsFlags.Flag.FieldData);
flags.fieldDataFields(request.request.fieldDataFields());

View File

@ -1,89 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.cache.id;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
/**
* @deprecated Id cache has been removed in favor for {@link org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData}
* this class now reports field data memory usage for _parent field.
*/
@Deprecated
public class IdCacheStats implements Streamable, ToXContent {
long memorySize;
public IdCacheStats() {
}
public IdCacheStats(long memorySize) {
this.memorySize = memorySize;
}
public void add(IdCacheStats stats) {
this.memorySize += stats.memorySize;
}
public long getMemorySizeInBytes() {
return this.memorySize;
}
public ByteSizeValue getMemorySize() {
return new ByteSizeValue(memorySize);
}
public static IdCacheStats readIdCacheStats(StreamInput in) throws IOException {
IdCacheStats stats = new IdCacheStats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
memorySize = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(memorySize);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.ID_CACHE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
builder.endObject();
return builder;
}
static final class Fields {
static final XContentBuilderString ID_CACHE = new XContentBuilderString("id_cache");
static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
}
}

View File

@ -59,14 +59,7 @@ public class ShardFieldData extends AbstractIndexShardComponent implements Index
}
}
}
// Because we report _parent field used memory separately via id cache, we need to subtract it from the
// field data total memory used. This code should be removed for >= 2.0
long memorySize = totalMetric.count();
if (perFieldTotals.containsKey(ParentFieldMapper.NAME)) {
memorySize -= perFieldTotals.get(ParentFieldMapper.NAME).count();
}
return new FieldDataStats(memorySize, evictionsMetric.count(), fieldTotals);
return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals);
}
@Override

View File

@ -65,7 +65,6 @@ import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.filter.ShardFilterCache;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.query.ShardQueryCache;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy;
@ -665,11 +664,6 @@ public class IndexShard extends AbstractIndexShardComponent {
return shardPercolateService;
}
public IdCacheStats idCacheStats() {
long memorySizeInBytes = shardFieldData.stats(ParentFieldMapper.NAME).getFields().get(ParentFieldMapper.NAME);
return new IdCacheStats(memorySizeInBytes);
}
public TranslogStats translogStats() {
return engine().getTranslog().stats();
}

View File

@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
@ -142,11 +141,6 @@ public class NodeIndicesStats implements Streamable, Serializable, ToXContent {
return stats.getQueryCache();
}
@Nullable
public IdCacheStats getIdCache() {
return stats.getIdCache();
}
@Nullable
public CompletionStats getCompletion() {
return stats.getCompletion();

View File

@ -79,9 +79,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
if (Fields.FIELD_DATA.match(entry.getKey())) {
clearIndicesCacheRequest.fieldDataCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.fieldDataCache()));
}
if (Fields.ID.match(entry.getKey())) {
clearIndicesCacheRequest.idCache(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.idCache()));
}
if (Fields.RECYCLER.match(entry.getKey())) {
clearIndicesCacheRequest.recycler(request.paramAsBoolean(entry.getKey(), clearIndicesCacheRequest.recycler()));
}
@ -96,7 +93,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler {
public static class Fields {
public static final ParseField FILTER = new ParseField("filter", "filter_cache");
public static final ParseField FIELD_DATA = new ParseField("field_data", "fielddata");
public static final ParseField ID = new ParseField("id", "id_cache");
public static final ParseField RECYCLER = new ParseField("recycler");
public static final ParseField FIELDS = new ParseField("fields");
}

View File

@ -73,7 +73,6 @@ public class RestIndicesStatsAction extends BaseRestHandler {
indicesStatsRequest.flush(metrics.contains("flush"));
indicesStatsRequest.warmer(metrics.contains("warmer"));
indicesStatsRequest.filterCache(metrics.contains("filter_cache"));
indicesStatsRequest.idCache(metrics.contains("id_cache"));
indicesStatsRequest.percolate(metrics.contains("percolate"));
indicesStatsRequest.segments(metrics.contains("segments"));
indicesStatsRequest.fieldData(metrics.contains("fielddata"));

View File

@ -163,9 +163,6 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell("get.missing_total", "sibling:pri;alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
table.addCell("pri.get.missing_total", "default:false;text-align:right;desc:number of failed gets");
table.addCell("id_cache.memory_size", "sibling:pri;alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
table.addCell("pri.id_cache.memory_size", "default:false;text-align:right;desc:used id cache");
table.addCell("indexing.delete_current", "sibling:pri;alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
table.addCell("pri.indexing.delete_current", "default:false;text-align:right;desc:number of current deletions");
@ -365,9 +362,6 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell(indexStats == null ? null : indexStats.getTotal().getGet().getMissingCount());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getGet().getMissingCount());
table.addCell(indexStats == null ? null : indexStats.getTotal().getIdCache().getMemorySize());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIdCache().getMemorySize());
table.addCell(indexStats == null ? null : indexStats.getTotal().getIndexing().getTotal().getDeleteCurrent());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getIndexing().getTotal().getDeleteCurrent());

View File

@ -36,7 +36,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
@ -162,8 +161,6 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
table.addCell("id_cache.memory_size", "alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
@ -295,9 +292,6 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell(getStats == null ? null : getStats.getMissingTime());
table.addCell(getStats == null ? null : getStats.getMissingCount());
IdCacheStats idCacheStats = indicesStats == null ? null : indicesStats.getIdCache();
table.addCell(idCacheStats == null ? null : idCacheStats.getMemorySize());
IndexingStats indexingStats = indicesStats == null ? null : indicesStats.getIndexing();
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteCurrent());
table.addCell(indexingStats == null ? null : indexingStats.getTotal().getDeleteTime());

View File

@ -108,8 +108,6 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell("get.missing_time", "alias:gmti,getMissingTime;default:false;text-align:right;desc:time spent in failed gets");
table.addCell("get.missing_total", "alias:gmto,getMissingTotal;default:false;text-align:right;desc:number of failed gets");
table.addCell("id_cache.memory_size", "alias:im,idCacheMemory;default:false;text-align:right;desc:used id cache");
table.addCell("indexing.delete_current", "alias:idc,indexingDeleteCurrent;default:false;text-align:right;desc:number of current deletions");
table.addCell("indexing.delete_time", "alias:idti,indexingDeleteTime;default:false;text-align:right;desc:time spent in deletions");
table.addCell("indexing.delete_total", "alias:idto,indexingDeleteTotal;default:false;text-align:right;desc:number of delete ops");
@ -229,8 +227,6 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(shardStats == null ? null : shardStats.getGet().getMissingTime());
table.addCell(shardStats == null ? null : shardStats.getGet().getMissingCount());
table.addCell(shardStats == null ? null : shardStats.getIdCache().getMemorySize());
table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getDeleteCurrent());
table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getDeleteTime());
table.addCell(shardStats == null ? null : shardStats.getIndexing().getTotal().getDeleteCount());

View File

@ -44,7 +44,7 @@ public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest {
for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
try {
enableIndexBlock("test", blockSetting);
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setIdCache(true).execute().actionGet();
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setFieldDataCache(true).execute().actionGet();
assertNoFailures(clearIndicesCacheResponse);
assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));
} finally {
@ -55,7 +55,7 @@ public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest {
for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
try {
enableIndexBlock("test", blockSetting);
assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setIdCache(true));
assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setFieldDataCache(true));
} finally {
disableIndexBlock("test", blockSetting);
}

View File

@ -260,7 +260,7 @@ public class ChildSearchBenchmark {
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
System.out.println("--> Running has_child query with score type");
@ -319,7 +319,7 @@ public class ChildSearchBenchmark {
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
client.close();

View File

@ -171,7 +171,7 @@ public class ChildSearchShortCircuitBenchmark {
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
totalQueryTime = 0;
@ -192,7 +192,7 @@ public class ChildSearchShortCircuitBenchmark {
statsResponse = client.admin().cluster().prepareNodesStats()
.setJvm(true).setIndices(true).execute().actionGet();
System.out.println("--> Id cache size: " + statsResponse.getNodes()[0].getIndices().getIdCache().getMemorySize());
System.out.println("--> Field data size: " + statsResponse.getNodes()[0].getIndices().getFieldData().getMemorySize());
System.out.println("--> Used heap size: " + statsResponse.getNodes()[0].getJvm().getMem().getHeapUsed());
client.close();

View File

@ -77,7 +77,7 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest {
assertThat(indexExists("test1234565"), equalTo(false));
logger.info("Clearing cache");
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true).idCache(true)).actionGet();
ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().clearCache(clearIndicesCacheRequest("test").recycler(true).fieldDataCache(true).filterCache(true)).actionGet();
assertNoFailures(clearIndicesCacheResponse);
assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards));

View File

@ -639,7 +639,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
@Test
public void testFlagOrdinalOrder() {
Flag[] flags = new Flag[]{Flag.Store, Flag.Indexing, Flag.Get, Flag.Search, Flag.Merge, Flag.Flush, Flag.Refresh,
Flag.FilterCache, Flag.IdCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments,
Flag.FilterCache, Flag.FieldData, Flag.Docs, Flag.Warmer, Flag.Percolate, Flag.Completion, Flag.Segments,
Flag.Translog, Flag.Suggest, Flag.QueryCache, Flag.Recovery};
assertThat(flags.length, equalTo(Flag.values().length));
@ -872,9 +872,6 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
case Get:
builder.setGet(set);
break;
case IdCache:
builder.setIdCache(set);
break;
case Indexing:
builder.setIndexing(set);
break;
@ -932,8 +929,6 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest {
return response.getFlush() != null;
case Get:
return response.getGet() != null;
case IdCache:
return response.getIdCache() != null;
case Indexing:
return response.getIndexing() != null;
case Merge:

View File

@ -71,7 +71,7 @@ public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
refresh();
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getIndicesStats().getIdCache().getMemorySizeInBytes(), equalTo(0l));
assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(0l));
logger.info("testing default loading...");
assertAcked(client().admin().indices().prepareDelete("test").get());
@ -86,8 +86,8 @@ public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
refresh();
response = client().admin().cluster().prepareClusterStats().get();
long idCacheSizeDefault = response.getIndicesStats().getIdCache().getMemorySizeInBytes();
assertThat(idCacheSizeDefault, greaterThan(0l));
long fielddataSizeDefault = response.getIndicesStats().getFieldData().getMemorySizeInBytes();
assertThat(fielddataSizeDefault, greaterThan(0l));
logger.info("testing eager loading...");
assertAcked(client().admin().indices().prepareDelete("test").get());
@ -102,7 +102,7 @@ public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
refresh();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getIndicesStats().getIdCache().getMemorySizeInBytes(), equalTo(idCacheSizeDefault));
assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), equalTo(fielddataSizeDefault));
logger.info("testing eager global ordinals loading...");
assertAcked(client().admin().indices().prepareDelete("test").get());
@ -113,14 +113,14 @@ public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
ensureGreen();
// Need to do 2 separate refreshes, otherwise we have 1 segment and then we can't measure if global ordinals
// is loaded by the size of the id_cache, because global ordinals on 1 segment shards takes no extra memory.
// is loaded by the size of the field data cache, because global ordinals on 1 segment shards take no extra memory.
client().prepareIndex("test", "parent", "1").setSource("{}").get();
refresh();
client().prepareIndex("test", "child", "1").setParent("1").setSource("{}").get();
refresh();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getIndicesStats().getIdCache().getMemorySizeInBytes(), greaterThan(idCacheSizeDefault));
assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(fielddataSizeDefault));
}
@Test
@ -136,8 +136,8 @@ public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
refresh();
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
long idCacheSizeDefault = response.getIndicesStats().getIdCache().getMemorySizeInBytes();
assertThat(idCacheSizeDefault, greaterThan(0l));
long fielddataSizeDefault = response.getIndicesStats().getFieldData().getMemorySizeInBytes();
assertThat(fielddataSizeDefault, greaterThan(0l));
PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("child")
.setSource(childMapping(FieldMapper.Loading.EAGER_GLOBAL_ORDINALS))
@ -165,11 +165,11 @@ public class ParentFieldLoadingTest extends ElasticsearchIntegrationTest {
});
// Need to add a new doc otherwise the refresh doesn't trigger a new searcher
// Because it ends up in its own segment, but isn't of type parent or child, this doc doesn't contribute to the size of the id_cache
// Because it ends up in its own segment, but isn't of type parent or child, this doc doesn't contribute to the size of the fielddata cache
client().prepareIndex("test", "dummy", "dummy").setSource("{}").get();
refresh();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getIndicesStats().getIdCache().getMemorySizeInBytes(), greaterThan(idCacheSizeDefault));
assertThat(response.getIndicesStats().getFieldData().getMemorySizeInBytes(), greaterThan(fielddataSizeDefault));
}
private XContentBuilder childMapping(FieldMapper.Loading loading) throws IOException {

View File

@ -286,8 +286,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
@Test
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9270")
public void testClearIdCacheBug() throws Exception {
// enforce lazy loading to make sure that p/c stats are not counted as part of field data
public void testParentFieldDataCacheBug() throws Exception {
assertAcked(prepareCreate("test")
.setSettings(ImmutableSettings.builder().put(indexSettings())
.put("index.refresh_interval", -1)) // Disable automatic refresh, so that the _parent doesn't get warmed
@ -307,10 +306,10 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
refresh();
// No _parent field yet, there shouldn't be anything in the parent id cache
// No _parent field yet, there shouldn't be anything in the field data for _parent field
IndicesStatsResponse indicesStatsResponse = client().admin().indices()
.prepareStats("test").setIdCache(true).get();
assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
.prepareStats("test").setFieldData(true).get();
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
// Now add mapping + children
client().admin().indices().preparePutMapping("test").setType("child")
@ -338,12 +337,9 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
refresh();
indicesStatsResponse = client().admin().indices()
.prepareStats("test").setFieldData(true).get();
// automatic warm-up has populated the cache since it found a parent field mapper
assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
// Even though p/c is field data based the stats stay zero, because _parent field data field is kept
// track of under id cache stats memory wise for bwc
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
.prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l));
SearchResponse searchResponse = client().prepareSearch("test")
.setQuery(constantScoreQuery(hasChildQuery("child", termQuery("c_field", "blue"))))
@ -352,17 +348,17 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
indicesStatsResponse = client().admin().indices()
.prepareStats("test").setFieldData(true).get();
assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), greaterThan(0l));
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
.prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), greaterThan(0l));
assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), greaterThan(0l));
ClearIndicesCacheResponse clearCacheResponse = client().admin().indices().prepareClearCache("test").setIdCache(true).get();
ClearIndicesCacheResponse clearCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).get();
assertNoFailures(clearCacheResponse);
assertAllSuccessful(clearCacheResponse);
indicesStatsResponse = client().admin().indices()
.prepareStats("test").setFieldData(true).get();
assertThat(indicesStatsResponse.getTotal().getIdCache().getMemorySizeInBytes(), equalTo(0l));
.prepareStats("test").setFieldData(true).setFieldDataFields("_parent").get();
assertThat(indicesStatsResponse.getTotal().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat(indicesStatsResponse.getTotal().getFieldData().getFields().get("_parent"), equalTo(0l));
}
@Test