Move analysis/mappings stats to cluster-stats. (#51875)

Closes #51138
This commit is contained in:
Adrien Grand 2020-02-05 11:02:25 +01:00 committed by GitHub
parent 4def3694ab
commit ad9d2f1922
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 1139 additions and 828 deletions

View File

@ -438,6 +438,19 @@ The API returns the following response:
"fixed_bit_set_memory_in_bytes": 0,
"max_unsafe_auto_id_timestamp" : -9223372036854775808,
"file_sizes": {}
},
"mappings": {
"field_types": []
},
"analysis": {
"char_filter_types": [],
"tokenizer_types": [],
"filter_types": [],
"analyzer_types": [],
"built_in_char_filters": [],
"built_in_tokenizers": [],
"built_in_filters": [],
"built_in_analyzers": []
}
},
"nodes": {
@ -554,6 +567,7 @@ The API returns the following response:
// TESTRESPONSE[s/"processor_stats": \{[^\}]*\}/"processor_stats": $body.$_path/]
// TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/]
// TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/]
// TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/]
// TESTRESPONSE[s/: true|false/: $body.$_path/]
// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/]
// TESTRESPONSE[s/: "[^"]*"/: $body.$_path/]

View File

@ -91,10 +91,6 @@ Example response:
"available" : true,
"enabled" : true
},
"index" : {
"available" : true,
"enabled" : true
},
"logstash" : {
"available" : true,
"enabled" : true

View File

@ -0,0 +1,122 @@
---
# End-to-end check that the cluster stats API exposes analysis usage statistics:
# custom component types declared in index settings, and built-in components
# referenced by name from analyzer definitions and field mappings.
"get cluster stats returns analysis stats":
  - skip:
      version: " - 7.6.99"
      reason: "analysis stats are added for v7.7.0"
  - do:
      cluster.stats: {}
  # With no user indices, every analysis usage array is present but empty.
  - length: { indices.analysis.char_filter_types: 0 }
  - length: { indices.analysis.tokenizer_types: 0 }
  - length: { indices.analysis.filter_types: 0 }
  - length: { indices.analysis.analyzer_types: 0 }
  - length: { indices.analysis.built_in_char_filters: 0 }
  - length: { indices.analysis.built_in_tokenizers: 0 }
  - length: { indices.analysis.built_in_filters: 0 }
  - length: { indices.analysis.built_in_analyzers: 0 }
  # Declare one custom component of each kind; the "cust" analyzer references
  # built-in components by name, and the field mappings reference built-in
  # analyzers (french/spanish/german).
  - do:
      indices.create:
        index: test-index1
        body:
          settings:
            analysis:
              char_filter:
                c:
                  type: mapping
                  mappings: [ "a => b" ]
              tokenizer:
                tok:
                  type: pattern
                  pattern: ","
              filter:
                st:
                  type: stop
                  stopwords: [ "a" ]
                st2:
                  type: stop
                  stopwords: [ "b" ]
              analyzer:
                en:
                  type: standard
                  stopwords: "_english_"
                cust:
                  char_filter: [ "html_strip" ]
                  tokenizer: "keyword"
                  filter: [ "trim" ]
          mappings:
            properties:
              message:
                type: "text"
                analyzer: french
                search_analyzer: spanish
                search_quote_analyzer: german
              description:
                type: "text"
                analyzer: french
  - do:
      indices.create:
        index: test-index2
        body:
          mappings:
            properties:
              message:
                type: "text"
                analyzer: spanish
  - do:
      cluster.stats: {}
  - length: { indices.analysis.char_filter_types: 1 }
  - match: { indices.analysis.char_filter_types.0.name: mapping }
  - match: { indices.analysis.char_filter_types.0.count: 1 }
  - match: { indices.analysis.char_filter_types.0.index_count: 1 }
  - length: { indices.analysis.tokenizer_types: 1 }
  - match: { indices.analysis.tokenizer_types.0.name: pattern }
  - match: { indices.analysis.tokenizer_types.0.count: 1 }
  - match: { indices.analysis.tokenizer_types.0.index_count: 1 }
  # The "stop" type is used by two filters (st, st2) but only within one index.
  - length: { indices.analysis.filter_types: 1 }
  - match: { indices.analysis.filter_types.0.name: stop }
  - match: { indices.analysis.filter_types.0.count: 2 }
  - match: { indices.analysis.filter_types.0.index_count: 1 }
  # "cust" has no explicit type, so it is counted under the "custom" type.
  - length: { indices.analysis.analyzer_types: 2 }
  - match: { indices.analysis.analyzer_types.0.name: custom }
  - match: { indices.analysis.analyzer_types.0.count: 1 }
  - match: { indices.analysis.analyzer_types.0.index_count: 1 }
  - match: { indices.analysis.analyzer_types.1.name: standard }
  - match: { indices.analysis.analyzer_types.1.count: 1 }
  - match: { indices.analysis.analyzer_types.1.index_count: 1 }
  - length: { indices.analysis.built_in_char_filters: 1 }
  - match: { indices.analysis.built_in_char_filters.0.name: html_strip }
  - match: { indices.analysis.built_in_char_filters.0.count: 1 }
  - match: { indices.analysis.built_in_char_filters.0.index_count: 1 }
  - length: { indices.analysis.built_in_tokenizers: 1 }
  - match: { indices.analysis.built_in_tokenizers.0.name: keyword }
  - match: { indices.analysis.built_in_tokenizers.0.count: 1 }
  - match: { indices.analysis.built_in_tokenizers.0.index_count: 1 }
  - length: { indices.analysis.built_in_filters: 1 }
  - match: { indices.analysis.built_in_filters.0.name: trim }
  - match: { indices.analysis.built_in_filters.0.count: 1 }
  - match: { indices.analysis.built_in_filters.0.index_count: 1 }
  # Built-in analyzers referenced from mappings, sorted by name:
  # french twice in test-index1; german once; spanish once in each index.
  - length: { indices.analysis.built_in_analyzers: 3 }
  - match: { indices.analysis.built_in_analyzers.0.name: french }
  - match: { indices.analysis.built_in_analyzers.0.count: 2 }
  - match: { indices.analysis.built_in_analyzers.0.index_count: 1 }
  - match: { indices.analysis.built_in_analyzers.1.name: german }
  - match: { indices.analysis.built_in_analyzers.1.count: 1 }
  - match: { indices.analysis.built_in_analyzers.1.index_count: 1 }
  - match: { indices.analysis.built_in_analyzers.2.name: spanish }
  - match: { indices.analysis.built_in_analyzers.2.count: 2 }
  - match: { indices.analysis.built_in_analyzers.2.index_count: 2 }

View File

@ -89,3 +89,57 @@
cluster.stats: {}
- is_true: nodes.packaging_types
---
# Verifies that the cluster stats API reports per-field-type mapping usage.
"get cluster stats returns mapping stats":
  - skip:
      version: " - 7.6.99"
      reason: "mapping stats are added for v7.7.0"
  - do:
      cluster.stats: {}
  # With no user indices, the field_types array is present but empty.
  - length: { indices.mappings.field_types: 0 }
  - do:
      indices.create:
        index: test-index1
        body:
          mappings:
            properties:
              foo:
                type: keyword
  - do:
      indices.create:
        index: test-index2
        body:
          mappings:
            properties:
              foo:
                type: keyword
              bar:
                properties:
                  quux:
                    type: integer
                  baz:
                    type: keyword
  - do:
      cluster.stats: {}
  # Entries are sorted by type name. "bar" has sub-properties but no explicit
  # type, so it is counted as an implicit "object" field.
  - length: { indices.mappings.field_types: 3 }
  - match: { indices.mappings.field_types.0.name: integer }
  - match: { indices.mappings.field_types.0.count: 1 }
  - match: { indices.mappings.field_types.0.index_count: 1 }
  # keyword: "foo" in both indices plus "bar.baz" => 3 uses across 2 indices.
  - match: { indices.mappings.field_types.1.name: keyword }
  - match: { indices.mappings.field_types.1.count: 3 }
  - match: { indices.mappings.field_types.1.index_count: 2 }
  - match: { indices.mappings.field_types.2.name: object }
  - match: { indices.mappings.field_types.2.count: 1 }
  - match: { indices.mappings.field_types.2.index_count: 1 }

View File

@ -0,0 +1,319 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
/**
 * Statistics about analysis usage.
 */
public final class AnalysisStats implements ToXContentFragment, Writeable {

    /**
     * Create {@link AnalysisStats} from the given cluster state.
     */
    public static AnalysisStats of(ClusterState state) {
        // Usage of custom analysis components, keyed by the component's "type"
        // as declared in index settings.
        final Map<String, IndexFeatureStats> usedCharFilterTypes = new HashMap<>();
        final Map<String, IndexFeatureStats> usedTokenizerTypes = new HashMap<>();
        final Map<String, IndexFeatureStats> usedTokenFilterTypes = new HashMap<>();
        final Map<String, IndexFeatureStats> usedAnalyzerTypes = new HashMap<>();
        // Usage of built-in components, keyed by name. Names that turn out to be
        // declared as custom components in index settings are removed again below.
        final Map<String, IndexFeatureStats> usedBuiltInCharFilters = new HashMap<>();
        final Map<String, IndexFeatureStats> usedBuiltInTokenizers = new HashMap<>();
        final Map<String, IndexFeatureStats> usedBuiltInTokenFilters = new HashMap<>();
        final Map<String, IndexFeatureStats> usedBuiltInAnalyzers = new HashMap<>();
        for (IndexMetaData indexMetaData : state.metaData()) {
            // Analyzer names already counted for this index, so that indexCount is
            // incremented at most once per index for a given name.
            Set<String> indexAnalyzers = new HashSet<>();
            MappingMetaData mappingMetaData = indexMetaData.mapping();
            if (mappingMetaData != null) {
                // Analyzers referenced by name from field mappings. They are counted
                // as built-in for now; names that match custom analyzers declared in
                // this index's settings are discarded further down.
                MappingVisitor.visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> {
                    for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) {
                        Object analyzerO = fieldMapping.get(key);
                        if (analyzerO != null) {
                            final String analyzer = analyzerO.toString();
                            IndexFeatureStats stats = usedBuiltInAnalyzers.computeIfAbsent(analyzer, IndexFeatureStats::new);
                            stats.count++;
                            if (indexAnalyzers.add(analyzer)) {
                                stats.indexCount++;
                            }
                        }
                    }
                });
            }
            // Per-index "already seen" sets for built-in component names...
            Set<String> indexCharFilters = new HashSet<>();
            Set<String> indexTokenizers = new HashSet<>();
            Set<String> indexTokenFilters = new HashSet<>();
            // ...and for custom component types.
            Set<String> indexAnalyzerTypes = new HashSet<>();
            Set<String> indexCharFilterTypes = new HashSet<>();
            Set<String> indexTokenizerTypes = new HashSet<>();
            Set<String> indexTokenFilterTypes = new HashSet<>();
            Settings indexSettings = indexMetaData.getSettings();
            Map<String, Settings> analyzerSettings = indexSettings.getGroups("index.analysis.analyzer");
            // Analyzers declared in settings are custom, not built-in.
            // NOTE(review): this removes the name from the cluster-wide map, which also
            // discards built-in usage recorded under the same name by previously
            // visited indices — confirm this is the intended semantics.
            usedBuiltInAnalyzers.keySet().removeAll(analyzerSettings.keySet());
            for (Settings analyzerSetting : analyzerSettings.values()) {
                // Analyzers declared without an explicit type are custom analyzers.
                final String analyzerType = analyzerSetting.get("type", "custom");
                IndexFeatureStats stats = usedAnalyzerTypes.computeIfAbsent(analyzerType, IndexFeatureStats::new);
                stats.count++;
                if (indexAnalyzerTypes.add(analyzerType)) {
                    stats.indexCount++;
                }
                // Components referenced by name from a custom analyzer definition are
                // counted as built-in; custom ones are removed again below.
                for (String charFilter : analyzerSetting.getAsList("char_filter")) {
                    stats = usedBuiltInCharFilters.computeIfAbsent(charFilter, IndexFeatureStats::new);
                    stats.count++;
                    if (indexCharFilters.add(charFilter)) {
                        stats.indexCount++;
                    }
                }
                String tokenizer = analyzerSetting.get("tokenizer");
                if (tokenizer != null) {
                    stats = usedBuiltInTokenizers.computeIfAbsent(tokenizer, IndexFeatureStats::new);
                    stats.count++;
                    if (indexTokenizers.add(tokenizer)) {
                        stats.indexCount++;
                    }
                }
                for (String filter : analyzerSetting.getAsList("filter")) {
                    stats = usedBuiltInTokenFilters.computeIfAbsent(filter, IndexFeatureStats::new);
                    stats.count++;
                    if (indexTokenFilters.add(filter)) {
                        stats.indexCount++;
                    }
                }
            }
            // Custom components declared in settings: count them by type and drop
            // their names from the corresponding built-in maps.
            Map<String, Settings> charFilterSettings = indexSettings.getGroups("index.analysis.char_filter");
            usedBuiltInCharFilters.keySet().removeAll(charFilterSettings.keySet());
            aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilterTypes, indexCharFilterTypes);
            Map<String, Settings> tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer");
            usedBuiltInTokenizers.keySet().removeAll(tokenizerSettings.keySet());
            aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizerTypes, indexTokenizerTypes);
            Map<String, Settings> tokenFilterSettings = indexSettings.getGroups("index.analysis.filter");
            usedBuiltInTokenFilters.keySet().removeAll(tokenFilterSettings.keySet());
            aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilterTypes, indexTokenFilterTypes);
        }
        return new AnalysisStats(usedCharFilterTypes.values(), usedTokenizerTypes.values(), usedTokenFilterTypes.values(),
            usedAnalyzerTypes.values(), usedBuiltInCharFilters.values(), usedBuiltInTokenizers.values(),
            usedBuiltInTokenFilters.values(), usedBuiltInAnalyzers.values());
    }

    /**
     * Count one usage per component in {@code settings} under its "type" key,
     * incrementing indexCount only the first time a type is seen for the current
     * index (tracked through {@code indexTypes}).
     */
    private static void aggregateAnalysisTypes(
            Collection<Settings> settings,
            Map<String, IndexFeatureStats> stats,
            Set<String> indexTypes) {
        for (Settings analysisComponentSettings : settings) {
            final String type = analysisComponentSettings.get("type");
            if (type != null) {
                IndexFeatureStats s = stats.computeIfAbsent(type, IndexFeatureStats::new);
                s.count++;
                if (indexTypes.add(type)) {
                    s.indexCount++;
                }
            }
        }
    }

    // Sort by name into an insertion-ordered immutable set, so that serialization
    // and rendering have a deterministic order.
    private static Set<IndexFeatureStats> sort(Collection<IndexFeatureStats> set) {
        List<IndexFeatureStats> list = new ArrayList<>(set);
        list.sort(Comparator.comparing(IndexFeatureStats::getName));
        return Collections.unmodifiableSet(new LinkedHashSet<>(list));
    }

    private final Set<IndexFeatureStats> usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers;
    private final Set<IndexFeatureStats> usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers;

    AnalysisStats(
            Collection<IndexFeatureStats> usedCharFilters,
            Collection<IndexFeatureStats> usedTokenizers,
            Collection<IndexFeatureStats> usedTokenFilters,
            Collection<IndexFeatureStats> usedAnalyzers,
            Collection<IndexFeatureStats> usedBuiltInCharFilters,
            Collection<IndexFeatureStats> usedBuiltInTokenizers,
            Collection<IndexFeatureStats> usedBuiltInTokenFilters,
            Collection<IndexFeatureStats> usedBuiltInAnalyzers) {
        this.usedCharFilters = sort(usedCharFilters);
        this.usedTokenizers = sort(usedTokenizers);
        this.usedTokenFilters = sort(usedTokenFilters);
        this.usedAnalyzers = sort(usedAnalyzers);
        this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters);
        this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers);
        this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters);
        this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers);
    }

    /**
     * Read from a stream; insertion order of the sets is the name-sorted order
     * produced on the writing side, so no re-sort is needed.
     */
    public AnalysisStats(StreamInput input) throws IOException {
        usedCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedBuiltInCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedBuiltInTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedBuiltInTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
        usedBuiltInAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new)));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Order must mirror the stream constructor above.
        out.writeCollection(usedCharFilters);
        out.writeCollection(usedTokenizers);
        out.writeCollection(usedTokenFilters);
        out.writeCollection(usedAnalyzers);
        out.writeCollection(usedBuiltInCharFilters);
        out.writeCollection(usedBuiltInTokenizers);
        out.writeCollection(usedBuiltInTokenFilters);
        out.writeCollection(usedBuiltInAnalyzers);
    }

    /**
     * Return the set of used char filters in the cluster.
     */
    public Set<IndexFeatureStats> getUsedCharFilterTypes() {
        return usedCharFilters;
    }

    /**
     * Return the set of used tokenizers in the cluster.
     */
    public Set<IndexFeatureStats> getUsedTokenizerTypes() {
        return usedTokenizers;
    }

    /**
     * Return the set of used token filters in the cluster.
     */
    public Set<IndexFeatureStats> getUsedTokenFilterTypes() {
        return usedTokenFilters;
    }

    /**
     * Return the set of used analyzers in the cluster.
     */
    public Set<IndexFeatureStats> getUsedAnalyzerTypes() {
        return usedAnalyzers;
    }

    /**
     * Return the set of used built-in char filters in the cluster.
     */
    public Set<IndexFeatureStats> getUsedBuiltInCharFilters() {
        return usedBuiltInCharFilters;
    }

    /**
     * Return the set of used built-in tokenizers in the cluster.
     */
    public Set<IndexFeatureStats> getUsedBuiltInTokenizers() {
        return usedBuiltInTokenizers;
    }

    /**
     * Return the set of used built-in token filters in the cluster.
     */
    public Set<IndexFeatureStats> getUsedBuiltInTokenFilters() {
        return usedBuiltInTokenFilters;
    }

    /**
     * Return the set of used built-in analyzers in the cluster.
     */
    public Set<IndexFeatureStats> getUsedBuiltInAnalyzers() {
        return usedBuiltInAnalyzers;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        AnalysisStats that = (AnalysisStats) o;
        return Objects.equals(usedCharFilters, that.usedCharFilters) &&
            Objects.equals(usedTokenizers, that.usedTokenizers) &&
            Objects.equals(usedTokenFilters, that.usedTokenFilters) &&
            Objects.equals(usedAnalyzers, that.usedAnalyzers) &&
            Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) &&
            Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) &&
            Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) &&
            Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers);
    }

    @Override
    public int hashCode() {
        return Objects.hash(usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers, usedBuiltInCharFilters,
            usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers);
    }

    // Render one stats collection as a named JSON array of objects.
    private void toXContentCollection(XContentBuilder builder, Params params, String name, Collection<? extends ToXContent> coll)
            throws IOException {
        builder.startArray(name);
        for (ToXContent toXContent : coll) {
            toXContent.toXContent(builder, params);
        }
        builder.endArray();
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("analysis");
        toXContentCollection(builder, params, "char_filter_types", usedCharFilters);
        toXContentCollection(builder, params, "tokenizer_types", usedTokenizers);
        toXContentCollection(builder, params, "filter_types", usedTokenFilters);
        toXContentCollection(builder, params, "analyzer_types", usedAnalyzers);
        toXContentCollection(builder, params, "built_in_char_filters", usedBuiltInCharFilters);
        toXContentCollection(builder, params, "built_in_tokenizers", usedBuiltInTokenizers);
        toXContentCollection(builder, params, "built_in_filters", usedBuiltInTokenFilters);
        toXContentCollection(builder, params, "built_in_analyzers", usedBuiltInAnalyzers);
        builder.endObject();
        return builder;
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}

View File

@ -45,8 +45,12 @@ public class ClusterStatsIndices implements ToXContentFragment {
private QueryCacheStats queryCache;
private CompletionStats completion;
private SegmentsStats segments;
private AnalysisStats analysis;
private MappingStats mappings;
public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses) {
public ClusterStatsIndices(List<ClusterStatsNodeResponse> nodeResponses,
MappingStats mappingStats,
AnalysisStats analysisStats) {
ObjectObjectHashMap<String, ShardStats> countsPerIndex = new ObjectObjectHashMap<>();
this.docs = new DocsStats();
@ -85,6 +89,9 @@ public class ClusterStatsIndices implements ToXContentFragment {
for (ObjectObjectCursor<String, ShardStats> indexCountsCursor : countsPerIndex) {
shards.addIndexShardCount(indexCountsCursor.value);
}
this.mappings = mappingStats;
this.analysis = analysisStats;
}
public int getIndexCount() {
@ -119,6 +126,14 @@ public class ClusterStatsIndices implements ToXContentFragment {
return segments;
}
public MappingStats getMappings() {
return mappings;
}
public AnalysisStats getAnalysis() {
return analysis;
}
static final class Fields {
static final String COUNT = "count";
}
@ -133,6 +148,12 @@ public class ClusterStatsIndices implements ToXContentFragment {
queryCache.toXContent(builder, params);
completion.toXContent(builder, params);
segments.toXContent(builder, params);
if (mappings != null) {
mappings.toXContent(builder, params);
}
if (analysis != null) {
analysis.toXContent(builder, params);
}
return builder;
}

View File

@ -19,9 +19,11 @@
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.Version;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@ -36,29 +38,45 @@ import java.util.Locale;
public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResponse> implements ToXContentFragment {
ClusterStatsNodes nodesStats;
ClusterStatsIndices indicesStats;
ClusterHealthStatus status;
long timestamp;
String clusterUUID;
final ClusterStatsNodes nodesStats;
final ClusterStatsIndices indicesStats;
final ClusterHealthStatus status;
final long timestamp;
final String clusterUUID;
public ClusterStatsResponse(StreamInput in) throws IOException {
super(in);
timestamp = in.readVLong();
// it may be that the master switched on us while doing the operation. In this case the status may be null.
status = in.readOptionalWriteable(ClusterHealthStatus::readFrom);
String clusterUUID = null;
MappingStats mappingStats = null;
AnalysisStats analysisStats = null;
if (in.getVersion().onOrAfter(Version.V_7_7_0)) {
clusterUUID = in.readOptionalString();
mappingStats = in.readOptionalWriteable(MappingStats::new);
analysisStats = in.readOptionalWriteable(AnalysisStats::new);
}
this.clusterUUID = clusterUUID;
// built from nodes rather than from the stream directly
nodesStats = new ClusterStatsNodes(getNodes());
indicesStats = new ClusterStatsIndices(getNodes(), mappingStats, analysisStats);
}
public ClusterStatsResponse(long timestamp,
String clusterUUID,
ClusterName clusterName,
List<ClusterStatsNodeResponse> nodes,
List<FailedNodeException> failures) {
List<FailedNodeException> failures,
ClusterState state) {
super(clusterName, nodes, failures);
this.clusterUUID = clusterUUID;
this.timestamp = timestamp;
nodesStats = new ClusterStatsNodes(nodes);
indicesStats = new ClusterStatsIndices(nodes);
indicesStats = new ClusterStatsIndices(nodes, MappingStats.of(state), AnalysisStats.of(state));
ClusterHealthStatus status = null;
for (ClusterStatsNodeResponse response : nodes) {
// only the master node populates the status
if (response.clusterStatus() != null) {
@ -66,6 +84,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
break;
}
}
this.status = status;
}
public String getClusterUUID() {
@ -93,17 +112,16 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
super.writeTo(out);
out.writeVLong(timestamp);
out.writeOptionalWriteable(status);
if (out.getVersion().onOrAfter(Version.V_7_7_0)) {
out.writeOptionalString(clusterUUID);
out.writeOptionalWriteable(indicesStats.getMappings());
out.writeOptionalWriteable(indicesStats.getAnalysis());
}
}
@Override
protected List<ClusterStatsNodeResponse> readNodesFrom(StreamInput in) throws IOException {
List<ClusterStatsNodeResponse> nodes = in.readList(ClusterStatsNodeResponse::readNodeResponse);
// built from nodes rather than from the stream directly
nodesStats = new ClusterStatsNodes(nodes);
indicesStats = new ClusterStatsIndices(nodes);
return nodes;
return in.readList(ClusterStatsNodeResponse::readNodeResponse);
}
@Override

View File

@ -0,0 +1,102 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Objects;
/**
 * Usage statistics for a single index feature: how many times it is used in
 * total across the cluster, and in how many indices.
 */
public final class IndexFeatureStats implements ToXContent, Writeable {

    final String name;
    int count;
    int indexCount;

    IndexFeatureStats(String name) {
        this.name = Objects.requireNonNull(name);
    }

    IndexFeatureStats(StreamInput in) throws IOException {
        name = in.readString();
        count = in.readVInt();
        indexCount = in.readVInt();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Order must mirror the stream constructor above.
        out.writeString(name);
        out.writeVInt(count);
        out.writeVInt(indexCount);
    }

    /**
     * Return the name of the field type.
     */
    public String getName() {
        return name;
    }

    /**
     * Return the number of times this feature is used across the cluster.
     */
    public int getCount() {
        return count;
    }

    /**
     * Return the number of indices that use this feature across the cluster.
     */
    public int getIndexCount() {
        return indexCount;
    }

    @Override
    public boolean equals(Object other) {
        if ((other instanceof IndexFeatureStats) == false) {
            return false;
        }
        IndexFeatureStats o = (IndexFeatureStats) other;
        return count == o.count
            && indexCount == o.indexCount
            && name.equals(o.name);
    }

    @Override
    public int hashCode() {
        return Objects.hash(name, count, indexCount);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.startObject()
            .field("name", name)
            .field("count", count)
            .field("index_count", indexCount)
            .endObject();
    }
}

View File

@ -0,0 +1,134 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Usage statistics about mappings usage.
 */
public final class MappingStats implements ToXContentFragment, Writeable {

    /**
     * Create {@link MappingStats} from the given cluster state.
     */
    public static MappingStats of(ClusterState state) {
        final Map<String, IndexFeatureStats> statsPerType = new HashMap<>();
        for (IndexMetaData indexMetaData : state.metaData()) {
            final MappingMetaData mappingMetaData = indexMetaData.mapping();
            if (mappingMetaData == null) {
                continue;
            }
            // Types already counted for this index, so that indexCount is
            // incremented at most once per index for a given type.
            final Set<String> seenInThisIndex = new HashSet<>();
            MappingVisitor.visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> {
                final String type;
                final Object explicitType = fieldMapping.get("type");
                if (explicitType != null) {
                    type = explicitType.toString();
                } else if (fieldMapping.containsKey("properties")) {
                    // Fields with sub-properties but no explicit type are implicit objects.
                    type = "object";
                } else {
                    return;
                }
                final IndexFeatureStats stats = statsPerType.computeIfAbsent(type, IndexFeatureStats::new);
                stats.count++;
                if (seenInThisIndex.add(type)) {
                    stats.indexCount++;
                }
            });
        }
        return new MappingStats(statsPerType.values());
    }

    // Name-sorted, insertion-ordered, immutable view of the per-type stats.
    private final Set<IndexFeatureStats> fieldTypeStats;

    MappingStats(Collection<IndexFeatureStats> fieldTypeStats) {
        final List<IndexFeatureStats> sorted = new ArrayList<>(fieldTypeStats);
        sorted.sort(Comparator.comparing(IndexFeatureStats::getName));
        this.fieldTypeStats = Collections.unmodifiableSet(new LinkedHashSet<>(sorted));
    }

    MappingStats(StreamInput in) throws IOException {
        // Insertion order is the name-sorted order produced at serialization time.
        this.fieldTypeStats = Collections.unmodifiableSet(new LinkedHashSet<>(in.readList(IndexFeatureStats::new)));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeCollection(fieldTypeStats);
    }

    /**
     * Return stats about field types.
     */
    public Set<IndexFeatureStats> getFieldTypeStats() {
        return fieldTypeStats;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject("mappings");
        builder.startArray("field_types");
        for (IndexFeatureStats stats : fieldTypeStats) {
            stats.toXContent(builder, params);
        }
        builder.endArray();
        builder.endObject();
        return builder;
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }

    @Override
    public boolean equals(Object o) {
        if ((o instanceof MappingStats) == false) {
            return false;
        }
        return fieldTypeStats.equals(((MappingStats) o).fieldTypeStats);
    }

    @Override
    public int hashCode() {
        return fieldTypeStats.hashCode();
    }
}

View File

@ -0,0 +1,60 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import java.util.Map;
import java.util.function.Consumer;
/**
 * Utility to walk the parsed source of an index mapping and hand every field
 * mapping (the map that holds "type", "analyzer", "fields", ...) to a consumer.
 */
final class MappingVisitor {

    private MappingVisitor() {} // utility class, no instances

    /**
     * Recursively visit every field mapping declared under the "properties" key
     * of {@code mapping}. Object fields are recursed into; multi-fields declared
     * under "fields" are visited once each but not recursed into.
     *
     * @param mapping              parsed mapping source (or a field mapping during recursion)
     * @param fieldMappingConsumer invoked once per field mapping encountered
     */
    static void visitMapping(Map<String, ?> mapping, Consumer<Map<String, ?>> fieldMappingConsumer) {
        Object properties = mapping.get("properties");
        // instanceof already rejects null, so no separate null check is needed
        if (properties instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, ?> propertiesAsMap = (Map<String, ?>) properties;
            for (Object v : propertiesAsMap.values()) {
                if (v instanceof Map) {
                    @SuppressWarnings("unchecked")
                    Map<String, ?> fieldMapping = (Map<String, ?>) v;
                    fieldMappingConsumer.accept(fieldMapping);
                    visitMapping(fieldMapping, fieldMappingConsumer);

                    // Multi fields: visited, but not recursed into
                    Object fieldsO = fieldMapping.get("fields");
                    if (fieldsO instanceof Map) {
                        @SuppressWarnings("unchecked")
                        Map<String, ?> fields = (Map<String, ?>) fieldsO;
                        for (Object v2 : fields.values()) {
                            if (v2 instanceof Map) {
                                @SuppressWarnings("unchecked")
                                Map<String, ?> fieldMapping2 = (Map<String, ?>) v2;
                                fieldMappingConsumer.accept(fieldMapping2);
                            }
                        }
                    }
                }
            }
        }
    }
}

View File

@ -29,6 +29,7 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.service.ClusterService;
@ -72,12 +73,14 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
@Override
protected ClusterStatsResponse newResponse(ClusterStatsRequest request,
List<ClusterStatsNodeResponse> responses, List<FailedNodeException> failures) {
ClusterState state = clusterService.state();
return new ClusterStatsResponse(
System.currentTimeMillis(),
clusterService.state().metaData().clusterUUID(),
state.metaData().clusterUUID(),
clusterService.getClusterName(),
responses,
failures);
failures,
state);
}
@Override

View File

@ -0,0 +1,170 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
/**
 * Wire-serialization tests for {@link AnalysisStats}: round-trips random
 * instances and checks that every mutation produces a non-equal instance.
 */
public class AnalysisStatsTests extends AbstractWireSerializingTestCase<AnalysisStats> {

    @Override
    protected Reader<AnalysisStats> instanceReader() {
        return AnalysisStats::new;
    }

    /** Build a random stats entry for {@code name}; count is always >= indexCount. */
    private static IndexFeatureStats randomStats(String name) {
        IndexFeatureStats stats = new IndexFeatureStats(name);
        stats.indexCount = randomIntBetween(1, 5);
        stats.count = randomIntBetween(stats.indexCount, 10);
        return stats;
    }

    @Override
    protected AnalysisStats createTestInstance() {
        Set<IndexFeatureStats> charFilters = new HashSet<>();
        if (randomBoolean()) {
            charFilters.add(randomStats("pattern_replace"));
        }
        Set<IndexFeatureStats> tokenizers = new HashSet<>();
        if (randomBoolean()) {
            tokenizers.add(randomStats("whitespace"));
        }
        Set<IndexFeatureStats> tokenFilters = new HashSet<>();
        if (randomBoolean()) {
            tokenFilters.add(randomStats("stop"));
        }
        Set<IndexFeatureStats> analyzers = new HashSet<>();
        if (randomBoolean()) {
            // Fixed: this branch previously added to tokenFilters by mistake,
            // so random instances never had any analyzer stats.
            analyzers.add(randomStats("english"));
        }
        Set<IndexFeatureStats> builtInCharFilters = new HashSet<>();
        if (randomBoolean()) {
            builtInCharFilters.add(randomStats("html_strip"));
        }
        Set<IndexFeatureStats> builtInTokenizers = new HashSet<>();
        if (randomBoolean()) {
            builtInTokenizers.add(randomStats("keyword"));
        }
        Set<IndexFeatureStats> builtInTokenFilters = new HashSet<>();
        if (randomBoolean()) {
            builtInTokenFilters.add(randomStats("trim"));
        }
        Set<IndexFeatureStats> builtInAnalyzers = new HashSet<>();
        if (randomBoolean()) {
            builtInAnalyzers.add(randomStats("french"));
        }
        return new AnalysisStats(charFilters, tokenizers, tokenFilters, analyzers,
                builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers);
    }

    @Override
    protected AnalysisStats mutateInstance(AnalysisStats instance) throws IOException {
        // Each case toggles exactly one of the 8 component sets: remove the
        // well-known entry if present, otherwise add it.
        switch (randomInt(7)) {
        case 0:
            Set<IndexFeatureStats> charFilters = new HashSet<>(instance.getUsedCharFilterTypes());
            if (charFilters.removeIf(s -> s.getName().equals("pattern_replace")) == false) {
                charFilters.add(randomStats("pattern_replace"));
            }
            return new AnalysisStats(charFilters, instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 1:
            Set<IndexFeatureStats> tokenizers = new HashSet<>(instance.getUsedTokenizerTypes());
            if (tokenizers.removeIf(s -> s.getName().equals("whitespace")) == false) {
                tokenizers.add(randomStats("whitespace"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(), tokenizers,
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 2:
            Set<IndexFeatureStats> tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes());
            if (tokenFilters.removeIf(s -> s.getName().equals("stop")) == false) {
                tokenFilters.add(randomStats("stop"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(),
                    tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 3:
            Set<IndexFeatureStats> analyzers = new HashSet<>(instance.getUsedAnalyzerTypes());
            if (analyzers.removeIf(s -> s.getName().equals("english")) == false) {
                analyzers.add(randomStats("english"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers,
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 4:
            Set<IndexFeatureStats> builtInCharFilters = new HashSet<>(instance.getUsedBuiltInCharFilters());
            if (builtInCharFilters.removeIf(s -> s.getName().equals("html_strip")) == false) {
                builtInCharFilters.add(randomStats("html_strip"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters,
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 5:
            Set<IndexFeatureStats> builtInTokenizers = new HashSet<>(instance.getUsedBuiltInTokenizers());
            if (builtInTokenizers.removeIf(s -> s.getName().equals("keyword")) == false) {
                builtInTokenizers.add(randomStats("keyword"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 6:
            Set<IndexFeatureStats> builtInTokenFilters = new HashSet<>(instance.getUsedBuiltInTokenFilters());
            if (builtInTokenFilters.removeIf(s -> s.getName().equals("trim")) == false) {
                builtInTokenFilters.add(randomStats("trim"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters,
                    instance.getUsedBuiltInAnalyzers());
        case 7:
            Set<IndexFeatureStats> builtInAnalyzers = new HashSet<>(instance.getUsedBuiltInAnalyzers());
            if (builtInAnalyzers.removeIf(s -> s.getName().equals("french")) == false) {
                builtInAnalyzers.add(randomStats("french"));
            }
            return new AnalysisStats(instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    builtInAnalyzers);
        default:
            throw new AssertionError();
        }
    }
}

View File

@ -0,0 +1,70 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
/**
 * Wire-serialization tests for {@link MappingStats}.
 */
public class MappingStatsTests extends AbstractWireSerializingTestCase<MappingStats> {

    @Override
    protected Reader<MappingStats> instanceReader() {
        return MappingStats::new;
    }

    @Override
    protected MappingStats createTestInstance() {
        // Randomly include stats for up to two well-known field types.
        List<IndexFeatureStats> fieldStats = new ArrayList<>();
        if (randomBoolean()) {
            IndexFeatureStats keywordStats = new IndexFeatureStats("keyword");
            keywordStats.count = 10;
            keywordStats.indexCount = 7;
            fieldStats.add(keywordStats);
        }
        if (randomBoolean()) {
            IndexFeatureStats integerStats = new IndexFeatureStats("integer");
            integerStats.count = 3;
            integerStats.indexCount = 3;
            fieldStats.add(integerStats);
        }
        return new MappingStats(fieldStats);
    }

    @Override
    protected MappingStats mutateInstance(MappingStats instance) throws IOException {
        // Mutate by removing a random entry, adding a "float" entry, or both.
        List<IndexFeatureStats> fieldTypes = new ArrayList<>(instance.getFieldTypeStats());
        final boolean removedOne = fieldTypes.isEmpty() == false && randomBoolean();
        if (removedOne) {
            fieldTypes.remove(randomInt(fieldTypes.size() - 1));
        }
        if (removedOne == false || randomBoolean()) {
            IndexFeatureStats floatStats = new IndexFeatureStats("float");
            floatStats.count = 13;
            floatStats.indexCount = 2;
            fieldTypes.add(floatStats);
        }
        return new MappingStats(fieldTypes);
    }
}

View File

@ -1,10 +1,23 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.xpack.oss;
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.test.ESTestCase;
@ -15,10 +28,10 @@ import java.util.HashSet;
import java.util.Map;
import java.util.Set;
public class IndexFeatureSetTests extends ESTestCase {
public class MappingVisitorTests extends ESTestCase {
private static void collectTypes(Map<String, ?> mapping, Set<String> types) {
IndexFeatureSet.visitMapping(mapping,
MappingVisitor.visitMapping(mapping,
m -> {
if (m.containsKey("type")) {
types.add(m.get("type").toString());
@ -102,5 +115,4 @@ public class IndexFeatureSetTests extends ESTestCase {
collectTypes(mapping, fields);
assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields);
}
}

View File

@ -252,7 +252,6 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction
import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction;
import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction;
import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction;
import org.elasticsearch.xpack.oss.IndexFeatureSetUsage;
import java.util.ArrayList;
import java.util.Arrays;
@ -611,8 +610,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
// analytics
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new),
// Enrich
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSet.Usage::new),
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX, IndexFeatureSetUsage::new)
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSet.Usage::new)
).stream(),
MlEvaluationNamedXContentProvider.getNamedWriteables().stream()
).collect(toList());

View File

@ -53,8 +53,6 @@ public final class XPackField {
public static final String ANALYTICS = "analytics";
/** Name constant for the enrich plugin. */
public static final String ENRICH = "enrich";
/** Name constant for indices. */
public static final String INDEX = "index";
private XPackField() {}

View File

@ -68,7 +68,6 @@ import org.elasticsearch.xpack.core.security.authc.TokenMetaData;
import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader;
import org.elasticsearch.xpack.core.ssl.SSLService;
import org.elasticsearch.xpack.core.watcher.WatcherMetaData;
import org.elasticsearch.xpack.oss.IndexFeatureSet;
import java.nio.file.Files;
import java.nio.file.Path;
@ -246,8 +245,6 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
if (transportClientMode) {
modules.add(b -> b.bind(XPackLicenseState.class).toProvider(Providers.of(null)));
} else {
modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexFeatureSet.class));
}
return modules;
}

View File

@ -1,160 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.oss;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.XPackFeatureSet;
import org.elasticsearch.xpack.core.XPackField;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;
/**
 * X-Pack feature set reporting which mapping field types and analysis
 * components (char filters, tokenizers, token filters, analyzers — both
 * custom-configured and built-in) are used across the cluster, computed
 * from the local cluster state.
 */
public class IndexFeatureSet implements XPackFeatureSet {

    private final ClusterService clusterService;

    @Inject
    public IndexFeatureSet(ClusterService clusterService) {
        this.clusterService = clusterService;
    }

    @Override
    public String name() {
        return XPackField.INDEX;
    }

    @Override
    public boolean available() {
        return true; // always available, not license-gated
    }

    @Override
    public boolean enabled() {
        return true; // always enabled, no setting to turn it off
    }

    @Override
    public Map<String, Object> nativeCodeInfo() {
        return null; // no native code involved
    }

    /**
     * Scan every index in the current cluster state: collect field types and
     * built-in analyzer references from mappings, and analysis component types
     * from index settings, then report them as an {@link IndexFeatureSetUsage}.
     */
    @Override
    public void usage(ActionListener<Usage> listener) {
        final Set<String> usedFieldTypes = new HashSet<>();
        final Set<String> usedCharFilters = new HashSet<>();
        final Set<String> usedTokenizers = new HashSet<>();
        final Set<String> usedTokenFilters = new HashSet<>();
        final Set<String> usedAnalyzers = new HashSet<>();
        final Set<String> usedBuiltInCharFilters = new HashSet<>();
        final Set<String> usedBuiltInTokenizers = new HashSet<>();
        final Set<String> usedBuiltInTokenFilters = new HashSet<>();
        final Set<String> usedBuiltInAnalyzers = new HashSet<>();

        ClusterState state = clusterService.state();
        if (state != null) {
            for (IndexMetaData indexMetaData : state.metaData()) {
                MappingMetaData mappingMetaData = indexMetaData.mapping();
                if (mappingMetaData != null) {
                    visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> {
                        Object type = fieldMapping.get("type");
                        if (type != null) {
                            usedFieldTypes.add(type.toString());
                        } else if (fieldMapping.containsKey("properties")) {
                            // No explicit type but sub-properties: an object field.
                            usedFieldTypes.add("object");
                        }

                        // Analyzer references in mappings are assumed built-in
                        // until a custom analyzer of the same name is found below.
                        for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) {
                            Object analyzer = fieldMapping.get(key);
                            if (analyzer != null) {
                                usedBuiltInAnalyzers.add(analyzer.toString());
                            }
                        }
                    });
                }

                Settings indexSettings = indexMetaData.getSettings();

                // Custom components hide the built-in name they override.
                // NOTE(review): removal happens per-index against the cumulative
                // set, so a custom analyzer in one index can hide a genuine
                // built-in use recorded from another index — confirm intended.
                Map<String, Settings> analyzerSettings = indexSettings.getGroups("index.analysis.analyzer");
                usedBuiltInAnalyzers.removeAll(analyzerSettings.keySet());
                for (Settings analyzerSetting : analyzerSettings.values()) {
                    usedAnalyzers.add(analyzerSetting.get("type", "custom"));
                    usedBuiltInCharFilters.addAll(analyzerSetting.getAsList("char_filter"));
                    String tokenizer = analyzerSetting.get("tokenizer");
                    if (tokenizer != null) {
                        usedBuiltInTokenizers.add(tokenizer);
                    }
                    usedBuiltInTokenFilters.addAll(analyzerSetting.getAsList("filter"));
                }

                Map<String, Settings> charFilterSettings = indexSettings.getGroups("index.analysis.char_filter");
                usedBuiltInCharFilters.removeAll(charFilterSettings.keySet());
                aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilters);

                Map<String, Settings> tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer");
                usedBuiltInTokenizers.removeAll(tokenizerSettings.keySet());
                aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizers);

                Map<String, Settings> tokenFilterSettings = indexSettings.getGroups("index.analysis.filter");
                usedBuiltInTokenFilters.removeAll(tokenFilterSettings.keySet());
                aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilters);
            }
        }
        listener.onResponse(new IndexFeatureSetUsage(usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters,
                usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers));
    }

    /**
     * Recursively visit every field mapping under "properties", including
     * multi-fields under "fields" (which are visited but not recursed into).
     */
    static void visitMapping(Map<String, ?> mapping, Consumer<Map<String, ?>> fieldMappingConsumer) {
        Object properties = mapping.get("properties");
        // instanceof already rejects null; the explicit null checks were redundant
        if (properties instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, ?> propertiesAsMap = (Map<String, ?>) properties;
            for (Object v : propertiesAsMap.values()) {
                if (v instanceof Map) {
                    @SuppressWarnings("unchecked")
                    Map<String, ?> fieldMapping = (Map<String, ?>) v;
                    fieldMappingConsumer.accept(fieldMapping);
                    visitMapping(fieldMapping, fieldMappingConsumer);

                    // Multi fields
                    Object fieldsO = fieldMapping.get("fields");
                    if (fieldsO instanceof Map) {
                        @SuppressWarnings("unchecked")
                        Map<String, ?> fields = (Map<String, ?>) fieldsO;
                        for (Object v2 : fields.values()) {
                            if (v2 instanceof Map) {
                                @SuppressWarnings("unchecked")
                                Map<String, ?> fieldMapping2 = (Map<String, ?>) v2;
                                fieldMappingConsumer.accept(fieldMapping2);
                            }
                        }
                    }
                }
            }
        }
    }

    /** Collect the "type" value of each analysis component settings group into {@code usedTypes}. */
    static void aggregateAnalysisTypes(Collection<Settings> analysisComponents, Set<String> usedTypes) {
        for (Settings settings : analysisComponents) {
            String type = settings.get("type");
            if (type != null) {
                usedTypes.add(type);
            }
        }
    }
}

View File

@ -1,191 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.oss;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.XPackFeatureSet;
import org.elasticsearch.xpack.core.XPackField;
import java.io.IOException;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
/**
 * Usage payload for the index feature set: the sets of field types and
 * analysis components in use across the cluster. All sets exposed by getters
 * are sorted and unmodifiable.
 */
public class IndexFeatureSetUsage extends XPackFeatureSet.Usage {

    /** Return a sorted, unmodifiable copy of {@code set}. */
    private static Set<String> sort(Set<String> set) {
        return Collections.unmodifiableSet(new TreeSet<>(set));
    }

    private final Set<String> usedFieldTypes;
    private final Set<String> usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers;
    private final Set<String> usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers;

    public IndexFeatureSetUsage(Set<String> usedFieldTypes,
            Set<String> usedCharFilters, Set<String> usedTokenizers, Set<String> usedTokenFilters, Set<String> usedAnalyzers,
            Set<String> usedBuiltInCharFilters, Set<String> usedBuiltInTokenizers, Set<String> usedBuiltInTokenFilters,
            Set<String> usedBuiltInAnalyzers) {
        super(XPackField.INDEX, true, true);
        this.usedFieldTypes = sort(usedFieldTypes);
        this.usedCharFilters = sort(usedCharFilters);
        this.usedTokenizers = sort(usedTokenizers);
        this.usedTokenFilters = sort(usedTokenFilters);
        this.usedAnalyzers = sort(usedAnalyzers);
        this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters);
        this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers);
        this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters);
        this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers);
    }

    public IndexFeatureSetUsage(StreamInput input) throws IOException {
        super(input);
        // Apply sort() here too so that deserialized instances expose the same
        // sorted, unmodifiable views as instances built via the main constructor
        // (previously the raw mutable sets from readSet were stored directly).
        usedFieldTypes = sort(input.readSet(StreamInput::readString));
        usedCharFilters = sort(input.readSet(StreamInput::readString));
        usedTokenizers = sort(input.readSet(StreamInput::readString));
        usedTokenFilters = sort(input.readSet(StreamInput::readString));
        usedAnalyzers = sort(input.readSet(StreamInput::readString));
        usedBuiltInCharFilters = sort(input.readSet(StreamInput::readString));
        usedBuiltInTokenizers = sort(input.readSet(StreamInput::readString));
        usedBuiltInTokenFilters = sort(input.readSet(StreamInput::readString));
        usedBuiltInAnalyzers = sort(input.readSet(StreamInput::readString));
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeCollection(usedFieldTypes, StreamOutput::writeString);
        out.writeCollection(usedCharFilters, StreamOutput::writeString);
        out.writeCollection(usedTokenizers, StreamOutput::writeString);
        out.writeCollection(usedTokenFilters, StreamOutput::writeString);
        out.writeCollection(usedAnalyzers, StreamOutput::writeString);
        out.writeCollection(usedBuiltInCharFilters, StreamOutput::writeString);
        out.writeCollection(usedBuiltInTokenizers, StreamOutput::writeString);
        out.writeCollection(usedBuiltInTokenFilters, StreamOutput::writeString);
        out.writeCollection(usedBuiltInAnalyzers, StreamOutput::writeString);
    }

    /**
     * Return the set of used field types in the cluster.
     */
    public Set<String> getUsedFieldTypes() {
        return usedFieldTypes;
    }

    /**
     * Return the set of used char filters in the cluster.
     */
    public Set<String> getUsedCharFilterTypes() {
        return usedCharFilters;
    }

    /**
     * Return the set of used tokenizers in the cluster.
     */
    public Set<String> getUsedTokenizerTypes() {
        return usedTokenizers;
    }

    /**
     * Return the set of used token filters in the cluster.
     */
    public Set<String> getUsedTokenFilterTypes() {
        return usedTokenFilters;
    }

    /**
     * Return the set of used analyzers in the cluster.
     */
    public Set<String> getUsedAnalyzerTypes() {
        return usedAnalyzers;
    }

    /**
     * Return the set of used built-in char filters in the cluster.
     */
    public Set<String> getUsedBuiltInCharFilters() {
        return usedBuiltInCharFilters;
    }

    /**
     * Return the set of used built-in tokenizers in the cluster.
     */
    public Set<String> getUsedBuiltInTokenizers() {
        return usedBuiltInTokenizers;
    }

    /**
     * Return the set of used built-in token filters in the cluster.
     */
    public Set<String> getUsedBuiltInTokenFilters() {
        return usedBuiltInTokenFilters;
    }

    /**
     * Return the set of used built-in analyzers in the cluster.
     */
    public Set<String> getUsedBuiltInAnalyzers() {
        return usedBuiltInAnalyzers;
    }

    @Override
    protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
        super.innerXContent(builder, params);
        builder.startObject("analysis");
        {
            builder.field("char_filter_types", usedCharFilters);
            builder.field("tokenizer_types", usedTokenizers);
            builder.field("filter_types", usedTokenFilters);
            builder.field("analyzer_types", usedAnalyzers);
            builder.field("built_in_char_filters", usedBuiltInCharFilters);
            builder.field("built_in_tokenizers", usedBuiltInTokenizers);
            builder.field("built_in_filters", usedBuiltInTokenFilters);
            builder.field("built_in_analyzers", usedBuiltInAnalyzers);
        }
        builder.endObject();
        builder.startObject("mappings");
        {
            builder.field("field_types", usedFieldTypes);
        }
        builder.endObject();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        IndexFeatureSetUsage that = (IndexFeatureSetUsage) o;
        return available == that.available && enabled == that.enabled &&
                Objects.equals(usedFieldTypes, that.usedFieldTypes) &&
                Objects.equals(usedCharFilters, that.usedCharFilters) &&
                Objects.equals(usedTokenizers, that.usedTokenizers) &&
                Objects.equals(usedTokenFilters, that.usedTokenFilters) &&
                Objects.equals(usedAnalyzers, that.usedAnalyzers) &&
                Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) &&
                Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) &&
                Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) &&
                Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers);
    }

    @Override
    public int hashCode() {
        return Objects.hash(available, enabled, usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters,
                usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters,
                usedBuiltInAnalyzers);
    }

    @Override
    public String toString() {
        return Strings.toString(this, true, true);
    }
}

View File

@ -1,10 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
/**
* Package containing usage information for features that are exposed in OSS.
*/
package org.elasticsearch.xpack.oss;

View File

@ -1,168 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.oss;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
/**
 * Wire-serialization tests for {@link IndexFeatureSetUsage}: round-trips
 * random instances and checks each mutation toggles exactly one set.
 */
public class IndexFeatureSetUsageTests extends AbstractWireSerializingTestCase<IndexFeatureSetUsage> {

    @Override
    protected Reader<IndexFeatureSetUsage> instanceReader() {
        return IndexFeatureSetUsage::new;
    }

    @Override
    protected IndexFeatureSetUsage createTestInstance() {
        Set<String> fields = new HashSet<>();
        if (randomBoolean()) {
            fields.add("keyword");
        }
        if (randomBoolean()) {
            fields.add("integer");
        }
        Set<String> charFilters = new HashSet<>();
        if (randomBoolean()) {
            charFilters.add("pattern_replace");
        }
        Set<String> tokenizers = new HashSet<>();
        if (randomBoolean()) {
            tokenizers.add("whitespace");
        }
        Set<String> tokenFilters = new HashSet<>();
        if (randomBoolean()) {
            tokenFilters.add("stop");
        }
        Set<String> analyzers = new HashSet<>();
        if (randomBoolean()) {
            // Fixed: this branch previously added to tokenFilters by mistake,
            // so random instances never had any analyzer entries.
            analyzers.add("english");
        }
        Set<String> builtInCharFilters = new HashSet<>();
        if (randomBoolean()) {
            builtInCharFilters.add("html_strip");
        }
        Set<String> builtInTokenizers = new HashSet<>();
        if (randomBoolean()) {
            builtInTokenizers.add("keyword");
        }
        Set<String> builtInTokenFilters = new HashSet<>();
        if (randomBoolean()) {
            builtInTokenFilters.add("trim");
        }
        Set<String> builtInAnalyzers = new HashSet<>();
        if (randomBoolean()) {
            builtInAnalyzers.add("french");
        }
        return new IndexFeatureSetUsage(fields,
                charFilters, tokenizers, tokenFilters, analyzers,
                builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers);
    }

    @Override
    protected IndexFeatureSetUsage mutateInstance(IndexFeatureSetUsage instance) throws IOException {
        // Each case toggles the well-known entry of exactly one of the 9 sets.
        switch (randomInt(8)) {
        case 0:
            Set<String> fields = new HashSet<>(instance.getUsedFieldTypes());
            if (fields.add("keyword") == false) {
                fields.remove("keyword");
            }
            return new IndexFeatureSetUsage(fields, instance.getUsedCharFilterTypes(), instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 1:
            Set<String> charFilters = new HashSet<>(instance.getUsedCharFilterTypes());
            if (charFilters.add("pattern_replace") == false) {
                charFilters.remove("pattern_replace");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), charFilters, instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 2:
            Set<String> tokenizers = new HashSet<>(instance.getUsedTokenizerTypes());
            if (tokenizers.add("whitespace") == false) {
                tokenizers.remove("whitespace");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), tokenizers,
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 3:
            Set<String> tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes());
            if (tokenFilters.add("stop") == false) {
                tokenFilters.remove("stop");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(),
                    tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 4:
            Set<String> analyzers = new HashSet<>(instance.getUsedAnalyzerTypes());
            if (analyzers.add("english") == false) {
                analyzers.remove("english");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers,
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 5:
            Set<String> builtInCharFilters = new HashSet<>(instance.getUsedBuiltInCharFilters());
            if (builtInCharFilters.add("html_strip") == false) {
                builtInCharFilters.remove("html_strip");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters,
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 6:
            Set<String> builtInTokenizers = new HashSet<>(instance.getUsedBuiltInTokenizers());
            if (builtInTokenizers.add("keyword") == false) {
                builtInTokenizers.remove("keyword");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 7:
            Set<String> builtInTokenFilters = new HashSet<>(instance.getUsedBuiltInTokenFilters());
            if (builtInTokenFilters.add("trim") == false) {
                builtInTokenFilters.remove("trim");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters,
                    instance.getUsedBuiltInAnalyzers());
        case 8:
            Set<String> builtInAnalyzers = new HashSet<>(instance.getUsedBuiltInAnalyzers());
            if (builtInAnalyzers.add("french") == false) {
                builtInAnalyzers.remove("french");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    builtInAnalyzers);
        default:
            throw new AssertionError();
        }
    }
}

View File

@ -323,7 +323,8 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Cl
"_cluster",
clusterName,
singletonList(mockNodeResponse),
emptyList());
emptyList(),
clusterState);
final MonitoringDoc.Node node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", 1504169190855L);
@ -437,6 +438,19 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Cl
+ "\"fixed_bit_set_memory_in_bytes\":0,"
+ "\"max_unsafe_auto_id_timestamp\":-9223372036854775808,"
+ "\"file_sizes\":{}"
+ "},"
+ "\"mappings\":{"
+ "\"field_types\":[]"
+ "},"
+ "\"analysis\":{"
+ "\"char_filter_types\":[],"
+ "\"tokenizer_types\":[],"
+ "\"filter_types\":[],"
+ "\"analyzer_types\":[],"
+ "\"built_in_char_filters\":[],"
+ "\"built_in_tokenizers\":[],"
+ "\"built_in_filters\":[],"
+ "\"built_in_analyzers\":[]"
+ "}"
+ "},"
+ "\"nodes\":{"

View File

@ -1,262 +0,0 @@
# Integration tests for the X-Pack info and usage APIs
#
"X-Pack Info and Usage":
- do:
cluster.health:
wait_for_status: yellow
- do:
license.delete: {}
- match: { acknowledged: true }
# we don't have a license now
- do:
xpack.info:
categories: "license,features"
- is_false: license
- is_true: features
- is_true: features.watcher
- is_true: features.watcher.enabled
# - is_false: features.watcher.available TODO fix once licensing is fixed
- is_true: features.security
- is_true: features.security.enabled
# - is_false: features.security.available TODO fix once licensing is fixed
- is_true: features.graph
- is_true: features.graph.enabled
# - is_false: features.graph.available TODO fix once licensing is fixed
- is_true: features.monitoring
- is_true: features.monitoring.enabled
# - is_false: features.monitoring.available TODO fix once licensing is fixed
- is_true: features.analytics
- is_true: features.analytics.enabled
- do:
license.post:
body: >
{
"license": {
"uid": "893361dc-9749-4997-93cb-802e3dofh7aa",
"type": "internal",
"subscription_type": "none",
"issue_date_in_millis": 1443484800000,
"feature": "watcher",
"expiry_date_in_millis": 1914278399999,
"max_nodes": 1,
"issued_to": "issuedTo",
"issuer": "issuer",
"signature": "AAAAAQAAAA0Sc90guRIaQEmgLvMnAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQCQ94dju0pnDZR3Uuypi0ic3aQJ+nvVqe+U8u79Dga5n1qIjcHDh7HvIBJEkF+tnVPlo/PXV/x7BZSwVY1PVErit+6rYix1yuHEgqwxmx/VdRICjCaZM6tk0Ob4dZCPv6Ebn2Mmk89KHC/PwiLPqF6QfwV/Pkpa8k2A3ORJmvYSDvXhe6tCs8dqc4ebrsFxqrZjwWh5CZSpzqqZBFXlngDv2N0hHhpGlueRszD0JJ5dfEL5ZA1DDOrgO9OJVejSHyRqe1L5QRUNdXPVfS+EAG0Dd1cNdJ/sMpYCPnVjbw6iq2/YgM3cuztsXVBY7ij4WnoP3ce7Zjs9TwHn+IqzftC6"
}
}
- match: { license_status: "valid" }
- do:
license.get: {}
- match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" }
- match: { license.type: "internal" }
- match: { license.status: "active" }
- do:
xpack.info: {}
- is_true: build.hash
- is_true: build.date
- is_true: license
- match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" }
- match: { license.type: "internal" }
- match: { license.mode: "trial" }
- match: { license.status: "active" }
- match: { license.expiry_date_in_millis: 1914278399999 }
- is_true: features
- is_true: features.watcher
- is_true: features.watcher.enabled
- is_true: features.watcher.available
- is_true: features.security
- is_true: features.security.enabled
- is_true: features.security.available
- is_true: features.graph
- is_true: features.graph.enabled
- is_true: features.graph.available
- is_true: features.monitoring
- is_true: features.monitoring.enabled
- is_true: features.monitoring.available
- is_true: features.analytics.enabled
- is_true: features.analytics.available
- is_true: features.enrich.available
- is_true: features.enrich.enabled
- is_true: tagline
- do:
xpack.usage: {}
- is_true: watcher.enabled
- is_true: watcher.available
- is_true: security.enabled
- is_true: security.available
- is_true: graph.enabled
- is_true: graph.available
- is_true: monitoring.enabled
- is_true: monitoring.available
- is_true: analytics.available
- do:
xpack.info:
categories: "_none"
- is_false: build
- is_false: features
- is_false: license
- match: { tagline: "You know, for X" }
- do:
xpack.info:
categories: "_none"
human: false
- is_false: build
- is_false: features
- is_false: license
- is_false: tagline
- do:
xpack.info:
categories: "build"
- is_true: build
- is_true: build.hash
- is_true: build.date
- is_true: tagline
- is_false: features
- is_false: license
- do:
xpack.info:
categories: "build,license"
- is_true: build.hash
- is_true: build.date
- is_true: tagline
- is_false: features
- is_true: license
- match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" }
- match: { license.type: "internal" }
- match: { license.mode: "trial" }
- match: { license.status: "active" }
- match: { license.expiry_date_in_millis: 1914278399999 }
- do:
xpack.info:
categories: "build,license,features"
human: false
- is_true: build.hash
- is_true: build.date
- is_true: license
- match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" }
- match: { license.type: "internal" }
- match: { license.mode: "trial" }
- match: { license.status: "active" }
- match: { license.expiry_date_in_millis: 1914278399999 }
- is_true: features
- is_true: features.watcher
- is_true: features.watcher.enabled
- is_true: features.watcher.available
- is_true: features.security
- is_true: features.security.enabled
- is_true: features.security.available
- is_true: features.graph
- is_true: features.graph.enabled
- is_true: features.graph.available
- is_true: features.monitoring
- is_true: features.monitoring.enabled
- is_true: features.monitoring.available
- is_false: tagline
---
"Usage stats for mappings":
- skip:
version: "all"
reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/51127"
- do:
xpack.usage: {}
- match: { index.mappings.field_types: [] }
- do:
indices.create:
index: test-index1
body:
mappings:
properties:
foo:
type: keyword
- do:
indices.create:
index: test-index2
body:
mappings:
properties:
foo:
type: keyword
bar:
properties:
quux:
type: integer
- do:
xpack.usage: {}
- match: { index.mappings.field_types: [ "integer", "keyword", "object" ] }
---
"Usage stats for analysis":
- do:
xpack.usage: {}
- match: { index.analysis.char_filter_types: [] }
- match: { index.analysis.tokenizer_types: [] }
- match: { index.analysis.filter_types: [] }
- match: { index.analysis.analyzer_types: [] }
- do:
indices.create:
index: test-index1
body:
settings:
analysis:
char_filter:
c:
type: mapping
mappings: [ "a => b" ]
tokenizer:
tok:
type: pattern
pattern: ","
filter:
st:
type: stop
stopwords: [ "a" ]
analyzer:
en:
type: standard
stopwords: "_english_"
cust:
char_filter: [ "html_strip" ]
tokenizer: "keyword"
filter: [ "trim" ]
mappings:
properties:
message:
type: "text"
analyzer: french
search_analyzer: spanish
search_quote_analyzer: german
- do:
xpack.usage: {}
- match: { index.analysis.char_filter_types: [ "mapping" ] }
- match: { index.analysis.tokenizer_types: [ "pattern" ] }
- match: { index.analysis.filter_types: [ "stop" ] }
- match: { index.analysis.analyzer_types: [ "custom", "standard" ] }
- match: { index.analysis.built_in_char_filters: [ "html_strip" ] }
- match: { index.analysis.built_in_tokenizers: [ "keyword" ] }
- match: { index.analysis.built_in_filters: [ "trim" ] }
- match: { index.analysis.built_in_analyzers: [ "french", "german", "spanish" ] }