From 45d7bdcfd7847d47eb5be178c887d9d5dc09bd25 Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Thu, 16 Jan 2020 09:56:41 +0100
Subject: [PATCH] Add analysis components and mapping types to the usage API.
 (#51062)

Knowing about used analysis components and mapping types would be incredibly
useful in order to know which ones may be deprecated or should get more love.
Some field types also act as a proxy to know about feature usage of some APIs
like the `percolator` or `completion` field types for percolation and the
completion suggester, respectively.
---
 docs/reference/rest-api/info.asciidoc         |   4 +
 .../xpack/core/XPackClientPlugin.java         |   4 +-
 .../elasticsearch/xpack/core/XPackField.java  |   2 +
 .../elasticsearch/xpack/core/XPackPlugin.java |   3 +
 .../xpack/oss/IndexFeatureSet.java            | 160 +++++++++++++++
 .../xpack/oss/IndexFeatureSetUsage.java       | 185 ++++++++++++++++++
 .../elasticsearch/xpack/oss/package-info.java |  10 +
 .../xpack/oss/IndexFeatureSetTests.java       | 106 ++++++++++
 .../xpack/oss/IndexFeatureSetUsageTests.java  | 168 ++++++++++++++++
 .../rest-api-spec/test/xpack/15_basic.yml     |  88 +++++++++
 10 files changed, 729 insertions(+), 1 deletion(-)
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSet.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java
 create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java
 create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetTests.java
 create mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java

diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc
index bfd7cad1389..30f75522dd4 100644
--- a/docs/reference/rest-api/info.asciidoc
+++ b/docs/reference/rest-api/info.asciidoc
@@ -91,6 +91,10 @@ Example response:
       "available" : true,
       "enabled" : true
     },
+    "index" : {
+      "available" : true,
+      "enabled" : true
+    },
     "logstash" : {
       "available" : true,
       "enabled" : true
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
index 8c4b5af8c8c..ff29e216c95 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
@@ -252,6 +252,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction
 import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction;
 import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction;
 import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction;
+import org.elasticsearch.xpack.oss.IndexFeatureSetUsage;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -608,7 +609,8 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
             // analytics
             new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new),
             // Enrich
-            new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSet.Usage::new)
+            new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSet.Usage::new),
+            new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX, IndexFeatureSetUsage::new)
         ).stream(),
         MlEvaluationNamedXContentProvider.getNamedWriteables().stream()
     ).collect(toList());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
index 931a55db350..228be8fdf6c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java
@@ -53,6 +53,8 @@ public final class XPackField {
     public static final String ANALYTICS = "analytics";
     /** Name constant for the enrich plugin. */
     public static final String ENRICH = "enrich";
+    /** Name constant for indices. */
+    public static final String INDEX = "index";
 
     private XPackField() {}
 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
index 23424a4ed39..dfdae6f79f4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java
@@ -68,6 +68,7 @@ import org.elasticsearch.xpack.core.security.authc.TokenMetaData;
 import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.core.watcher.WatcherMetaData;
+import org.elasticsearch.xpack.oss.IndexFeatureSet;
 
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -245,6 +246,8 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
 
         if (transportClientMode) {
             modules.add(b -> b.bind(XPackLicenseState.class).toProvider(Providers.of(null)));
+        } else {
+            modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexFeatureSet.class));
         }
         return modules;
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSet.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSet.java
new file mode 100644
index 00000000000..c026478ae31
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSet.java
@@ -0,0 +1,160 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.oss;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.xpack.core.XPackFeatureSet;
+import org.elasticsearch.xpack.core.XPackField;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Consumer;
+
+public class IndexFeatureSet implements XPackFeatureSet {
+
+    private final ClusterService clusterService;
+
+    @Inject
+    public IndexFeatureSet(ClusterService clusterService) {
+        this.clusterService = clusterService;
+    }
+
+    @Override
+    public String name() {
+        return XPackField.INDEX;
+    }
+
+    @Override
+    public boolean available() {
+        return true;
+    }
+
+    @Override
+    public boolean enabled() {
+        return true;
+    }
+
+    @Override
+    public Map<String, Object> nativeCodeInfo() {
+        return null;
+    }
+
+    @Override
+    public void usage(ActionListener<XPackFeatureSet.Usage> listener) {
+        final Set<String> usedFieldTypes = new HashSet<>();
+        final Set<String> usedCharFilters = new HashSet<>();
+        final Set<String> usedTokenizers = new HashSet<>();
+        final Set<String> usedTokenFilters = new HashSet<>();
+        final Set<String> usedAnalyzers = new HashSet<>();
+        final Set<String> usedBuiltInCharFilters = new HashSet<>();
+        final Set<String> usedBuiltInTokenizers = new HashSet<>();
+        final Set<String> usedBuiltInTokenFilters = new HashSet<>();
+        final Set<String> usedBuiltInAnalyzers = new HashSet<>();
+
+        ClusterState state = clusterService.state();
+        if (state != null) {
+
+            for (IndexMetaData indexMetaData : state.metaData()) {
+                MappingMetaData mappingMetaData = indexMetaData.mapping();
+                if (mappingMetaData != null) {
+                    visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> {
+                        Object type = fieldMapping.get("type");
+                        if (type != null) {
+                            usedFieldTypes.add(type.toString());
+                        } else if (fieldMapping.containsKey("properties")) {
+                            usedFieldTypes.add("object");
+                        }
+
+                        for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) {
+                            Object analyzer = fieldMapping.get(key);
+                            if (analyzer != null) {
+                                usedBuiltInAnalyzers.add(analyzer.toString());
+                            }
+                        }
+                    });
+                }
+
+                Settings indexSettings = indexMetaData.getSettings();
+
+                Map<String, Settings> analyzerSettings = indexSettings.getGroups("index.analysis.analyzer");
+                usedBuiltInAnalyzers.removeAll(analyzerSettings.keySet());
+                for (Settings analyzerSetting : analyzerSettings.values()) {
+                    usedAnalyzers.add(analyzerSetting.get("type", "custom"));
+                    usedBuiltInCharFilters.addAll(analyzerSetting.getAsList("char_filter"));
+                    String tokenizer = analyzerSetting.get("tokenizer");
+                    if (tokenizer != null) {
+                        usedBuiltInTokenizers.add(tokenizer);
+                    }
+                    usedBuiltInTokenFilters.addAll(analyzerSetting.getAsList("filter"));
+                }
+
+                Map<String, Settings> charFilterSettings = indexSettings.getGroups("index.analysis.char_filter");
+                usedBuiltInCharFilters.removeAll(charFilterSettings.keySet());
+                aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilters);
+
+                Map<String, Settings> tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer");
+                usedBuiltInTokenizers.removeAll(tokenizerSettings.keySet());
+                aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizers);
+
+                Map<String, Settings> tokenFilterSettings = indexSettings.getGroups("index.analysis.filter");
+                usedBuiltInTokenFilters.removeAll(tokenFilterSettings.keySet());
+                aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilters);
+            }
+        }
+
+        listener.onResponse(new IndexFeatureSetUsage(usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters,
+                usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers));
+    }
+
+    static void visitMapping(Map<String, ?> mapping, Consumer<Map<String, ?>> fieldMappingConsumer) {
+        Object properties = mapping.get("properties");
+        if (properties != null && properties instanceof Map) {
+            @SuppressWarnings("unchecked")
+            Map<String, ?> propertiesAsMap = (Map<String, ?>) properties;
+            for (Object v : propertiesAsMap.values()) {
+                if (v != null && v instanceof Map) {
+
+                    @SuppressWarnings("unchecked")
+                    Map<String, ?> fieldMapping = (Map<String, ?>) v;
+                    fieldMappingConsumer.accept(fieldMapping);
+                    visitMapping(fieldMapping, fieldMappingConsumer);
+
+                    // Multi fields
+                    Object fieldsO = fieldMapping.get("fields");
+                    if (fieldsO != null && fieldsO instanceof Map) {
+                        @SuppressWarnings("unchecked")
+                        Map<String, ?> fields = (Map<String, ?>) fieldsO;
+                        for (Object v2 : fields.values()) {
+                            if (v2 instanceof Map) {
+                                @SuppressWarnings("unchecked")
+                                Map<String, ?> fieldMapping2 = (Map<String, ?>) v2;
+                                fieldMappingConsumer.accept(fieldMapping2);
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    static void aggregateAnalysisTypes(Collection<Settings> analysisComponents, Set<String> usedTypes) {
+        for (Settings settings : analysisComponents) {
+            String type = settings.get("type");
+            if (type != null) {
+                usedTypes.add(type);
+            }
+        }
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java
new file mode 100644
index 00000000000..cd779d09d52
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.oss;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.xpack.core.XPackFeatureSet;
+import org.elasticsearch.xpack.core.XPackField;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeSet;
+
+public class IndexFeatureSetUsage extends XPackFeatureSet.Usage {
+
+    private static Set<String> sort(Set<String> set) {
+        return Collections.unmodifiableSet(new TreeSet<>(set));
+    }
+
+    private final Set<String> usedFieldTypes;
+    private final Set<String> usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers;
+    private final Set<String> usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers;
+
+    public IndexFeatureSetUsage(Set<String> usedFieldTypes,
+            Set<String> usedCharFilters, Set<String> usedTokenizers, Set<String> usedTokenFilters, Set<String> usedAnalyzers,
+            Set<String> usedBuiltInCharFilters, Set<String> usedBuiltInTokenizers, Set<String> usedBuiltInTokenFilters,
+            Set<String> usedBuiltInAnalyzers) {
+        super(XPackField.INDEX, true, true);
+        this.usedFieldTypes = sort(usedFieldTypes);
+        this.usedCharFilters = sort(usedCharFilters);
+        this.usedTokenizers = sort(usedTokenizers);
+        this.usedTokenFilters = sort(usedTokenFilters);
+        this.usedAnalyzers = sort(usedAnalyzers);
+        this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters);
+        this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers);
+        this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters);
+        this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers);
+    }
+
+    public IndexFeatureSetUsage(StreamInput input) throws IOException {
+        super(input);
+        usedFieldTypes = input.readSet(StreamInput::readString);
+        usedCharFilters = input.readSet(StreamInput::readString);
+        usedTokenizers = input.readSet(StreamInput::readString);
+        usedTokenFilters = input.readSet(StreamInput::readString);
+        usedAnalyzers = input.readSet(StreamInput::readString);
+        usedBuiltInCharFilters = input.readSet(StreamInput::readString);
+        usedBuiltInTokenizers = input.readSet(StreamInput::readString);
+        usedBuiltInTokenFilters = input.readSet(StreamInput::readString);
+        usedBuiltInAnalyzers = input.readSet(StreamInput::readString);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeCollection(usedFieldTypes, StreamOutput::writeString);
+        out.writeCollection(usedCharFilters, StreamOutput::writeString);
+        out.writeCollection(usedTokenizers, StreamOutput::writeString);
+        out.writeCollection(usedTokenFilters, StreamOutput::writeString);
+        out.writeCollection(usedAnalyzers, StreamOutput::writeString);
+        out.writeCollection(usedBuiltInCharFilters, StreamOutput::writeString);
+        out.writeCollection(usedBuiltInTokenizers, StreamOutput::writeString);
+        out.writeCollection(usedBuiltInTokenFilters, StreamOutput::writeString);
+        out.writeCollection(usedBuiltInAnalyzers, StreamOutput::writeString);
+    }
+
+    /**
+     * Return the set of used field types in the cluster.
+     */
+    public Set<String> getUsedFieldTypes() {
+        return usedFieldTypes;
+    }
+
+    /**
+     * Return the set of used char filters in the cluster.
+     */
+    public Set<String> getUsedCharFilterTypes() {
+        return usedCharFilters;
+    }
+
+    /**
+     * Return the set of used tokenizers in the cluster.
+     */
+    public Set<String> getUsedTokenizerTypes() {
+        return usedTokenizers;
+    }
+
+    /**
+     * Return the set of used token filters in the cluster.
+     */
+    public Set<String> getUsedTokenFilterTypes() {
+        return usedTokenFilters;
+    }
+
+    /**
+     * Return the set of used analyzers in the cluster.
+     */
+    public Set<String> getUsedAnalyzerTypes() {
+        return usedAnalyzers;
+    }
+
+    /**
+     * Return the set of used built-in char filters in the cluster.
+     */
+    public Set<String> getUsedBuiltInCharFilters() {
+        return usedBuiltInCharFilters;
+    }
+
+    /**
+     * Return the set of used built-in tokenizers in the cluster.
+     */
+    public Set<String> getUsedBuiltInTokenizers() {
+        return usedBuiltInTokenizers;
+    }
+
+    /**
+     * Return the set of used built-in token filters in the cluster.
+     */
+    public Set<String> getUsedBuiltInTokenFilters() {
+        return usedBuiltInTokenFilters;
+    }
+
+    /**
+     * Return the set of used built-in analyzers in the cluster.
+     */
+    public Set<String> getUsedBuiltInAnalyzers() {
+        return usedBuiltInAnalyzers;
+    }
+
+    @Override
+    protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
+        super.innerXContent(builder, params);
+
+        builder.startObject("analysis");
+        {
+            builder.field("char_filter_types", usedCharFilters);
+            builder.field("tokenizer_types", usedTokenizers);
+            builder.field("filter_types", usedTokenFilters);
+            builder.field("analyzer_types", usedAnalyzers);
+
+            builder.field("built_in_char_filters", usedBuiltInCharFilters);
+            builder.field("built_in_tokenizers", usedBuiltInTokenizers);
+            builder.field("built_in_filters", usedBuiltInTokenFilters);
+            builder.field("built_in_analyzers", usedBuiltInAnalyzers);
+        }
+        builder.endObject();
+
+        builder.startObject("mappings");
+        {
+            builder.field("field_types", usedFieldTypes);
+        }
+        builder.endObject();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        IndexFeatureSetUsage that = (IndexFeatureSetUsage) o;
+        return available == that.available && enabled == that.enabled &&
+                Objects.equals(usedFieldTypes, that.usedFieldTypes) &&
+                Objects.equals(usedCharFilters, that.usedCharFilters) &&
+                Objects.equals(usedTokenizers, that.usedTokenizers) &&
+                Objects.equals(usedTokenFilters, that.usedTokenFilters) &&
+                Objects.equals(usedAnalyzers, that.usedAnalyzers) &&
+                Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) &&
+                Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) &&
+                Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) &&
+                Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(available, enabled, usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters,
+                usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters,
+                usedBuiltInAnalyzers);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java
new file mode 100644
index 00000000000..56582e07467
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/**
+ * Package containing usage information for features that are exposed in OSS.
+ */
+package org.elasticsearch.xpack.oss;
\ No newline at end of file
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetTests.java
new file mode 100644
index 00000000000..0807ce6ca08
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetTests.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.oss;
+
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+public class IndexFeatureSetTests extends ESTestCase {
+
+    private static void collectTypes(Map<String, Object> mapping, Set<String> types) {
+        IndexFeatureSet.visitMapping(mapping,
+                m -> {
+                    if (m.containsKey("type")) {
+                        types.add(m.get("type").toString());
+                    } else {
+                        types.add("object");
+                    }
+                });
+    }
+
+    public void testCountTopLevelFields() {
+        Map<String, Object> mapping = new HashMap<>();
+        Set<String> fields = new HashSet<>();
+        collectTypes(mapping, fields);
+        assertEquals(Collections.emptySet(), fields);
+
+        Map<String, Object> properties = new HashMap<>();
+        mapping.put("properties", properties);
+
+        Map<String, Object> keywordField = new HashMap<>();
+        keywordField.put("type", "keyword");
+        properties.put("foo", keywordField);
+        collectTypes(mapping, fields);
+        assertEquals(Collections.singleton("keyword"), fields);
+
+        Map<String, Object> indexField = new HashMap<>();
+        indexField.put("type", "integer");
+        properties.put("bar", indexField);
+        fields = new HashSet<>();
+        collectTypes(mapping, fields);
+        assertEquals(new HashSet<>(Arrays.asList("keyword", "integer")), fields);
+
+        properties.put("baz", indexField);
+        fields = new HashSet<>();
+        collectTypes(mapping, fields);
+        assertEquals(new HashSet<>(Arrays.asList("keyword", "integer")), fields);
+    }
+
+    public void testCountMultiFields() {
+        Map<String, Object> keywordField = new HashMap<>();
+        keywordField.put("type", "keyword");
+
+        Map<String, Object> textField = new HashMap<>();
+        textField.put("type", "text");
+
+        Map<String, Object> fields = new HashMap<>();
+        fields.put("keyword", keywordField);
+        textField.put("fields", fields);
+
+        Map<String, Object> properties = new HashMap<>();
+        properties.put("foo", textField);
+
+        Map<String, Object> mapping = new HashMap<>();
+        mapping.put("properties", properties);
+
+        Set<String> usedFields = new HashSet<>();
+        collectTypes(mapping, usedFields);
+        assertEquals(new HashSet<>(Arrays.asList("keyword", "text")), usedFields);
+    }
+
+    public void testCountInnerFields() {
+        Map<String, Object> keywordField = new HashMap<>();
+        keywordField.put("type", "keyword");
+
+        Map<String, Object> properties = new HashMap<>();
+        properties.put("foo", keywordField);
+
+        Map<String, Object> objectMapping = new HashMap<>();
+        objectMapping.put("properties", properties);
+
+        Map<String, Object> mapping = new HashMap<>();
+
+        properties = new HashMap<>();
+        properties.put("obj", objectMapping);
+        mapping.put("properties", properties);
+        Set<String> fields = new HashSet<>();
+        collectTypes(mapping, fields);
+        assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields);
+
+        properties.put("bar", keywordField);
+        fields = new HashSet<>();
+        collectTypes(mapping, fields);
+        assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields);
+    }
+
+}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java
new file mode 100644
index 00000000000..078a17a7aa5
--- /dev/null
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.oss;
+
+import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.test.AbstractWireSerializingTestCase;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+public class IndexFeatureSetUsageTests extends AbstractWireSerializingTestCase<IndexFeatureSetUsage> {
+
+    @Override
+    protected Reader<IndexFeatureSetUsage> instanceReader() {
+        return IndexFeatureSetUsage::new;
+    }
+
+    @Override
+    protected IndexFeatureSetUsage createTestInstance() {
+        Set<String> fields = new HashSet<>();
+        if (randomBoolean()) {
+            fields.add("keyword");
+        }
+        if (randomBoolean()) {
+            fields.add("integer");
+        }
+
+        Set<String> charFilters = new HashSet<>();
+        if (randomBoolean()) {
+            charFilters.add("pattern_replace");
+        }
+
+        Set<String> tokenizers = new HashSet<>();
+        if (randomBoolean()) {
+            tokenizers.add("whitespace");
+        }
+
+        Set<String> tokenFilters = new HashSet<>();
+        if (randomBoolean()) {
+            tokenFilters.add("stop");
+        }
+
+        Set<String> analyzers = new HashSet<>();
+        if (randomBoolean()) {
+            analyzers.add("english");
+        }
+
+        Set<String> builtInCharFilters = new HashSet<>();
+        if (randomBoolean()) {
+            builtInCharFilters.add("html_strip");
+        }
+
+        Set<String> builtInTokenizers = new HashSet<>();
+        if (randomBoolean()) {
+            builtInTokenizers.add("keyword");
+        }
+
+        Set<String> builtInTokenFilters = new HashSet<>();
+        if (randomBoolean()) {
+            builtInTokenFilters.add("trim");
+        }
+
+        Set<String> builtInAnalyzers = new HashSet<>();
+        if (randomBoolean()) {
+            builtInAnalyzers.add("french");
+        }
+
+        return new IndexFeatureSetUsage(fields,
+                charFilters, tokenizers, tokenFilters, analyzers,
+                builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers);
+    }
+
+    @Override
+    protected IndexFeatureSetUsage mutateInstance(IndexFeatureSetUsage instance) throws IOException {
+        switch (randomInt(8)) {
+        case 0:
+            Set<String> fields = new HashSet<>(instance.getUsedFieldTypes());
+            if (fields.add("keyword") == false) {
+                fields.remove("keyword");
+            }
+            return new IndexFeatureSetUsage(fields, instance.getUsedCharFilterTypes(), instance.getUsedTokenizerTypes(),
+                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
+                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
+                    instance.getUsedBuiltInAnalyzers());
+        case 1:
+            Set<String> charFilters = new HashSet<>(instance.getUsedCharFilterTypes());
+            if (charFilters.add("pattern_replace") == false) {
+                charFilters.remove("pattern_replace");
+            }
+            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), charFilters, instance.getUsedTokenizerTypes(),
+                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
+                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
+                    instance.getUsedBuiltInAnalyzers());
+        case 2:
+            Set<String> tokenizers = new HashSet<>(instance.getUsedTokenizerTypes());
+            if (tokenizers.add("whitespace") == false) {
tokenizers.remove("whitespace"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), tokenizers, + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 3: + Set tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes()); + if (tokenFilters.add("stop") == false) { + tokenFilters.remove("stop"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), + tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 4: + Set analyzers = new HashSet<>(instance.getUsedAnalyzerTypes()); + if (analyzers.add("english") == false) { + analyzers.remove("english"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers, + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 5: + Set builtInCharFilters = new HashSet<>(); + if (builtInCharFilters.add("html_strip") == false) { + builtInCharFilters.remove("html_strip"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters, + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 6: + Set builtInTokenizers = new HashSet<>(); + if (builtInTokenizers.add("keyword") == false) { + builtInTokenizers.remove("keyword"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 7: + Set builtInTokenFilters = new HashSet<>(); + if (builtInTokenFilters.add("trim") == false) { + builtInTokenFilters.remove("trim"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters, + instance.getUsedBuiltInAnalyzers()); + case 8: + Set builtInAnalyzers = new HashSet<>(); + if (builtInAnalyzers.add("french") == false) { + builtInAnalyzers.remove("french"); + } + return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + builtInAnalyzers); + default: + throw new AssertionError(); + } + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml index 
index d8b25a29531..4757ee00360 100644
--- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml
@@ -166,4 +166,92 @@
   - is_true: features.monitoring.available
   - is_false: tagline
 
+---
+"Usage stats for mappings":
+  - do:
+      xpack.usage: {}
+  - match: { index.mappings.field_types: [] }
+
+  - do:
+      indices.create:
+        index: test-index1
+        body:
+          mappings:
+            properties:
+              foo:
+                type: keyword
+
+  - do:
+      indices.create:
+        index: test-index2
+        body:
+          mappings:
+            properties:
+              foo:
+                type: keyword
+              bar:
+                properties:
+                  quux:
+                    type: integer
+
+  - do:
+      xpack.usage: {}
+
+  - match: { index.mappings.field_types: [ "integer", "keyword", "object" ] }
+
+---
+"Usage stats for analysis":
+  - do:
+      xpack.usage: {}
+
+  - match: { index.analysis.char_filter_types: [] }
+  - match: { index.analysis.tokenizer_types: [] }
+  - match: { index.analysis.filter_types: [] }
+  - match: { index.analysis.analyzer_types: [] }
+
+  - do:
+      indices.create:
+        index: test-index1
+        body:
+          settings:
+            analysis:
+              char_filter:
+                c:
+                  type: mapping
+                  mappings: [ "a => b" ]
+              tokenizer:
+                tok:
+                  type: pattern
+                  pattern: ","
+              filter:
+                st:
+                  type: stop
+                  stopwords: [ "a" ]
+              analyzer:
+                en:
+                  type: standard
+                  stopwords: "_english_"
+                cust:
+                  char_filter: [ "html_strip" ]
+                  tokenizer: "keyword"
+                  filter: [ "trim" ]
+          mappings:
+            properties:
+              message:
+                type: "text"
+                analyzer: french
+                search_analyzer: spanish
+                search_quote_analyzer: german
+
+  - do:
+      xpack.usage: {}
+
+  - match: { index.analysis.char_filter_types: [ "mapping" ] }
+  - match: { index.analysis.tokenizer_types: [ "pattern" ] }
+  - match: { index.analysis.filter_types: [ "stop" ] }
+  - match: { index.analysis.analyzer_types: [ "custom", "standard" ] }
+  - match: { index.analysis.built_in_char_filters: [ "html_strip" ] }
+  - match: { index.analysis.built_in_tokenizers: [ "keyword" ] }
+  - match: { index.analysis.built_in_filters: [ "trim" ] }
+  - match: { index.analysis.built_in_analyzers: [ "french", "german", "spanish" ] }