Add analysis components and mapping types to the usage API. (#51062)
Knowing which analysis components and mapping types are used would be incredibly useful for deciding which ones may be deprecated or should get more love. Some field types also act as a proxy for feature usage of other APIs, like the `percolator` and `completion` field types for percolation and the completion suggester, respectively.
This commit is contained in: parent ac6602a156 · commit 45d7bdcfd7
@@ -91,6 +91,10 @@ Example response:
     "available" : true,
     "enabled" : true
   },
+  "index" : {
+    "available" : true,
+    "enabled" : true
+  },
   "logstash" : {
     "available" : true,
     "enabled" : true
XPackClientPlugin.java:

@@ -252,6 +252,7 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction
 import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction;
 import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction;
 import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction;
+import org.elasticsearch.xpack.oss.IndexFeatureSetUsage;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -608,7 +609,8 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
                 // analytics
                 new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new),
                 // Enrich
-                new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSet.Usage::new)
+                new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ENRICH, EnrichFeatureSet.Usage::new),
+                new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX, IndexFeatureSetUsage::new)
             ).stream(),
             MlEvaluationNamedXContentProvider.getNamedWriteables().stream()
         ).collect(toList());
XPackField.java:

@@ -53,6 +53,8 @@ public final class XPackField {
     public static final String ANALYTICS = "analytics";
     /** Name constant for the enrich plugin. */
     public static final String ENRICH = "enrich";
+    /** Name constant for indices. */
+    public static final String INDEX = "index";
 
     private XPackField() {}
 
XPackPlugin.java:

@@ -68,6 +68,7 @@ import org.elasticsearch.xpack.core.security.authc.TokenMetaData;
 import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.core.watcher.WatcherMetaData;
+import org.elasticsearch.xpack.oss.IndexFeatureSet;
 
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -245,6 +246,8 @@ public class XPackPlugin extends XPackClientPlugin implements ExtensiblePlugin,
 
         if (transportClientMode) {
             modules.add(b -> b.bind(XPackLicenseState.class).toProvider(Providers.of(null)));
+        } else {
+            modules.add(b -> XPackPlugin.bindFeatureSet(b, IndexFeatureSet.class));
         }
         return modules;
     }
New file: IndexFeatureSet.java

@@ -0,0 +1,160 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.oss;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.XPackFeatureSet;
import org.elasticsearch.xpack.core.XPackField;

import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Consumer;

public class IndexFeatureSet implements XPackFeatureSet {

    private final ClusterService clusterService;

    @Inject
    public IndexFeatureSet(ClusterService clusterService) {
        this.clusterService = clusterService;
    }

    @Override
    public String name() {
        return XPackField.INDEX;
    }

    @Override
    public boolean available() {
        return true;
    }

    @Override
    public boolean enabled() {
        return true;
    }

    @Override
    public Map<String, Object> nativeCodeInfo() {
        return null;
    }

    @Override
    public void usage(ActionListener<Usage> listener) {
        final Set<String> usedFieldTypes = new HashSet<>();
        final Set<String> usedCharFilters = new HashSet<>();
        final Set<String> usedTokenizers = new HashSet<>();
        final Set<String> usedTokenFilters = new HashSet<>();
        final Set<String> usedAnalyzers = new HashSet<>();
        final Set<String> usedBuiltInCharFilters = new HashSet<>();
        final Set<String> usedBuiltInTokenizers = new HashSet<>();
        final Set<String> usedBuiltInTokenFilters = new HashSet<>();
        final Set<String> usedBuiltInAnalyzers = new HashSet<>();

        ClusterState state = clusterService.state();
        if (state != null) {

            for (IndexMetaData indexMetaData : state.metaData()) {
                // Collect field types and analyzer names referenced directly on field mappings.
                MappingMetaData mappingMetaData = indexMetaData.mapping();
                if (mappingMetaData != null) {
                    visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> {
                        Object type = fieldMapping.get("type");
                        if (type != null) {
                            usedFieldTypes.add(type.toString());
                        } else if (fieldMapping.containsKey("properties")) {
                            // Mappings that only declare sub-fields are implicit object fields.
                            usedFieldTypes.add("object");
                        }

                        for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) {
                            Object analyzer = fieldMapping.get(key);
                            if (analyzer != null) {
                                usedBuiltInAnalyzers.add(analyzer.toString());
                            }
                        }
                    });
                }

                Settings indexSettings = indexMetaData.getSettings();

                // Analyzer names that are defined in the index's own settings are custom, not
                // built-in, so remove them from the built-in set and record their types instead.
                Map<String, Settings> analyzerSettings = indexSettings.getGroups("index.analysis.analyzer");
                usedBuiltInAnalyzers.removeAll(analyzerSettings.keySet());
                for (Settings analyzerSetting : analyzerSettings.values()) {
                    usedAnalyzers.add(analyzerSetting.get("type", "custom"));
                    // Components referenced by name from a custom analyzer are assumed built-in
                    // unless they turn out to be defined in the settings groups handled below.
                    usedBuiltInCharFilters.addAll(analyzerSetting.getAsList("char_filter"));
                    String tokenizer = analyzerSetting.get("tokenizer");
                    if (tokenizer != null) {
                        usedBuiltInTokenizers.add(tokenizer);
                    }
                    usedBuiltInTokenFilters.addAll(analyzerSetting.getAsList("filter"));
                }

                Map<String, Settings> charFilterSettings = indexSettings.getGroups("index.analysis.char_filter");
                usedBuiltInCharFilters.removeAll(charFilterSettings.keySet());
                aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilters);

                Map<String, Settings> tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer");
                usedBuiltInTokenizers.removeAll(tokenizerSettings.keySet());
                aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizers);

                Map<String, Settings> tokenFilterSettings = indexSettings.getGroups("index.analysis.filter");
                usedBuiltInTokenFilters.removeAll(tokenFilterSettings.keySet());
                aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilters);
            }
        }

        listener.onResponse(new IndexFeatureSetUsage(usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters,
                usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers));
    }

    /**
     * Visit every field mapping under {@code properties}, recursing into sub-objects
     * and multi-fields.
     */
    static void visitMapping(Map<String, ?> mapping, Consumer<Map<String, ?>> fieldMappingConsumer) {
        Object properties = mapping.get("properties");
        if (properties instanceof Map) {
            @SuppressWarnings("unchecked")
            Map<String, ?> propertiesAsMap = (Map<String, ?>) properties;
            for (Object v : propertiesAsMap.values()) {
                if (v instanceof Map) {

                    @SuppressWarnings("unchecked")
                    Map<String, ?> fieldMapping = (Map<String, ?>) v;
                    fieldMappingConsumer.accept(fieldMapping);
                    visitMapping(fieldMapping, fieldMappingConsumer);

                    // Multi fields
                    Object fieldsO = fieldMapping.get("fields");
                    if (fieldsO instanceof Map) {
                        @SuppressWarnings("unchecked")
                        Map<String, ?> fields = (Map<String, ?>) fieldsO;
                        for (Object v2 : fields.values()) {
                            if (v2 instanceof Map) {
                                @SuppressWarnings("unchecked")
                                Map<String, ?> fieldMapping2 = (Map<String, ?>) v2;
                                fieldMappingConsumer.accept(fieldMapping2);
                            }
                        }
                    }
                }
            }
        }
    }

    static void aggregateAnalysisTypes(Collection<Settings> analysisComponents, Set<String> usedTypes) {
        for (Settings settings : analysisComponents) {
            String type = settings.get("type");
            if (type != null) {
                usedTypes.add(type);
            }
        }
    }
}
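As a quick illustration of the traversal above, here is a minimal standalone sketch (not part of the commit) that feeds a hand-built mapping with a multi-field through `visitMapping` and collects the visited types. It assumes it is compiled into the `org.elasticsearch.xpack.oss` package, since `visitMapping` is package-private; the class name `VisitMappingSketch` is hypothetical.

package org.elasticsearch.xpack.oss;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical driver class, for illustration only.
public class VisitMappingSketch {
    public static void main(String[] args) {
        // Equivalent JSON: { "properties": { "foo": { "type": "text",
        //                    "fields": { "raw": { "type": "keyword" } } } } }
        Map<String, Object> raw = new HashMap<>();
        raw.put("type", "keyword");
        Map<String, Object> fields = new HashMap<>();
        fields.put("raw", raw);
        Map<String, Object> foo = new HashMap<>();
        foo.put("type", "text");
        foo.put("fields", fields);
        Map<String, Object> properties = new HashMap<>();
        properties.put("foo", foo);
        Map<String, Object> mapping = new HashMap<>();
        mapping.put("properties", properties);

        Set<String> types = new HashSet<>();
        // The consumer sees every field mapping: "foo" itself and its multi-field "raw".
        IndexFeatureSet.visitMapping(mapping, m -> {
            Object type = m.get("type");
            types.add(type != null ? type.toString() : "object");
        });
        System.out.println(types); // [text, keyword] in some order
    }
}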
New file: IndexFeatureSetUsage.java

@@ -0,0 +1,185 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.oss;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.core.XPackFeatureSet;
import org.elasticsearch.xpack.core.XPackField;

import java.io.IOException;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;

public class IndexFeatureSetUsage extends XPackFeatureSet.Usage {

    private static Set<String> sort(Set<String> set) {
        return Collections.unmodifiableSet(new TreeSet<>(set));
    }

    private final Set<String> usedFieldTypes;
    private final Set<String> usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers;
    private final Set<String> usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers;

    public IndexFeatureSetUsage(Set<String> usedFieldTypes,
            Set<String> usedCharFilters, Set<String> usedTokenizers, Set<String> usedTokenFilters, Set<String> usedAnalyzers,
            Set<String> usedBuiltInCharFilters, Set<String> usedBuiltInTokenizers, Set<String> usedBuiltInTokenFilters,
            Set<String> usedBuiltInAnalyzers) {
        super(XPackField.INDEX, true, true);
        this.usedFieldTypes = sort(usedFieldTypes);
        this.usedCharFilters = sort(usedCharFilters);
        this.usedTokenizers = sort(usedTokenizers);
        this.usedTokenFilters = sort(usedTokenFilters);
        this.usedAnalyzers = sort(usedAnalyzers);
        this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters);
        this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers);
        this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters);
        this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers);
    }

    public IndexFeatureSetUsage(StreamInput input) throws IOException {
        super(input);
        usedFieldTypes = input.readSet(StreamInput::readString);
        usedCharFilters = input.readSet(StreamInput::readString);
        usedTokenizers = input.readSet(StreamInput::readString);
        usedTokenFilters = input.readSet(StreamInput::readString);
        usedAnalyzers = input.readSet(StreamInput::readString);
        usedBuiltInCharFilters = input.readSet(StreamInput::readString);
        usedBuiltInTokenizers = input.readSet(StreamInput::readString);
        usedBuiltInTokenFilters = input.readSet(StreamInput::readString);
        usedBuiltInAnalyzers = input.readSet(StreamInput::readString);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeCollection(usedFieldTypes, StreamOutput::writeString);
        out.writeCollection(usedCharFilters, StreamOutput::writeString);
        out.writeCollection(usedTokenizers, StreamOutput::writeString);
        out.writeCollection(usedTokenFilters, StreamOutput::writeString);
        out.writeCollection(usedAnalyzers, StreamOutput::writeString);
        out.writeCollection(usedBuiltInCharFilters, StreamOutput::writeString);
        out.writeCollection(usedBuiltInTokenizers, StreamOutput::writeString);
        out.writeCollection(usedBuiltInTokenFilters, StreamOutput::writeString);
        out.writeCollection(usedBuiltInAnalyzers, StreamOutput::writeString);
    }

    /**
     * Return the set of used field types in the cluster.
     */
    public Set<String> getUsedFieldTypes() {
        return usedFieldTypes;
    }

    /**
     * Return the set of used char filters in the cluster.
     */
    public Set<String> getUsedCharFilterTypes() {
        return usedCharFilters;
    }

    /**
     * Return the set of used tokenizers in the cluster.
     */
    public Set<String> getUsedTokenizerTypes() {
        return usedTokenizers;
    }

    /**
     * Return the set of used token filters in the cluster.
     */
    public Set<String> getUsedTokenFilterTypes() {
        return usedTokenFilters;
    }

    /**
     * Return the set of used analyzers in the cluster.
     */
    public Set<String> getUsedAnalyzerTypes() {
        return usedAnalyzers;
    }

    /**
     * Return the set of used built-in char filters in the cluster.
     */
    public Set<String> getUsedBuiltInCharFilters() {
        return usedBuiltInCharFilters;
    }

    /**
     * Return the set of used built-in tokenizers in the cluster.
     */
    public Set<String> getUsedBuiltInTokenizers() {
        return usedBuiltInTokenizers;
    }

    /**
     * Return the set of used built-in token filters in the cluster.
     */
    public Set<String> getUsedBuiltInTokenFilters() {
        return usedBuiltInTokenFilters;
    }

    /**
     * Return the set of used built-in analyzers in the cluster.
     */
    public Set<String> getUsedBuiltInAnalyzers() {
        return usedBuiltInAnalyzers;
    }

    @Override
    protected void innerXContent(XContentBuilder builder, Params params) throws IOException {
        super.innerXContent(builder, params);

        builder.startObject("analysis");
        {
            builder.field("char_filter_types", usedCharFilters);
            builder.field("tokenizer_types", usedTokenizers);
            builder.field("filter_types", usedTokenFilters);
            builder.field("analyzer_types", usedAnalyzers);

            builder.field("built_in_char_filters", usedBuiltInCharFilters);
            builder.field("built_in_tokenizers", usedBuiltInTokenizers);
            builder.field("built_in_filters", usedBuiltInTokenFilters);
            builder.field("built_in_analyzers", usedBuiltInAnalyzers);
        }
        builder.endObject();

        builder.startObject("mappings");
        {
            builder.field("field_types", usedFieldTypes);
        }
        builder.endObject();
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        IndexFeatureSetUsage that = (IndexFeatureSetUsage) o;
        return available == that.available && enabled == that.enabled &&
                Objects.equals(usedFieldTypes, that.usedFieldTypes) &&
                Objects.equals(usedCharFilters, that.usedCharFilters) &&
                Objects.equals(usedTokenizers, that.usedTokenizers) &&
                Objects.equals(usedTokenFilters, that.usedTokenFilters) &&
                Objects.equals(usedAnalyzers, that.usedAnalyzers) &&
                Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) &&
                Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) &&
                Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) &&
                Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers);
    }

    @Override
    public int hashCode() {
        return Objects.hash(available, enabled, usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters,
                usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters,
                usedBuiltInAnalyzers);
    }
}
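For reference, this is roughly the round-trip that `AbstractWireSerializingTestCase` (used by the tests below) exercises against the two constructors above: serialize with `writeTo`, then rebuild from a `StreamInput`. A minimal sketch, assuming Elasticsearch's `BytesStreamOutput` is available on the classpath; the class name `RoundTripSketch` is hypothetical.

package org.elasticsearch.xpack.oss;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;
import java.util.Collections;
import java.util.Set;

// Hypothetical driver class, for illustration only.
public class RoundTripSketch {
    public static void main(String[] args) throws IOException {
        Set<String> none = Collections.emptySet();
        IndexFeatureSetUsage usage = new IndexFeatureSetUsage(
                Collections.singleton("keyword"), none, none, none, none, none, none, none, none);

        try (BytesStreamOutput out = new BytesStreamOutput()) {
            usage.writeTo(out);                                           // serialize
            try (StreamInput in = out.bytes().streamInput()) {
                IndexFeatureSetUsage copy = new IndexFeatureSetUsage(in); // deserialize
                assert usage.equals(copy);                                // content-equal copy
            }
        }
    }
}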
New file: package-info.java

@@ -0,0 +1,10 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

/**
 * Package containing usage information for features that are exposed in OSS.
 */
package org.elasticsearch.xpack.oss;
New file: IndexFeatureSetTests.java

@@ -0,0 +1,106 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.oss;

import org.elasticsearch.test.ESTestCase;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class IndexFeatureSetTests extends ESTestCase {

    private static void collectTypes(Map<String, ?> mapping, Set<String> types) {
        IndexFeatureSet.visitMapping(mapping,
                m -> {
                    if (m.containsKey("type")) {
                        types.add(m.get("type").toString());
                    } else {
                        types.add("object");
                    }
                });
    }

    public void testCountTopLevelFields() {
        Map<String, Object> mapping = new HashMap<>();
        Set<String> fields = new HashSet<>();
        collectTypes(mapping, fields);
        assertEquals(Collections.emptySet(), fields);

        Map<String, Object> properties = new HashMap<>();
        mapping.put("properties", properties);

        Map<String, Object> keywordField = new HashMap<>();
        keywordField.put("type", "keyword");
        properties.put("foo", keywordField);
        collectTypes(mapping, fields);
        assertEquals(Collections.singleton("keyword"), fields);

        Map<String, Object> integerField = new HashMap<>();
        integerField.put("type", "integer");
        properties.put("bar", integerField);
        fields = new HashSet<>();
        collectTypes(mapping, fields);
        assertEquals(new HashSet<>(Arrays.asList("keyword", "integer")), fields);

        // Reusing a type on another field must not produce duplicates in the set.
        properties.put("baz", integerField);
        fields = new HashSet<>();
        collectTypes(mapping, fields);
        assertEquals(new HashSet<>(Arrays.asList("keyword", "integer")), fields);
    }

    public void testCountMultiFields() {
        Map<String, Object> keywordField = new HashMap<>();
        keywordField.put("type", "keyword");

        Map<String, Object> textField = new HashMap<>();
        textField.put("type", "text");

        Map<String, Object> fields = new HashMap<>();
        fields.put("keyword", keywordField);
        textField.put("fields", fields);

        Map<String, Object> properties = new HashMap<>();
        properties.put("foo", textField);

        Map<String, Object> mapping = new HashMap<>();
        mapping.put("properties", properties);

        Set<String> usedFields = new HashSet<>();
        collectTypes(mapping, usedFields);
        assertEquals(new HashSet<>(Arrays.asList("keyword", "text")), usedFields);
    }

    public void testCountInnerFields() {
        Map<String, Object> keywordField = new HashMap<>();
        keywordField.put("type", "keyword");

        Map<String, Object> properties = new HashMap<>();
        properties.put("foo", keywordField);

        Map<String, Object> objectMapping = new HashMap<>();
        objectMapping.put("properties", properties);

        Map<String, Object> mapping = new HashMap<>();

        properties = new HashMap<>();
        properties.put("obj", objectMapping);
        mapping.put("properties", properties);
        Set<String> fields = new HashSet<>();
        collectTypes(mapping, fields);
        assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields);

        properties.put("bar", keywordField);
        fields = new HashSet<>();
        collectTypes(mapping, fields);
        assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields);
    }

}
New file: IndexFeatureSetUsageTests.java

@@ -0,0 +1,168 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.oss;

import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.test.AbstractWireSerializingTestCase;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

public class IndexFeatureSetUsageTests extends AbstractWireSerializingTestCase<IndexFeatureSetUsage> {

    @Override
    protected Reader<IndexFeatureSetUsage> instanceReader() {
        return IndexFeatureSetUsage::new;
    }

    @Override
    protected IndexFeatureSetUsage createTestInstance() {
        Set<String> fields = new HashSet<>();
        if (randomBoolean()) {
            fields.add("keyword");
        }
        if (randomBoolean()) {
            fields.add("integer");
        }

        Set<String> charFilters = new HashSet<>();
        if (randomBoolean()) {
            charFilters.add("pattern_replace");
        }

        Set<String> tokenizers = new HashSet<>();
        if (randomBoolean()) {
            tokenizers.add("whitespace");
        }

        Set<String> tokenFilters = new HashSet<>();
        if (randomBoolean()) {
            tokenFilters.add("stop");
        }

        Set<String> analyzers = new HashSet<>();
        if (randomBoolean()) {
            analyzers.add("english");
        }

        Set<String> builtInCharFilters = new HashSet<>();
        if (randomBoolean()) {
            builtInCharFilters.add("html_strip");
        }

        Set<String> builtInTokenizers = new HashSet<>();
        if (randomBoolean()) {
            builtInTokenizers.add("keyword");
        }

        Set<String> builtInTokenFilters = new HashSet<>();
        if (randomBoolean()) {
            builtInTokenFilters.add("trim");
        }

        Set<String> builtInAnalyzers = new HashSet<>();
        if (randomBoolean()) {
            builtInAnalyzers.add("french");
        }

        return new IndexFeatureSetUsage(fields,
                charFilters, tokenizers, tokenFilters, analyzers,
                builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers);
    }

    @Override
    protected IndexFeatureSetUsage mutateInstance(IndexFeatureSetUsage instance) throws IOException {
        // Each case toggles one element in one of the nine sets so that the
        // mutated instance never equals the original.
        switch (randomInt(8)) {
        case 0:
            Set<String> fields = new HashSet<>(instance.getUsedFieldTypes());
            if (fields.add("keyword") == false) {
                fields.remove("keyword");
            }
            return new IndexFeatureSetUsage(fields, instance.getUsedCharFilterTypes(), instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 1:
            Set<String> charFilters = new HashSet<>(instance.getUsedCharFilterTypes());
            if (charFilters.add("pattern_replace") == false) {
                charFilters.remove("pattern_replace");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), charFilters, instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 2:
            Set<String> tokenizers = new HashSet<>(instance.getUsedTokenizerTypes());
            if (tokenizers.add("whitespace") == false) {
                tokenizers.remove("whitespace");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), tokenizers,
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 3:
            Set<String> tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes());
            if (tokenFilters.add("stop") == false) {
                tokenFilters.remove("stop");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(),
                    tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(),
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 4:
            Set<String> analyzers = new HashSet<>(instance.getUsedAnalyzerTypes());
            if (analyzers.add("english") == false) {
                analyzers.remove("english");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers,
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 5:
            Set<String> builtInCharFilters = new HashSet<>(instance.getUsedBuiltInCharFilters());
            if (builtInCharFilters.add("html_strip") == false) {
                builtInCharFilters.remove("html_strip");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(),
                    instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters,
                    instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 6:
            Set<String> builtInTokenizers = new HashSet<>(instance.getUsedBuiltInTokenizers());
            if (builtInTokenizers.add("keyword") == false) {
                builtInTokenizers.remove("keyword");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(),
                    instance.getUsedBuiltInAnalyzers());
        case 7:
            Set<String> builtInTokenFilters = new HashSet<>(instance.getUsedBuiltInTokenFilters());
            if (builtInTokenFilters.add("trim") == false) {
                builtInTokenFilters.remove("trim");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters,
                    instance.getUsedBuiltInAnalyzers());
        case 8:
            Set<String> builtInAnalyzers = new HashSet<>(instance.getUsedBuiltInAnalyzers());
            if (builtInAnalyzers.add("french") == false) {
                builtInAnalyzers.remove("french");
            }
            return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(),
                    instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(),
                    instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(),
                    builtInAnalyzers);
        default:
            throw new AssertionError();
        }
    }
}
|
@ -166,4 +166,92 @@
|
|||
- is_true: features.monitoring.available
|
||||
- is_false: tagline
|
||||
|
||||
---
|
||||
"Usage stats for mappings":
|
||||
- do:
|
||||
xpack.usage: {}
|
||||
|
||||
- match: { index.mappings.field_types: [] }
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test-index1
|
||||
body:
|
||||
mappings:
|
||||
properties:
|
||||
foo:
|
||||
type: keyword
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test-index2
|
||||
body:
|
||||
mappings:
|
||||
properties:
|
||||
foo:
|
||||
type: keyword
|
||||
bar:
|
||||
properties:
|
||||
quux:
|
||||
type: integer
|
||||
|
||||
- do:
|
||||
xpack.usage: {}
|
||||
|
||||
- match: { index.mappings.field_types: [ "integer", "keyword", "object" ] }
|
||||
|
||||
---
|
||||
"Usage stats for analysis":
|
||||
- do:
|
||||
xpack.usage: {}
|
||||
|
||||
- match: { index.analysis.char_filter_types: [] }
|
||||
- match: { index.analysis.tokenizer_types: [] }
|
||||
- match: { index.analysis.filter_types: [] }
|
||||
- match: { index.analysis.analyzer_types: [] }
|
||||
|
||||
- do:
|
||||
indices.create:
|
||||
index: test-index1
|
||||
body:
|
||||
settings:
|
||||
analysis:
|
||||
char_filter:
|
||||
c:
|
||||
type: mapping
|
||||
mappings: [ "a => b" ]
|
||||
tokenizer:
|
||||
tok:
|
||||
type: pattern
|
||||
pattern: ","
|
||||
filter:
|
||||
st:
|
||||
type: stop
|
||||
stopwords: [ "a" ]
|
||||
analyzer:
|
||||
en:
|
||||
type: standard
|
||||
stopwords: "_english_"
|
||||
cust:
|
||||
char_filter: [ "html_strip" ]
|
||||
tokenizer: "keyword"
|
||||
filter: [ "trim" ]
|
||||
mappings:
|
||||
properties:
|
||||
message:
|
||||
type: "text"
|
||||
analyzer: french
|
||||
search_analyzer: spanish
|
||||
search_quote_analyzer: german
|
||||
|
||||
- do:
|
||||
xpack.usage: {}
|
||||
|
||||
- match: { index.analysis.char_filter_types: [ "mapping" ] }
|
||||
- match: { index.analysis.tokenizer_types: [ "pattern" ] }
|
||||
- match: { index.analysis.filter_types: [ "stop" ] }
|
||||
- match: { index.analysis.analyzer_types: [ "custom", "standard" ] }
|
||||
- match: { index.analysis.built_in_char_filters: [ "html_strip" ] }
|
||||
- match: { index.analysis.built_in_tokenizers: [ "keyword" ] }
|
||||
- match: { index.analysis.built_in_filters: [ "trim" ] }
|
||||
- match: { index.analysis.built_in_analyzers: [ "french", "german", "spanish" ] }
|
||||
|
|