Replace group map settings with affix setting (#26819)

We have historically used group settings instead of prefix settings, which are more restrictive and type safe. The majority of the use cases need to access a key/value map based on the _leaf node_ of the setting, i.e. the setting `index.tag.*` might be used to tag an index with `index.tag.test=42` and `index.tag.staging=12`, which would then be turned into a `{"test": 42, "staging": 12}` map. The group settings would always use `Settings#getAsMap`, which loses type information and exposes the internal representation of the settings. Using prefix settings now allows such a map to be accessed in a type-safe, native way.
This commit is contained in:
Simon Willnauer 2017-09-30 14:27:21 +02:00 committed by GitHub
parent 3cb99aeec1
commit 7b8d036ab5
20 changed files with 237 additions and 79 deletions

View File

@ -240,14 +240,18 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require";
public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = "index.routing.allocation.include";
public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = "index.routing.allocation.exclude";
public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
Setting.groupSetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
Setting.groupSetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.groupSetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.IndexScope);
public static final Setting<Settings> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =
Setting.groupSetting("index.routing.allocation.initial_recovery."); // this is only setable internally not a registered setting!!
public static final Setting.AffixSetting<String> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
Setting.prefixKeySetting(INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", (key) ->
Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope));
public static final Setting.AffixSetting<String> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
Setting.prefixKeySetting(INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", (key) ->
Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope));
public static final Setting.AffixSetting<String> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.prefixKeySetting(INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + ".", (key) ->
Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.IndexScope));
public static final Setting.AffixSetting<String> INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING =
Setting.prefixKeySetting("index.routing.allocation.initial_recovery.", key -> Setting.simpleString(key));
// this is only settable internally, not a registered setting!!
/**
* The number of active shard copies to check for before proceeding with a write operation.
@ -1012,28 +1016,28 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
filledInSyncAllocationIds.put(i, Collections.emptySet());
}
}
final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(settings).getAsMap();
final Map<String, String> requireMap = INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters requireFilters;
if (requireMap.isEmpty()) {
requireFilters = null;
} else {
requireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
}
Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.get(settings).getAsMap();
Map<String, String> includeMap = INDEX_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters includeFilters;
if (includeMap.isEmpty()) {
includeFilters = null;
} else {
includeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.get(settings).getAsMap();
Map<String, String> excludeMap = INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters excludeFilters;
if (excludeMap.isEmpty()) {
excludeFilters = null;
} else {
excludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
}
Map<String, String> initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.get(settings).getAsMap();
Map<String, String> initialRecoveryMap = INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getAsMap(settings);
final DiscoveryNodeFilters initialRecoveryFilters;
if (initialRecoveryMap.isEmpty()) {
initialRecoveryFilters = null;

View File

@ -161,21 +161,20 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
final Settings normalizedSettings = Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
Settings.Builder settingsForClosedIndices = Settings.builder();
Settings.Builder settingsForOpenIndices = Settings.builder();
Settings.Builder skipppedSettings = Settings.builder();
final Set<String> skippedSettings = new HashSet<>();
indexScopedSettings.validate(normalizedSettings);
// never allow to change the number of shards
for (Map.Entry<String, String> entry : normalizedSettings.getAsMap().entrySet()) {
Setting setting = indexScopedSettings.get(entry.getKey());
for (String key : normalizedSettings.getKeys()) {
Setting setting = indexScopedSettings.get(key);
assert setting != null; // we already validated the normalized settings
settingsForClosedIndices.put(entry.getKey(), entry.getValue());
settingsForClosedIndices.copy(key, normalizedSettings);
if (setting.isDynamic()) {
settingsForOpenIndices.put(entry.getKey(), entry.getValue());
settingsForOpenIndices.copy(key, normalizedSettings);
} else {
skipppedSettings.put(entry.getKey(), entry.getValue());
skippedSettings.add(key);
}
}
final Settings skippedSettigns = skipppedSettings.build();
final Settings closedSettings = settingsForClosedIndices.build();
final Settings openSettings = settingsForOpenIndices.build();
final boolean preserveExisting = request.isPreserveExisting();
@ -210,11 +209,9 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
}
}
if (!skippedSettigns.isEmpty() && !openIndices.isEmpty()) {
if (!skippedSettings.isEmpty() && !openIndices.isEmpty()) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update non dynamic settings [%s] for open indices %s",
skippedSettigns.getAsMap().keySet(),
openIndices
"Can't update non dynamic settings [%s] for open indices %s", skippedSettings, openIndices
));
}

View File

@ -189,9 +189,8 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
/** Creates a DiscoveryNode representing the local node. */
public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeId) {
Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(settings).getAsMap());
Map<String, String> attributes = Node.NODE_ATTRIBUTES.getAsMap(settings);
Set<Role> roles = getRolesFromSettings(settings);
return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT);
}

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
public class DiscoveryNodeFilters {
@ -43,15 +44,10 @@ public class DiscoveryNodeFilters {
* "_ip", "_host_ip", and "_publish_ip" and ensuring each of their comma separated values
* that has no wildcards is a valid IP address.
*/
public static final Consumer<Settings> IP_VALIDATOR = (settings) -> {
Map<String, String> settingsMap = settings.getAsMap();
for (Map.Entry<String, String> entry : settingsMap.entrySet()) {
String propertyKey = entry.getKey();
if (entry.getValue() == null) {
continue; // this setting gets reset
}
if ("_ip".equals(propertyKey) || "_host_ip".equals(propertyKey) || "_publish_ip".equals(propertyKey)) {
for (String value : Strings.tokenizeToStringArray(entry.getValue(), ",")) {
public static final BiConsumer<String, String> IP_VALIDATOR = (propertyKey, rawValue) -> {
if (rawValue != null) {
if (propertyKey.endsWith("._ip") || propertyKey.endsWith("._host_ip") || propertyKey.endsWith("_publish_ip")) {
for (String value : Strings.tokenizeToStringArray(rawValue, ",")) {
if (Regex.isSimpleMatchPattern(value) == false && InetAddresses.isInetAddress(value) == false) {
throw new IllegalArgumentException("invalid IP address [" + value + "] for [" + propertyKey + "]");
}

View File

@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.EnumSet;
import java.util.Map;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
@ -70,12 +71,15 @@ public class FilterAllocationDecider extends AllocationDecider {
private static final String CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX = "cluster.routing.allocation.require";
private static final String CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX = "cluster.routing.allocation.include";
private static final String CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX = "cluster.routing.allocation.exclude";
public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =
Setting.groupSetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =
Setting.groupSetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
public static final Setting.AffixSetting<String> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =
Setting.prefixKeySetting(CLUSTER_ROUTING_REQUIRE_GROUP_PREFIX + ".", (key) ->
Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope));
public static final Setting.AffixSetting<String> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =
Setting.prefixKeySetting(CLUSTER_ROUTING_INCLUDE_GROUP_PREFIX + ".", (key) ->
Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope));
public static final Setting.AffixSetting<String>CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.prefixKeySetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".", (key) ->
Setting.simpleString(key, (value, map) -> IP_VALIDATOR.accept(key, value), Property.Dynamic, Property.NodeScope));
/**
* The set of {@link RecoverySource.Type} values for which the
@ -94,12 +98,12 @@ public class FilterAllocationDecider extends AllocationDecider {
public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings);
setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings));
setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings));
setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings));
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters);
setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(settings));
setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getAsMap(settings));
setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.getAsMap(settings));
clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters, (a,b)-> {}, true);
clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters, (a,b)-> {}, true);
clusterSettings.addAffixMapUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters, (a,b)-> {}, true);
}
@Override
@ -196,13 +200,13 @@ public class FilterAllocationDecider extends AllocationDecider {
return null;
}
private void setClusterRequireFilters(Settings settings) {
clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, settings.getAsMap());
private void setClusterRequireFilters(Map<String, String> filters) {
clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, filters);
}
private void setClusterIncludeFilters(Settings settings) {
clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap());
private void setClusterIncludeFilters(Map<String, String> filters) {
clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, filters);
}
private void setClusterExcludeFilters(Settings settings) {
clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap());
private void setClusterExcludeFilters(Map<String, String> filters) {
clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, filters);
}
}

View File

@ -207,6 +207,20 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
addSettingsUpdater(setting.newAffixUpdater(consumer, logger, validator));
}
/**
* Adds a settings consumer for affix settings. Affix settings have a namespace associated with them that needs to be available to the
* consumer in order to be processed correctly. This consumer will get a namespace-to-value map instead of each individual namespace
* and value as in {@link #addAffixUpdateConsumer(Setting.AffixSetting, BiConsumer, BiConsumer)}
*/
public synchronized <T> void addAffixMapUpdateConsumer(Setting.AffixSetting<T> setting, Consumer<Map<String, T>> consumer,
BiConsumer<String, T> validator, boolean omitDefaults) {
final Setting<?> registeredSetting = this.complexMatchers.get(setting.getKey());
if (setting != registeredSetting) {
throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]");
}
addSettingsUpdater(setting.newAffixMapUpdater(consumer, logger, validator, omitDefaults));
}
synchronized void addSettingsUpdater(SettingUpdater<?> updater) {
this.settingUpdaters.add(updater);
}

View File

@ -547,8 +547,9 @@ public class Setting<T> implements ToXContentObject {
final Map<AbstractScopedSettings.SettingUpdater<T>, T> result = new IdentityHashMap<>();
Stream.concat(matchStream(current), matchStream(previous)).distinct().forEach(aKey -> {
String namespace = key.getNamespace(aKey);
Setting<T> concreteSetting = getConcreteSetting(aKey);
AbstractScopedSettings.SettingUpdater<T> updater =
getConcreteSetting(aKey).newUpdater((v) -> consumer.accept(namespace, v), logger,
concreteSetting.newUpdater((v) -> consumer.accept(namespace, v), logger,
(v) -> validator.accept(namespace, v));
if (updater.hasChanged(current, previous)) {
// only the ones that have changed otherwise we might get too many updates
@ -569,6 +570,43 @@ public class Setting<T> implements ToXContentObject {
};
}
AbstractScopedSettings.SettingUpdater<Map<String, T>> newAffixMapUpdater(Consumer<Map<String, T>> consumer, Logger logger,
BiConsumer<String, T> validator, boolean omitDefaults) {
return new AbstractScopedSettings.SettingUpdater<Map<String, T>>() {
@Override
public boolean hasChanged(Settings current, Settings previous) {
return Stream.concat(matchStream(current), matchStream(previous)).findAny().isPresent();
}
@Override
public Map<String, T> getValue(Settings current, Settings previous) {
// we collect all concrete keys and then delegate to the actual setting for validation and settings extraction
final Map<String, T> result = new IdentityHashMap<>();
Stream.concat(matchStream(current), matchStream(previous)).distinct().forEach(aKey -> {
String namespace = key.getNamespace(aKey);
Setting<T> concreteSetting = getConcreteSetting(aKey);
AbstractScopedSettings.SettingUpdater<T> updater =
concreteSetting.newUpdater((v) -> {}, logger, (v) -> validator.accept(namespace, v));
if (updater.hasChanged(current, previous)) {
// only the ones that have changed otherwise we might get too many updates
// the hasChanged above checks only if there are any changes
T value = updater.getValue(current, previous);
if ((omitDefaults && value.equals(concreteSetting.getDefault(current))) == false) {
result.put(namespace, value);
}
}
});
return result;
}
@Override
public void apply(Map<String, T> value, Settings current, Settings previous) {
consumer.accept(value);
}
};
}
@Override
public T get(Settings settings) {
throw new UnsupportedOperationException("affix settings can't return values" +
@ -618,6 +656,18 @@ public class Setting<T> implements ToXContentObject {
public Stream<Setting<T>> getAllConcreteSettings(Settings settings) {
return matchStream(settings).distinct().map(this::getConcreteSetting);
}
/**
* Returns a map of all namespaces to their values, given the provided settings
*/
public Map<String, T> getAsMap(Settings settings) {
Map<String, T> map = new HashMap<>();
matchStream(settings).distinct().forEach(key -> {
Setting<T> concreteSetting = getConcreteSetting(key);
map.put(getNamespace(concreteSetting), concreteSetting.get(settings));
});
return Collections.unmodifiableMap(map);
}
}
/**
@ -872,6 +922,10 @@ public class Setting<T> implements ToXContentObject {
return new Setting<>(key, s -> "", Function.identity(), properties);
}
public static Setting<String> simpleString(String key, Validator<String> validator, Property... properties) {
return new Setting<>(new SimpleKey(key), null, s -> "", Function.identity(), validator, properties);
}
public static int parseInt(String s, int minValue, String key) {
return parseInt(s, minValue, Integer.MAX_VALUE, key);
}

View File

@ -320,6 +320,13 @@ public final class Settings implements ToXContentFragment {
}
}
/**
* Returns a set of all keys in this settings object
*/
public Set<String> getKeys() {
return Collections.unmodifiableSet(settings.keySet());
}
/**
* We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured
* leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any
@ -894,6 +901,10 @@ public final class Settings implements ToXContentFragment {
return this;
}
public Builder copy(String key, Settings source) {
return put(key, source.get(key));
}
/**
* Sets a null value for the given setting key
*/

View File

@ -190,16 +190,15 @@ public class Node implements Closeable {
*/
public static final Setting<Boolean> NODE_LOCAL_STORAGE_SETTING = Setting.boolSetting("node.local_storage", true, Property.NodeScope);
public static final Setting<String> NODE_NAME_SETTING = Setting.simpleString("node.name", Property.NodeScope);
public static final Setting<Settings> NODE_ATTRIBUTES = Setting.groupSetting("node.attr.", (settings) -> {
Map<String, String> settingsMap = settings.getAsMap();
for (Map.Entry<String, String> entry : settingsMap.entrySet()) {
String value = entry.getValue();
if (Character.isWhitespace(value.charAt(0)) || Character.isWhitespace(value.charAt(value.length() - 1))) {
throw new IllegalArgumentException("node.attr." + entry.getKey() + " cannot have leading or trailing whitespace " +
public static final Setting.AffixSetting<String> NODE_ATTRIBUTES = Setting.prefixKeySetting("node.attr.", (key) ->
new Setting<>(key, "", (value) -> {
if (value.length() > 0
&& (Character.isWhitespace(value.charAt(0)) || Character.isWhitespace(value.charAt(value.length() - 1)))) {
throw new IllegalArgumentException(key + " cannot have leading or trailing whitespace " +
"[" + value + "]");
}
}
}, Property.NodeScope);
return value;
}, Property.NodeScope));
public static final Setting<String> BREAKER_TYPE_KEY = new Setting<>("indices.breaker.type", "hierarchy", (s) -> {
switch (s) {
case "hierarchy":

View File

@ -110,7 +110,6 @@ import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.settings.Setting.affixKeySetting;
import static org.elasticsearch.common.settings.Setting.boolSetting;
import static org.elasticsearch.common.settings.Setting.groupSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.settings.Setting.listSetting;
import static org.elasticsearch.common.settings.Setting.timeSetting;

View File

@ -149,12 +149,12 @@ public class FilteringAllocationIT extends ESIntegTestCase {
public void testInvalidIPFilterClusterSettings() {
String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
Setting<Settings> filterSetting = randomFrom(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
Setting<String> filterSetting = randomFrom(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1."))
.execute().actionGet());
assertEquals("invalid IP address [192.168.1.1.] for [" + ipKey + "]", e.getMessage());
assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage());
}
}

View File

@ -183,7 +183,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
public void testInvalidIPFilter() {
String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
Setting<Settings> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
Setting<String> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING);
String invalidIP = randomFrom("192..168.1.1", "192.300.1.1");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
@ -191,12 +191,12 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
indexScopedSettings.updateDynamicSettings(Settings.builder().put(filterSetting.getKey() + ipKey, invalidIP).build(),
Settings.builder().put(Settings.EMPTY), Settings.builder(), "test ip validation");
});
assertEquals("invalid IP address [" + invalidIP + "] for [" + ipKey + "]", e.getMessage());
assertEquals("invalid IP address [" + invalidIP + "] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage());
}
public void testWildcardIPFilter() {
String ipKey = randomFrom("_ip", "_host_ip", "_publish_ip");
Setting<Settings> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
Setting<String> filterSetting = randomFrom(IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING,
IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_SETTING);
String wildcardIP = randomFrom("192.168.*", "192.*.1.1");
IndexScopedSettings indexScopedSettings = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);

View File

@ -43,6 +43,7 @@ import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import static org.hamcrest.CoreMatchers.containsString;
@ -115,7 +116,7 @@ public class ScopedSettingsTests extends ESTestCase {
IndexScopedSettings settings = new IndexScopedSettings(currentSettings,
new HashSet<>(Arrays.asList(dynamicSetting, IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING)));
Settings s = IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(currentSettings);
Map<String, String> s = IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(currentSettings);
assertEquals(1, s.size());
assertEquals("192.168.0.1,127.0.0.1", s.get("_ip"));
Settings.Builder builder = Settings.builder();
@ -125,7 +126,7 @@ public class ScopedSettingsTests extends ESTestCase {
settings.updateDynamicSettings(updates,
Settings.builder().put(currentSettings), builder, "node");
currentSettings = builder.build();
s = IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.get(currentSettings);
s = IndexMetaData.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getAsMap(currentSettings);
assertEquals(0, s.size());
assertEquals(1, dynamicSetting.get(currentSettings).intValue());
assertEquals(1, currentSettings.size());
@ -205,6 +206,66 @@ public class ScopedSettingsTests extends ESTestCase {
assertEquals(1, intResults.size());
}
public void testAddConsumerAffixMap() {
Setting.AffixSetting<Integer> intSetting = Setting.affixKeySetting("foo.", "bar",
(k) -> Setting.intSetting(k, 1, Property.Dynamic, Property.NodeScope));
Setting.AffixSetting<List<Integer>> listSetting = Setting.affixKeySetting("foo.", "list",
(k) -> Setting.listSetting(k, Arrays.asList("1"), Integer::parseInt, Property.Dynamic, Property.NodeScope));
AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY,new HashSet<>(Arrays.asList(intSetting, listSetting)));
Map<String, List<Integer>> listResults = new HashMap<>();
Map<String, Integer> intResults = new HashMap<>();
Consumer<Map<String,Integer>> intConsumer = (map) -> {
intResults.clear();
intResults.putAll(map);
};
Consumer<Map<String, List<Integer>>> listConsumer = (map) -> {
listResults.clear();
listResults.putAll(map);
};
boolean omitDefaults = randomBoolean();
service.addAffixMapUpdateConsumer(listSetting, listConsumer, (s, k) -> {}, omitDefaults);
service.addAffixMapUpdateConsumer(intSetting, intConsumer, (s, k) -> {}, omitDefaults);
assertEquals(0, listResults.size());
assertEquals(0, intResults.size());
service.applySettings(Settings.builder()
.put("foo.test.bar", 2)
.put("foo.test_1.bar", 7)
.putArray("foo.test_list.list", "16", "17")
.putArray("foo.test_list_1.list", "18", "19", "20")
.build());
assertEquals(2, intResults.get("test").intValue());
assertEquals(7, intResults.get("test_1").intValue());
assertEquals(Arrays.asList(16, 17), listResults.get("test_list"));
assertEquals(Arrays.asList(18, 19, 20), listResults.get("test_list_1"));
assertEquals(2, listResults.size());
assertEquals(2, intResults.size());
listResults.clear();
intResults.clear();
service.applySettings(Settings.builder()
.put("foo.test.bar", 2)
.put("foo.test_1.bar", 8)
.putArray("foo.test_list.list", "16", "17")
.putNull("foo.test_list_1.list")
.build());
assertNull("test wasn't changed", intResults.get("test"));
assertEquals(8, intResults.get("test_1").intValue());
assertNull("test_list wasn't changed", listResults.get("test_list"));
if (omitDefaults) {
assertNull(listResults.get("test_list_1"));
assertFalse(listResults.containsKey("test_list_1"));
assertEquals(0, listResults.size());
assertEquals(1, intResults.size());
} else {
assertEquals(Arrays.asList(1), listResults.get("test_list_1")); // reset to default
assertEquals(1, listResults.size());
assertEquals(1, intResults.size());
}
}
public void testApply() {
Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope);
Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, Property.Dynamic, Property.NodeScope);
@ -347,10 +408,10 @@ public class ScopedSettingsTests extends ESTestCase {
public void testGet() {
ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
// group setting - complex matcher
// affix setting - complex matcher
Setting setting = settings.get("cluster.routing.allocation.require.value");
assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING);
assertEquals(setting,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSetting("cluster.routing.allocation.require.value"));
setting = settings.get("cluster.routing.allocation.total_shards_per_node");
assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING);

View File

@ -577,6 +577,25 @@ public class SettingTests extends ESTestCase {
assertFalse(listAffixSetting.match("foo"));
}
public void testAffixAsMap() {
Setting.AffixSetting<String> setting = Setting.prefixKeySetting("foo.bar.", key ->
Setting.simpleString(key, Property.NodeScope));
Settings build = Settings.builder().put("foo.bar.baz", 2).put("foo.bar.foobar", 3).build();
Map<String, String> asMap = setting.getAsMap(build);
assertEquals(2, asMap.size());
assertEquals("2", asMap.get("baz"));
assertEquals("3", asMap.get("foobar"));
setting = Setting.prefixKeySetting("foo.bar.", key ->
Setting.simpleString(key, Property.NodeScope));
build = Settings.builder().put("foo.bar.baz", 2).put("foo.bar.foobar", 3).put("foo.bar.baz.deep", 45).build();
asMap = setting.getAsMap(build);
assertEquals(3, asMap.size());
assertEquals("2", asMap.get("baz"));
assertEquals("3", asMap.get("foobar"));
assertEquals("45", asMap.get("baz.deep"));
}
public void testGetAllConcreteSettings() {
Setting.AffixSetting<List<String>> listAffixSetting = Setting.affixKeySetting("foo.", "bar",
(key) -> Setting.listSetting(key, Collections.emptyList(), Function.identity(), Property.NodeScope));

View File

@ -145,7 +145,7 @@ public class ZenFaultDetectionTests extends ESTestCase {
TransportService.NOOP_TRANSPORT_INTERCEPTOR,
(boundAddress) ->
new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), boundAddress.publishAddress(),
Node.NODE_ATTRIBUTES.get(settings).getAsMap(), DiscoveryNode.getRolesFromSettings(settings), version),
Node.NODE_ATTRIBUTES.getAsMap(settings), DiscoveryNode.getRolesFromSettings(settings), version),
null);
transportService.start();
transportService.acceptIncomingRequests();

View File

@ -124,7 +124,7 @@ public class NodeTests extends ESTestCase {
Settings.Builder settings = baseSettings().put(Node.NODE_ATTRIBUTES.getKey() + "test_attr", attr);
try (Node node = new MockNode(settings.build(), Collections.singleton(getTestTransportPlugin()))) {
final Settings nodeSettings = randomBoolean() ? node.settings() : node.getEnvironment().settings();
assertEquals(attr, Node.NODE_ATTRIBUTES.get(nodeSettings).getAsMap().get("test_attr"));
assertEquals(attr, Node.NODE_ATTRIBUTES.getAsMap(nodeSettings).get("test_attr"));
}
// leading whitespace not allowed

View File

@ -964,7 +964,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
.put("location", randomRepoPath())));
logger.info("--> creating index that cannot be allocated");
prepareCreate("test-idx", 2, Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + ".tag", "nowhere").put("index.number_of_shards", 3)).setWaitForActiveShards(ActiveShardCount.NONE).get();
prepareCreate("test-idx", 2, Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "tag", "nowhere").put("index.number_of_shards", 3)).setWaitForActiveShards(ActiveShardCount.NONE).get();
logger.info("--> snapshot");
CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();

View File

@ -115,7 +115,8 @@ interface AwsEc2Service {
* instances with a tag key set to stage, and a value of dev. Several tags set will require all of those tags to be set for the
* instance to be included.
*/
Setting<Settings> TAG_SETTING = Setting.groupSetting("discovery.ec2.tag.", Property.NodeScope);
Setting.AffixSetting<String> TAG_SETTING = Setting.prefixKeySetting("discovery.ec2.tag.",
key -> Setting.simpleString(key, Property.NodeScope));
AmazonEC2 client();
}

View File

@ -85,7 +85,7 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos
this.groups = new HashSet<>();
this.groups.addAll(AwsEc2Service.GROUPS_SETTING.get(settings));
this.tags = AwsEc2Service.TAG_SETTING.get(settings).getAsMap();
this.tags = AwsEc2Service.TAG_SETTING.getAsMap(settings);
this.availabilityZones = new HashSet<>();
availabilityZones.addAll(AwsEc2Service.AVAILABILITY_ZONES_SETTING.get(settings));

View File

@ -118,7 +118,7 @@ public final class MockTransportService extends TransportService {
return new MockTransportService(settings, transport, threadPool, TransportService.NOOP_TRANSPORT_INTERCEPTOR,
boundAddress ->
new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), UUIDs.randomBase64UUID(), boundAddress.publishAddress(),
Node.NODE_ATTRIBUTES.get(settings).getAsMap(), DiscoveryNode.getRolesFromSettings(settings), version),
Node.NODE_ATTRIBUTES.getAsMap(settings), DiscoveryNode.getRolesFromSettings(settings), version),
clusterSettings);
}