Merge pull request #16177 from jimferenczi/convert_indices_settings

Convert "indices.*" settings to new infra.
Jim Ferenczi committed 2016-01-22 15:08:53 +01:00
commit ad7fec87b0
9 changed files with 49 additions and 38 deletions
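
The pattern is the same across all nine files: each raw settings key (a plain String parsed at every read site via Settings#getAsBoolean/getAsTime/get) becomes a typed Setting<T> constant that declares its default, dynamic flag and scope once, is registered in ClusterSettings, and is read with Setting#get. A minimal before/after sketch of that pattern, using a made-up example key and only the Setting factory methods that appear in this diff:

// before: raw key, default value repeated at the call site (hypothetical key for illustration)
public static final String EXAMPLE_TIMEOUT = "indices.example.timeout";
TimeValue timeout = settings.getAsTime(EXAMPLE_TIMEOUT, TimeValue.timeValueSeconds(30));

// after: typed, non-dynamic, cluster-scoped setting declared once
public static final Setting<TimeValue> EXAMPLE_TIMEOUT_SETTING =
        Setting.positiveTimeSetting("indices.example.timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
TimeValue timeout = EXAMPLE_TIMEOUT_SETTING.get(settings); // falls back to the declared default and validates the value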

View File

@@ -42,8 +42,12 @@ import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
+import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
+import org.elasticsearch.indices.cache.request.IndicesRequestCache;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
+import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
@@ -168,5 +172,13 @@ public final class ClusterSettings extends AbstractScopedSettings {
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
-ScriptService.SCRIPT_CACHE_SIZE_SETTING)));
+ScriptService.SCRIPT_CACHE_SIZE_SETTING,
+IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
+IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
+IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
+IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
+HunspellService.HUNSPELL_LAZY_LOAD,
+HunspellService.HUNSPELL_IGNORE_CASE,
+HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
+IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT)));
}

View File

@@ -23,6 +23,7 @@ import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@@ -70,9 +71,9 @@ import java.util.function.Function;
*/
public class HunspellService extends AbstractComponent {
-public final static String HUNSPELL_LAZY_LOAD = "indices.analysis.hunspell.dictionary.lazy";
-public final static String HUNSPELL_IGNORE_CASE = "indices.analysis.hunspell.dictionary.ignore_case";
-private final static String OLD_HUNSPELL_LOCATION = "indices.analysis.hunspell.dictionary.location";
+public final static Setting<Boolean> HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, false, Setting.Scope.CLUSTER);
+public final static Setting<Boolean> HUNSPELL_IGNORE_CASE = Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, false, Setting.Scope.CLUSTER);
+public final static Setting<Settings> HUNSPELL_DICTIONARY_OPTIONS = Setting.groupSetting("indices.analysis.hunspell.dictionary.", false, Setting.Scope.CLUSTER);
private final ConcurrentHashMap<String, Dictionary> dictionaries = new ConcurrentHashMap<>();
private final Map<String, Dictionary> knownDictionaries;
private final boolean defaultIgnoreCase;
@@ -82,8 +83,8 @@ public class HunspellService extends AbstractComponent {
public HunspellService(final Settings settings, final Environment env, final Map<String, Dictionary> knownDictionaries) throws IOException {
super(settings);
this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries);
-this.hunspellDir = resolveHunspellDirectory(settings, env);
-this.defaultIgnoreCase = settings.getAsBoolean(HUNSPELL_IGNORE_CASE, false);
+this.hunspellDir = resolveHunspellDirectory(env);
+this.defaultIgnoreCase = HUNSPELL_IGNORE_CASE.get(settings);
this.loadingFunction = (locale) -> {
try {
return loadDictionary(locale, settings, env);
@@ -91,7 +92,7 @@ public class HunspellService extends AbstractComponent {
throw new IllegalStateException("failed to load hunspell dictionary for locale: " + locale, e);
}
};
-if (!settings.getAsBoolean(HUNSPELL_LAZY_LOAD, false)) {
+if (!HUNSPELL_LAZY_LOAD.get(settings)) {
scanAndLoadDictionaries();
}
@@ -110,11 +111,7 @@ public class HunspellService extends AbstractComponent {
return dictionary;
}
-private Path resolveHunspellDirectory(Settings settings, Environment env) {
-String location = settings.get(OLD_HUNSPELL_LOCATION, null);
-if (location != null) {
-throw new IllegalArgumentException("please, put your hunspell dictionaries under config/hunspell !");
-}
+private Path resolveHunspellDirectory(Environment env) {
return env.configFile().resolve("hunspell");
}
@@ -162,7 +159,8 @@ public class HunspellService extends AbstractComponent {
}
// merging node settings with hunspell dictionary specific settings
-nodeSettings = loadDictionarySettings(dicDir, nodeSettings.getByPrefix("indices.analysis.hunspell.dictionary." + locale + "."));
+Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
+nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale));
boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);
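
HUNSPELL_DICTIONARY_OPTIONS is a group setting: Setting#get returns everything under its prefix as a nested Settings object with the prefix stripped, which is why the lookup above can use dictSettings.getByPrefix(locale) instead of concatenating the full key by hand. A small sketch of what the group setting yields, assuming a hypothetical en_US option in the node settings:

Settings nodeSettings = Settings.settingsBuilder()
        .put("indices.analysis.hunspell.dictionary.en_US.ignore_case", true) // hypothetical node-level option
        .build();
// the group setting strips its own prefix, so the remaining key is "en_US.ignore_case"
Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
boolean ignoreCase = dictSettings.getAsBoolean("en_US.ignore_case", false); // -> true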

View File

@@ -40,6 +40,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -80,10 +81,10 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
* since we are checking on the cluster state IndexMetaData always.
*/
public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", true, true, Setting.Scope.INDEX);
-public static final String INDICES_CACHE_REQUEST_CLEAN_INTERVAL = "indices.requests.cache.clean_interval";
+public static final Setting<TimeValue> INDICES_CACHE_REQUEST_CLEAN_INTERVAL = Setting.positiveTimeSetting("indices.requests.cache.clean_interval", TimeValue.timeValueSeconds(60), false, Setting.Scope.CLUSTER);
-public static final String INDICES_CACHE_QUERY_SIZE = "indices.requests.cache.size";
-public static final String INDICES_CACHE_QUERY_EXPIRE = "indices.requests.cache.expire";
+public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", false, Setting.Scope.CLUSTER);
+public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), false, Setting.Scope.CLUSTER);
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);
@@ -98,7 +99,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
//TODO make these changes configurable on the cluster level
-private final String size;
+private final ByteSizeValue size;
private final TimeValue expire;
private volatile Cache<Key, Value> cache;
@@ -108,11 +109,11 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
super(settings);
this.clusterService = clusterService;
this.threadPool = threadPool;
-this.cleanInterval = settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));
+this.cleanInterval = INDICES_CACHE_REQUEST_CLEAN_INTERVAL.get(settings);
-this.size = settings.get(INDICES_CACHE_QUERY_SIZE, "1%");
+this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
-this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
+this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
buildCache();
this.reaper = new Reaper();
@@ -121,7 +122,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
private void buildCache() {
-long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes();
+long sizeInBytes = size.bytes();
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
.setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);
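
Two details are worth noting in this file. The size default is the string "1%" and buildCache now just calls size.bytes(), which implies the heap-ratio parsing previously done via MemorySizeValue.parseBytesSizeValueOrHeapRatio now happens when the setting itself is parsed. The expire setting keeps its old "unset means never expire" semantics through an explicit exists() check, since a Setting always yields a default. A minimal sketch of reading both, with assumed example values:

Settings settings = Settings.settingsBuilder()
        .put("indices.requests.cache.size", "2%")    // assumed heap ratio; an absolute value such as "512mb" should also parse
        .put("indices.requests.cache.expire", "10m")
        .build();
ByteSizeValue size = INDICES_CACHE_QUERY_SIZE.get(settings);      // already resolved, hence size.bytes() above
TimeValue expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings)
        ? INDICES_CACHE_QUERY_EXPIRE.get(settings)
        : null;                                                    // unset -> entries are only evicted by weight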

View File

@@ -33,6 +33,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@@ -53,8 +54,8 @@ import java.util.function.ToLongBiFunction;
*/
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable> {
-public static final String FIELDDATA_CLEAN_INTERVAL_SETTING = "indices.fielddata.cache.cleanup_interval";
-public static final String INDICES_FIELDDATA_CACHE_SIZE_KEY = "indices.fielddata.cache.size";
+public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
+public static final Setting<ByteSizeValue> INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
@@ -68,18 +69,16 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
super(settings);
this.threadPool = threadPool;
this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
-final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
-final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
+final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).bytes();
CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.<Key, Accountable>builder()
.removalListener(this);
if (sizeInBytes > 0) {
cacheBuilder.setMaximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
}
logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes));
cache = cacheBuilder.build();
-this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1));
+this.cleanInterval = INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING.get(settings);
// Start thread that will manage cleaning the field data cache periodically
threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
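
The field data cache keeps its unbounded-by-default behaviour through a sentinel: the declared default is new ByteSizeValue(-1), and only a positive value installs a maximum weight in the constructor above. A short sketch of the two paths, using a hypothetical explicit size:

// default: nothing configured, bytes() returns -1 and the cache stays unbounded
long unbounded = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(Settings.EMPTY).bytes();

// explicit bound (assumed value): the weigher and maximum weight get applied
Settings settings = Settings.settingsBuilder()
        .put("indices.fielddata.cache.size", "512mb")
        .build();
long bounded = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).bytes();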

View File

@@ -35,6 +35,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@@ -68,7 +69,7 @@ import java.util.concurrent.atomic.AtomicInteger;
public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable {
// TODO this class can be folded into IndicesService, and partially into IndicesClusterStateService; there is no need for a separate public service
-public static final String INDICES_STORE_DELETE_SHARD_TIMEOUT = "indices.store.delete.shard.timeout";
+public static final Setting<TimeValue> INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists";
private static final EnumSet<IndexShardState> ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED);
private final IndicesService indicesService;
@@ -85,7 +86,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
this.clusterService = clusterService;
this.transportService = transportService;
transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest::new, ThreadPool.Names.SAME, new ShardActiveRequestHandler());
-this.deleteShardTimeout = settings.getAsTime(INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS));
+this.deleteShardTimeout = INDICES_STORE_DELETE_SHARD_TIMEOUT.get(settings);
clusterService.addLast(this);
}

View File

@@ -40,8 +40,8 @@ public class HunspellServiceIT extends ESIntegTestCase {
public void testLocaleDirectoryWithNodeLevelConfig() throws Exception {
Settings settings = Settings.settingsBuilder()
.put("path.conf", getDataPath("/indices/analyze/conf_dir"))
-.put(HUNSPELL_LAZY_LOAD, randomBoolean())
-.put(HUNSPELL_IGNORE_CASE, true)
+.put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean())
+.put(HUNSPELL_IGNORE_CASE.getKey(), true)
.build();
internalCluster().startNode(settings);
@@ -53,8 +53,8 @@ public class HunspellServiceIT extends ESIntegTestCase {
public void testLocaleDirectoryWithLocaleSpecificConfig() throws Exception {
Settings settings = Settings.settingsBuilder()
.put("path.conf", getDataPath("/indices/analyze/conf_dir"))
-.put(HUNSPELL_LAZY_LOAD, randomBoolean())
-.put(HUNSPELL_IGNORE_CASE, true)
+.put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean())
+.put(HUNSPELL_IGNORE_CASE.getKey(), true)
.put("indices.analysis.hunspell.dictionary.en_US.strict_affix_parsing", false)
.put("indices.analysis.hunspell.dictionary.en_US.ignore_case", false)
.build();
@@ -75,7 +75,7 @@ public class HunspellServiceIT extends ESIntegTestCase {
public void testDicWithNoAff() throws Exception {
Settings settings = Settings.settingsBuilder()
.put("path.conf", getDataPath("/indices/analyze/no_aff_conf_dir"))
-.put(HUNSPELL_LAZY_LOAD, randomBoolean())
+.put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean())
.build();
Dictionary dictionary = null;
@@ -93,7 +93,7 @@ public class HunspellServiceIT extends ESIntegTestCase {
public void testDicWithTwoAffs() throws Exception {
Settings settings = Settings.settingsBuilder()
.put("path.conf", getDataPath("/indices/analyze/two_aff_conf_dir"))
-.put(HUNSPELL_LAZY_LOAD, randomBoolean())
+.put(HUNSPELL_LAZY_LOAD.getKey(), randomBoolean())
.build();
Dictionary dictionary = null;

View File

@@ -78,7 +78,7 @@ public class IndexStatsIT extends ESIntegTestCase {
protected Settings nodeSettings(int nodeOrdinal) {
//Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad
return Settings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
-.put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL, "1ms")
+.put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL.getKey(), "1ms")
.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), true)
.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), IndexModule.INDEX_QUERY_CACHE)
.build();

View File

@@ -86,7 +86,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
// by default this value is 1 sec in tests (30 sec in practice), but the disruption we add here,
// which lasts between 1 and 2 sec, can cause each of the shard deletion requests to time out.
// to prevent this we set the timeout here to something high, i.e. the default used in practice
-.put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS))
+.put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(30, TimeUnit.SECONDS))
.build();
}

View File

@@ -1685,7 +1685,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
.put("script.indexed", "on")
.put("script.inline", "on")
// wait short time for other active shards before actually deleting, default 30s not needed in tests
-.put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(1, TimeUnit.SECONDS));
+.put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS));
return builder.build();
}
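
On the consumer side nothing changes about how values are supplied: a Settings builder still works with raw string keys, which is why every test call site above switches from put(CONSTANT, value) to put(CONSTANT.getKey(), value). A minimal sketch of overriding two of the converted settings in a test, mirroring the usages above:

Settings nodeSettings = Settings.settingsBuilder()
        // Setting<T> is no longer a String, so the builder needs the underlying key
        .put(IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL.getKey(), "1ms")
        .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT.getKey(), new TimeValue(1, TimeUnit.SECONDS))
        .build();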