Merge pull request #16172 from s1monw/convert_settings

Convert several node and test level settings
Simon Willnauer 2016-01-22 16:09:32 +01:00
commit 37d7d6c23c
8 changed files with 23 additions and 24 deletions
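Every hunk in this commit applies the same conversion: a raw settings key read through a Settings getter, with its default value repeated at the call site, becomes a typed Setting<T> constant that owns the key, the default, the dynamic flag, and the scope. Roughly, as a sketch (the settings variable is assumed to be an in-scope org.elasticsearch.common.settings.Settings instance and is not part of the diff):

// Before: raw key plus an inline default at every call site.
TimeValue oldInterval = settings.getAsTime("search.keep_alive_interval", TimeValue.timeValueMinutes(1));
// After: the Setting constant centralizes key, default, dynamic flag and scope;
// call sites only resolve the parsed value.
TimeValue newInterval = SearchService.KEEPALIVE_INTERVAL_SETTING.get(settings);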

View File

@ -129,7 +129,7 @@ class NodeInfo {
'JAVA_HOME' : project.javaHome,
'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
]
args.add("-Des.tests.portsfile=true")
args.add("-Des.node.portsfile=true")
args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('es.')) {
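The build-side change lines up the system property name with the new setting key defined in Node below. Assuming es.-prefixed system properties are folded into node settings with the prefix stripped (which is how -Des.node.portsfile=true can reach the node.portsfile setting at all), the node effectively starts with something like the following fragment; it is illustrative, not part of the commit:

Settings nodeSettings = Settings.builder().put("node.portsfile", true).build();
boolean writePortsFile = Node.WRITE_PORTS_FIELD_SETTING.get(nodeSettings); // true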

View File

@ -55,6 +55,7 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.node.Node;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
@ -218,6 +219,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING
)));
}
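Registering the three new constants here makes them known node/cluster-level settings, so their raw string values are parsed and validated by the Setting itself rather than at each call site. A small illustrative fragment (the literal values are made up):

Settings s = Settings.builder()
    .put(SearchService.DEFAULT_KEEPALIVE_SETTING.getKey(), "10m") // search.default_keep_alive
    .put(Node.WRITE_PORTS_FIELD_SETTING.getKey(), true)           // node.portsfile
    .build();
TimeValue keepAlive = SearchService.DEFAULT_KEEPALIVE_SETTING.get(s); // parsed as a ten minute TimeValue
boolean portsFile = Node.WRITE_PORTS_FIELD_SETTING.get(s);            // true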

View File

@ -46,6 +46,7 @@ import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
@ -120,6 +121,7 @@ public class Node implements Releasable {
private static final String CLIENT_TYPE = "node";
public static final String HTTP_ENABLED = "http.enabled";
public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER);
private final Lifecycle lifecycle = new Lifecycle();
private final Injector injector;
private final Settings settings;
@ -275,7 +277,7 @@ public class Node implements Releasable {
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(TribeService.class).start();
if (System.getProperty("es.tests.portsfile", "false").equals("true")) {
if (WRITE_PORTS_FIELD_SETTING.get(settings)) {
if (settings.getAsBoolean("http.enabled", true)) {
HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
writePortsFile("http", http.boundAddress());
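Node.start() now resolves the setting instead of peeking at the system property, and since WRITE_PORTS_FIELD_SETTING defaults to false, nodes that don't opt in behave exactly as before; the http ports file is additionally still gated on http.enabled. As a sketch:

boolean write = Node.WRITE_PORTS_FIELD_SETTING.get(Settings.EMPTY); // false - nothing is written by default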

View File

@ -21,7 +21,6 @@ package org.elasticsearch.node.internal;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
@ -109,7 +108,6 @@ public class InternalSettingsPreparer {
// we put back the path.logs so we can use it in the logging configuration file
output.put(Environment.PATH_LOGS_SETTING.getKey(), cleanPath(environment.logsFile().toAbsolutePath().toString()));
return new Environment(output.build());
}

View File

@ -122,8 +122,9 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
public class SearchService extends AbstractLifecycleComponent<SearchService> implements IndexEventListener {
public static final Setting<Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", Loading.LAZY.toString(), (s) -> Loading.parse(s, Loading.LAZY), false, Setting.Scope.INDEX);
public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive";
public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval";
// we can have 5 minutes here, since we make sure to clean up on search requests and when a shard/index closes
public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), false, Setting.Scope.CLUSTER);
public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);
public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER);
@ -183,9 +184,8 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
this.fetchPhase = fetchPhase;
this.indicesQueryCache = indicesQueryCache;
TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1));
// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis();
TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings);
this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis();
Map<String, SearchParseElement> elementParsers = new HashMap<>();
elementParsers.putAll(dfsPhase.parseElements());
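When neither key is configured, the Setting constants supply the same defaults the removed getAsTime(...) calls used inline (five minutes and one minute), so out-of-the-box behavior does not change. For instance:

TimeValue keepAlive = SearchService.DEFAULT_KEEPALIVE_SETTING.get(Settings.EMPTY);  // 5m
TimeValue interval  = SearchService.KEEPALIVE_INTERVAL_SETTING.get(Settings.EMPTY); // 1m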

View File

@ -40,7 +40,7 @@ public class StressSearchServiceReaperIT extends ESIntegTestCase {
protected Settings nodeSettings(int nodeOrdinal) {
// very frequent checks
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(1)).build();
.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(1)).build();
}
// see issue #5165 - this test fails each time without the fix in pull #5170
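getKey() resolves to the same string the removed KEEPALIVE_INTERVAL_KEY constant held ("search.keep_alive_interval"), so tests and node configuration keep using the exact same property name; only the lookup now goes through the typed constant. For example:

String key = SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(); // "search.keep_alive_interval"
Settings s = Settings.builder().put(key, TimeValue.timeValueMillis(1)).build();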

View File

@ -24,7 +24,6 @@ import org.elasticsearch.common.recycler.Recycler.V;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.threadpool.ThreadPool;
import java.lang.reflect.Array;
@ -63,8 +62,10 @@ public class MockPageCacheRecycler extends PageCacheRecycler {
@Inject
public MockPageCacheRecycler(Settings settings, ThreadPool threadPool) {
super(settings, threadPool);
final long seed = settings.getAsLong(InternalTestCluster.SETTING_CLUSTER_NODE_SEED, 0L);
random = new Random(seed);
// we always initialize with 0 here since we really only want some random bytes / ints / longs
// and given that it's called concurrently it won't reproduce the same order anyway, other than in a unit test
// for the latter, 0 is just fine
random = new Random(0);
}
private <T> V<T> wrap(final V<T> v) {

View File

@ -146,11 +146,6 @@ public final class InternalTestCluster extends TestCluster {
private final ESLogger logger = Loggers.getLogger(getClass());
/**
* A node level setting that holds a per node random seed that is consistent across node restarts
*/
public static final String SETTING_CLUSTER_NODE_SEED = "test.cluster.node.seed";
/**
* The number of ports in the range used for this JVM
*/
@ -381,8 +376,7 @@ public final class InternalTestCluster extends TestCluster {
private Settings getRandomNodeSettings(long seed) {
Random random = new Random(seed);
Builder builder = Settings.settingsBuilder()
.put(SETTING_CLUSTER_NODE_SEED, seed);
Builder builder = Settings.settingsBuilder();
if (isLocalTransportConfigured() == false) {
builder.put(Transport.TRANSPORT_TCP_COMPRESS.getKey(), rarely(random));
}
@ -390,12 +384,12 @@ public final class InternalTestCluster extends TestCluster {
builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values()));
}
if (random.nextInt(10) == 0) { // 10% of the nodes have a very frequent check interval
builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueMillis(10 + random.nextInt(2000)));
builder.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(10 + random.nextInt(2000)));
} else if (random.nextInt(10) != 0) { // 90% of the time - 10% of the time we don't set anything
builder.put(SearchService.KEEPALIVE_INTERVAL_KEY, TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
builder.put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueSeconds(10 + random.nextInt(5 * 60)));
}
if (random.nextBoolean()) { // sometimes set a default keepalive
builder.put(SearchService.DEFAULT_KEEPALIVE_KEY, TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
builder.put(SearchService.DEFAULT_KEEPALIVE_SETTING.getKey(), TimeValue.timeValueSeconds(100 + random.nextInt(5 * 60)));
}
if (random.nextInt(10) == 0) {