Merge branch 'master' into feature/ingest

Authored by javanna on 2016-01-25 18:01:09 +01:00, committed by Luca Cavanna
commit 36d98478bf
262 changed files with 1907 additions and 2555 deletions

View File

@ -112,9 +112,6 @@ public class PluginBuildPlugin extends BuildPlugin {
include 'config/**'
include 'bin/**'
}
from('src/site') {
include '_site/**'
}
}
project.assemble.dependsOn(bundle)

View File

@ -36,15 +36,9 @@ class PluginPropertiesExtension {
@Input
String description
@Input
boolean jvm = true
@Input
String classname
@Input
boolean site = false
@Input
boolean isolated = true

View File

@ -51,11 +51,11 @@ class PluginPropertiesTask extends Copy {
if (extension.description == null) {
throw new InvalidUserDataException('description is a required setting for esplugin')
}
if (extension.jvm && extension.classname == null) {
throw new InvalidUserDataException('classname is a required setting for esplugin with jvm=true')
if (extension.classname == null) {
throw new InvalidUserDataException('classname is a required setting for esplugin')
}
doFirst {
if (extension.jvm && extension.isolated == false) {
if (extension.isolated == false) {
String warning = "WARNING: Disabling plugin isolation in ${project.path} is deprecated and will be removed in the future"
logger.warn("${'=' * warning.length()}\n${warning}\n${'=' * warning.length()}")
}
@ -74,10 +74,8 @@ class PluginPropertiesTask extends Copy {
'version': extension.version,
'elasticsearchVersion': VersionProperties.elasticsearch,
'javaVersion': project.targetCompatibility as String,
'jvm': extension.jvm as String,
'site': extension.site as String,
'isolated': extension.isolated as String,
'classname': extension.jvm ? extension.classname : 'NA'
'classname': extension.classname
]
}
}

View File

@ -129,7 +129,7 @@ class NodeInfo {
'JAVA_HOME' : project.javaHome,
'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
]
args.add("-Des.tests.portsfile=true")
args.add("-Des.node.portsfile=true")
args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
for (Map.Entry<String, String> property : System.properties.entrySet()) {
if (property.getKey().startsWith('es.')) {

View File

@ -2,26 +2,13 @@
# This file must exist as 'plugin-descriptor.properties' at
# the root directory of all plugins.
#
# A plugin can be 'site', 'jvm', or both.
#
### example site plugin for "foo":
#
# foo.zip <-- zip file for the plugin, with this structure:
# _site/ <-- the contents that will be served
# plugin-descriptor.properties <-- example contents below:
#
# site=true
# description=My cool plugin
# version=1.0
#
### example jvm plugin for "foo"
### example plugin for "foo"
#
# foo.zip <-- zip file for the plugin, with this structure:
# <arbitrary name1>.jar <-- classes, resources, dependencies
# <arbitrary nameN>.jar <-- any number of jars
# plugin-descriptor.properties <-- example contents below:
#
# jvm=true
# classname=foo.bar.BazPlugin
# description=My cool plugin
# version=2.0
@ -38,21 +25,6 @@ version=${version}
#
# 'name': the plugin name
name=${name}
### mandatory elements for site plugins:
#
# 'site': set to true to indicate contents of the _site/
# directory in the root of the plugin should be served.
site=${site}
#
### mandatory elements for jvm plugins:
#
# 'jvm': true if the 'classname' class should be loaded
# from jar files in the root directory of the plugin.
# Note that only jar files in the root directory are
# added to the classpath for the plugin! If you need
# other resources, package them into a resources jar.
jvm=${jvm}
#
# 'classname': the name of the class to load, fully-qualified.
classname=${classname}

View File

@ -241,26 +241,26 @@ final class Security {
*/
static void addFilePermissions(Permissions policy, Environment environment) {
// read-only dirs
addPath(policy, "path.home", environment.binFile(), "read,readlink");
addPath(policy, "path.home", environment.libFile(), "read,readlink");
addPath(policy, "path.home", environment.modulesFile(), "read,readlink");
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
addPath(policy, "path.conf", environment.configFile(), "read,readlink");
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.binFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libFile(), "read,readlink");
addPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesFile(), "read,readlink");
addPath(policy, Environment.PATH_PLUGINS_SETTING.getKey(), environment.pluginsFile(), "read,readlink");
addPath(policy, Environment.PATH_CONF_SETTING.getKey(), environment.configFile(), "read,readlink");
addPath(policy, Environment.PATH_SCRIPTS_SETTING.getKey(), environment.scriptsFile(), "read,readlink");
// read-write dirs
addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, "path.logs", environment.logsFile(), "read,readlink,write,delete");
addPath(policy, Environment.PATH_LOGS_SETTING.getKey(), environment.logsFile(), "read,readlink,write,delete");
if (environment.sharedDataFile() != null) {
addPath(policy, "path.shared_data", environment.sharedDataFile(), "read,readlink,write,delete");
addPath(policy, Environment.PATH_SHARED_DATA_SETTING.getKey(), environment.sharedDataFile(), "read,readlink,write,delete");
}
for (Path path : environment.dataFiles()) {
addPath(policy, "path.data", path, "read,readlink,write,delete");
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
for (Path path : environment.dataWithClusterFiles()) {
addPath(policy, "path.data", path, "read,readlink,write,delete");
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
for (Path path : environment.repoFiles()) {
addPath(policy, "path.repo", path, "read,readlink,write,delete");
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
}
if (environment.pidFile() != null) {
// we just need permission to remove the file if it's elsewhere.

View File

@ -34,6 +34,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@ -101,6 +102,11 @@ public class TransportClientNodesService extends AbstractComponent {
private volatile boolean closed;
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,
ThreadPool threadPool, Headers headers, Version version) {
@ -111,9 +117,9 @@ public class TransportClientNodesService extends AbstractComponent {
this.minCompatibilityVersion = version.minimumCompatibilityVersion();
this.headers = headers;
this.nodesSamplerInterval = this.settings.getAsTime("client.transport.nodes_sampler_interval", timeValueSeconds(5));
this.pingTimeout = this.settings.getAsTime("client.transport.ping_timeout", timeValueSeconds(5)).millis();
this.ignoreClusterName = this.settings.getAsBoolean("client.transport.ignore_cluster_name", false);
this.nodesSamplerInterval = CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
this.pingTimeout = CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();
this.ignoreClusterName = CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME.get(this.settings);
if (logger.isDebugEnabled()) {
logger.debug("node_sampler_interval[" + nodesSamplerInterval + "]");

View File

@ -89,7 +89,7 @@ public class ShardStateAction extends AbstractComponent {
logger.warn("{} no master known for action [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
waitForNewMasterAndRetry(actionName, observer, shardRoutingEntry, listener);
} else {
logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().getId(), actionName, masterNode.getId(), shardRoutingEntry);
logger.debug("{} sending [{}] to [{}] for shard [{}]", shardRoutingEntry.getShardRouting().shardId(), actionName, masterNode.getId(), shardRoutingEntry);
transportService.sendRequest(masterNode,
actionName, shardRoutingEntry, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) {
@Override
@ -144,7 +144,7 @@ public class ShardStateAction extends AbstractComponent {
@Override
public void onClusterServiceClose() {
logger.warn("{} node closed while execution action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().getId(), actionName, shardRoutingEntry.getShardRouting());
logger.warn("{} node closed while execution action [{}] for shard [{}]", shardRoutingEntry.failure, shardRoutingEntry.getShardRouting().shardId(), actionName, shardRoutingEntry.getShardRouting());
listener.onFailure(new NodeClosedException(clusterService.localNode()));
}

View File

@ -20,11 +20,15 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.misc.IndexMergeTool;
import org.elasticsearch.Version;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
@ -32,6 +36,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.unmodifiableSet;
@ -48,11 +53,13 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
public class MetaDataIndexUpgradeService extends AbstractComponent {
private final MapperRegistry mapperRegistry;
private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry) {
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings) {
super(settings);
this.mapperRegistry = mapperRegistry;
this.indexScopedSettings = indexScopedSettings;
}
/**
@ -65,21 +72,25 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData) {
// Throws an exception if there are too-old segments:
if (isUpgraded(indexMetaData)) {
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
return indexMetaData;
}
checkSupportedVersion(indexMetaData);
IndexMetaData newMetaData = indexMetaData;
// we have to run this first, otherwise we try to create IndexSettings
// with broken settings and fail in checkMappingsCompatibility
newMetaData = archiveBrokenIndexSettings(newMetaData);
// only run the check with the upgraded settings!!
checkMappingsCompatibility(newMetaData);
newMetaData = markAsUpgraded(newMetaData);
return newMetaData;
return markAsUpgraded(newMetaData);
}
/**
* Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks.
*/
private boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.getUpgradedVersion().onOrAfter(Version.V_3_0_0);
boolean isUpgraded(IndexMetaData indexMetaData) {
return indexMetaData.getUpgradedVersion().onOrAfter(Version.CURRENT);
}
/**
@ -171,4 +182,39 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}
}
private static final String ARCHIVED_SETTINGS_PREFIX = "archived.";
IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
Settings settings = indexMetaData.getSettings();
Settings.Builder builder = Settings.builder();
boolean changed = false;
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
try {
Setting<?> setting = indexScopedSettings.get(entry.getKey());
if (setting != null) {
setting.get(settings);
builder.put(entry.getKey(), entry.getValue());
} else {
if (indexScopedSettings.isPrivateSetting(entry.getKey()) || entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX)) {
builder.put(entry.getKey(), entry.getValue());
} else {
changed = true;
logger.warn("[{}] found unknown index setting: {} value: {} - archiving", indexMetaData.getIndex(), entry.getKey(), entry.getValue());
// we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
// but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
}
}
} catch (IllegalArgumentException ex) {
changed = true;
logger.warn("[{}] found invalid index setting: {} value: {} - archiving",ex, indexMetaData.getIndex(), entry.getKey(), entry.getValue());
// we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
// but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
}
}
return changed ? IndexMetaData.builder(indexMetaData).settings(builder.build()).build() : indexMetaData;
}
}
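A hedged illustration of what the archiving pass does, using a hypothetical unknown key: settings that validate against the index-scoped registry are copied through, while unknown or invalid ones are re-added under the archived. prefix so they stay visible without preventing the index from being upgraded:

// hypothetical input settings for an old index
Settings broken = Settings.builder()
    .put("index.number_of_shards", "5")     // known setting: validated and kept
    .put("index.unknown.knob", "whatever")  // unknown setting: archived
    .build();
// after archiveBrokenIndexSettings(...) the index settings would contain:
//   index.number_of_shards = 5
//   archived.index.unknown.knob = whatever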

View File

@ -19,6 +19,7 @@
package org.elasticsearch.common;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.lang.reflect.Method;
@ -40,7 +41,7 @@ import java.util.concurrent.ThreadLocalRandom;
* setting a reproducible seed. When running the Elasticsearch server
* process, non-reproducible sources of randomness are provided (unless
* a setting is provided for a module that exposes a seed setting (e.g.,
* DiscoveryService#SETTING_DISCOVERY_SEED)).
* DiscoveryService#DISCOVERY_SEED_SETTING)).
*/
public final class Randomness {
private static final Method currentMethod;
@ -68,13 +69,12 @@ public final class Randomness {
* seed in the settings with the key setting.
*
* @param settings the settings containing the seed
* @param setting the key to access the seed
* @param setting the setting to access the seed
* @return a reproducible source of randomness
*/
public static Random get(Settings settings, String setting) {
Long maybeSeed = settings.getAsLong(setting, null);
if (maybeSeed != null) {
return new Random(maybeSeed);
public static Random get(Settings settings, Setting<Long> setting) {
if (setting.exists(settings)) {
return new Random(setting.get(settings));
} else {
return get();
}
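Callers now pass the Setting<Long> itself instead of a string key, so the seeded branch is taken only when the setting is actually present. This is exactly how DiscoveryService.generateNodeId (further down in this diff) consumes it:

Random random = Randomness.get(settings, DiscoveryService.DISCOVERY_SEED_SETTING);
// reproducible when discovery.id.seed is set; otherwise get() returns a
// non-reproducible source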

View File

@ -117,12 +117,6 @@ public class Queries {
if (minimumShouldMatch == null) {
return query;
}
// Queries with a single word expanded with synonyms
// have their coordination factor disabled (@see org.apache.lucene.util.QueryBuilder#analyzeBoolean()).
// minimumShouldMatch should not be applicable in such case.
if (query.isCoordDisabled()) {
return query;
}
int optionalClauses = 0;
for (BooleanClause c : query.clauses()) {
if (c.getOccur() == BooleanClause.Occur.SHOULD) {

View File

@ -28,7 +28,9 @@ import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;
@ -154,7 +156,7 @@ public class NetworkModule extends AbstractModule {
public static final String NETTY_TRANSPORT = "netty";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String HTTP_ENABLED = "http.enabled";
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);
private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
RestMainAction.class,
@ -378,7 +380,7 @@ public class NetworkModule extends AbstractModule {
bind(TransportProxyClient.class).asEagerSingleton();
bind(TransportClientNodesService.class).asEagerSingleton();
} else {
if (settings.getAsBoolean(HTTP_ENABLED, true)) {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
}

View File

@ -19,7 +19,9 @@
package org.elasticsearch.common.network;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@ -41,31 +43,30 @@ public class NetworkService extends AbstractComponent {
/** By default, we bind to loopback interfaces */
public static final String DEFAULT_NETWORK_HOST = "_local_";
private static final String GLOBAL_NETWORK_HOST_SETTING = "network.host";
private static final String GLOBAL_NETWORK_BINDHOST_SETTING = "network.bind_host";
private static final String GLOBAL_NETWORK_PUBLISHHOST_SETTING = "network.publish_host";
public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST),
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final class TcpSettings {
public static final String TCP_NO_DELAY = "network.tcp.no_delay";
public static final String TCP_KEEP_ALIVE = "network.tcp.keep_alive";
public static final String TCP_REUSE_ADDRESS = "network.tcp.reuse_address";
public static final String TCP_SEND_BUFFER_SIZE = "network.tcp.send_buffer_size";
public static final String TCP_RECEIVE_BUFFER_SIZE = "network.tcp.receive_buffer_size";
public static final String TCP_BLOCKING = "network.tcp.blocking";
public static final String TCP_BLOCKING_SERVER = "network.tcp.blocking_server";
public static final String TCP_BLOCKING_CLIENT = "network.tcp.blocking_client";
public static final String TCP_CONNECT_TIMEOUT = "network.tcp.connect_timeout";
public static final ByteSizeValue TCP_DEFAULT_SEND_BUFFER_SIZE = null;
public static final ByteSizeValue TCP_DEFAULT_RECEIVE_BUFFER_SIZE = null;
public static final TimeValue TCP_DEFAULT_CONNECT_TIMEOUT = new TimeValue(30, TimeUnit.SECONDS);
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
}
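The typed TcpSettings also fold the old TCP_DEFAULT_* constants into the settings themselves; the buffer sizes use ByteSizeValue(-1) as an "unset" sentinel, and the blocking client/server settings fall back to TCP_BLOCKING. A short sketch of reading them, assuming a caller-built Settings:

Settings settings = Settings.builder()
    .put("network.tcp.connect_timeout", "10s")
    .build();
TimeValue connectTimeout = TcpSettings.TCP_CONNECT_TIMEOUT.get(settings);  // 10s
ByteSizeValue sendBuffer = TcpSettings.TCP_SEND_BUFFER_SIZE.get(settings); // -1 == unset
boolean blockingClient = TcpSettings.TCP_BLOCKING_CLIENT.get(settings);    // falls back to network.tcp.blocking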
/**
* A custom name resolver can support custom lookup keys (my_net_key:ipv4) and also change
* the default inet address used in case no setting is provided.
*/
public static interface CustomNameResolver {
public interface CustomNameResolver {
/**
* Resolves the default value if possible. If not, return <tt>null</tt>.
*/
@ -94,6 +95,7 @@ public class NetworkService extends AbstractComponent {
/**
* Resolves {@code bindHosts} to a list of internet addresses. The list will
* not contain duplicate addresses.
*
* @param bindHosts list of hosts to bind to. this may contain special pseudo-hostnames
* such as _local_ (see the documentation). if it is null, it will be populated
* based on global default settings.
@ -102,21 +104,22 @@ public class NetworkService extends AbstractComponent {
public InetAddress[] resolveBindHostAddresses(String bindHosts[]) throws IOException {
// first check settings
if (bindHosts == null) {
bindHosts = settings.getAsArray(GLOBAL_NETWORK_BINDHOST_SETTING, settings.getAsArray(GLOBAL_NETWORK_HOST_SETTING, null));
}
// next check any registered custom resolvers
if (bindHosts == null) {
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses;
if (GLOBAL_NETWORK_BINDHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline)
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
// next check any registered custom resolvers
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses;
}
}
// we know it's not here. get the defaults
bindHosts = GLOBAL_NETWORK_BINDHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
}
}
// finally, fill with our default
if (bindHosts == null) {
bindHosts = new String[] { DEFAULT_NETWORK_HOST };
}
InetAddress addresses[] = resolveInetAddresses(bindHosts);
// try to deal with some (mis)configuration
@ -138,6 +141,7 @@ public class NetworkService extends AbstractComponent {
* only one address is just a current limitation.
* <p>
* If {@code publishHosts} resolves to more than one address, <b>then one is selected with magic</b>
*
* @param publishHosts list of hosts to publish as. this may contain special pseudo-hostnames
* such as _local_ (see the documentation). if it is null, it will be populated
* based on global default settings.
@ -145,23 +149,23 @@ public class NetworkService extends AbstractComponent {
*/
// TODO: needs to be InetAddress[]
public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOException {
// first check settings
if (publishHosts == null) {
publishHosts = settings.getAsArray(GLOBAL_NETWORK_PUBLISHHOST_SETTING, settings.getAsArray(GLOBAL_NETWORK_HOST_SETTING, null));
}
// next check any registered custom resolvers
if (publishHosts == null) {
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses[0];
if (GLOBAL_NETWORK_PUBLISHHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline)
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
// next check any registered custom resolvers
for (CustomNameResolver customNameResolver : customNameResolvers) {
InetAddress addresses[] = customNameResolver.resolveDefault();
if (addresses != null) {
return addresses[0];
}
}
// we know it's not here. get the defaults
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
}
}
// finally, fill with our default
if (publishHosts == null) {
publishHosts = new String[] { DEFAULT_NETWORK_HOST };
}
InetAddress addresses[] = resolveInetAddresses(publishHosts);
// TODO: allow publishing multiple addresses
// for now... the hack begins
@ -184,17 +188,17 @@ public class NetworkService extends AbstractComponent {
throw new IllegalArgumentException("publish address: {" + NetworkAddress.format(address) + "} is wildcard, but multiple addresses specified: this makes no sense");
}
}
// 3. if we end out with multiple publish addresses, select by preference.
// don't warn the user, or they will get confused by bind_host vs publish_host etc.
if (addresses.length > 1) {
List<InetAddress> sorted = new ArrayList<>(Arrays.asList(addresses));
NetworkUtils.sortAddresses(sorted);
addresses = new InetAddress[] { sorted.get(0) };
addresses = new InetAddress[]{sorted.get(0)};
}
return addresses[0];
}
/** resolves (and deduplicates) host specification */
private InetAddress[] resolveInetAddresses(String hosts[]) throws IOException {
if (hosts.length == 0) {

View File

@ -20,6 +20,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData;
@ -35,16 +36,32 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAl
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.analysis.HunspellService;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.node.Node;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
@ -99,6 +116,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
@ -111,6 +131,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
@ -140,6 +163,19 @@ public final class ClusterSettings extends AbstractScopedSettings {
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS,
NettyHttpServerTransport.SETTING_CORS_ENABLED,
NettyHttpServerTransport.SETTING_CORS_MAX_AGE,
NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
NettyHttpServerTransport.SETTING_PIPELINING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
@ -157,8 +193,65 @@ public final class ClusterSettings extends AbstractScopedSettings {
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_PROFILES_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
Node.NODE_INGEST_SETTING)));
}

View File

@ -152,4 +152,17 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
public IndexScopedSettings copy(Settings settings, IndexMetaData metaData) {
return new IndexScopedSettings(settings, this, metaData);
}
public boolean isPrivateSetting(String key) {
switch (key) {
case IndexMetaData.SETTING_CREATION_DATE:
case IndexMetaData.SETTING_INDEX_UUID:
case IndexMetaData.SETTING_VERSION_CREATED:
case IndexMetaData.SETTING_VERSION_UPGRADED:
case MergePolicyConfig.INDEX_MERGE_ENABLED:
return true;
default:
return false;
}
}
}

View File

@ -41,6 +41,7 @@ import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
@ -70,8 +71,21 @@ public class Setting<T> extends ToXContentToBytes {
this.scope = scope;
}
/**
* Creates a new Setting instance
* @param key the settings key for this setting.
* @param fallBackSetting a setting to fall back to if the current setting is not set.
* @param parser a parser that parses the string rep into a complex datatype.
* @param dynamic true iff this setting can be dynamically updated
* @param scope the scope of this setting
*/
public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, boolean dynamic, Scope scope) {
this(key, fallBackSetting::getRaw, parser, dynamic, scope);
}
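NetworkService's TCP_BLOCKING_SERVER and TCP_BLOCKING_CLIENT (earlier in this diff) are built on this constructor. A hedged sketch with hypothetical keys showing how the fallback resolves:

Setting<Boolean> base = Setting.boolSetting("example.base", false, false, Scope.CLUSTER);
Setting<Boolean> derived = Setting.boolSetting("example.derived", base, false, Scope.CLUSTER);

Settings settings = Settings.builder().put("example.base", true).build();
derived.get(settings);        // true: example.derived is unset, so the fallback's value is used
derived.getDefault(settings); // also true: the default is the fallback's raw value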
/**
* Returns the settings key or a prefix if this setting is a group setting
*
* @see #isGroupSetting()
*/
public final String getKey() {
@ -106,13 +120,21 @@ public class Setting<T> extends ToXContentToBytes {
}
/**
* Returns the default values string representation for this setting.
* Returns the default value string representation for this setting.
* @param settings a settings object for settings that has a default value depending on another setting if available
*/
public final String getDefault(Settings settings) {
public final String getDefaultRaw(Settings settings) {
return defaultValue.apply(settings);
}
/**
* Returns the default value for this setting.
* @param settings a settings object for settings that has a default value depending on another setting if available
*/
public final T getDefault(Settings settings) {
return parser.apply(getDefaultRaw(settings));
}
/**
* Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
*/
@ -309,6 +331,10 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
}
public static Setting<String> simpleString(String key, boolean dynamic, Scope scope) {
return new Setting<>(key, "", Function.identity(), dynamic, scope);
}
public static int parseInt(String s, int minValue, String key) {
int value = Integer.parseInt(s);
if (value < minValue) {
@ -333,6 +359,10 @@ public class Setting<T> extends ToXContentToBytes {
return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
}
public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, boolean dynamic, Scope scope) {
return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
}
@ -348,25 +378,15 @@ public class Setting<T> extends ToXContentToBytes {
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
}
public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope);
}
public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
Function<String, List<T>> parser = (s) -> {
try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){
XContentParser.Token token = xContentParser.nextToken();
if (token != XContentParser.Token.START_ARRAY) {
throw new IllegalArgumentException("expected START_ARRAY but got " + token);
}
ArrayList<T> list = new ArrayList<>();
while ((token = xContentParser.nextToken()) !=XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.VALUE_STRING) {
throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
}
list.add(singleValueParser.apply(xContentParser.text()));
}
return list;
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse array", e);
}
};
Function<String, List<T>> parser = (s) ->
parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());
return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
@Override
@ -387,6 +407,26 @@ public class Setting<T> extends ToXContentToBytes {
};
}
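GLOBAL_NETWORK_BINDHOST_SETTING in NetworkService is declared through this overload, falling back to GLOBAL_NETWORK_HOST_SETTING when network.bind_host is absent. A hedged usage sketch:

Settings settings = Settings.builder()
    .putArray("network.host", "127.0.0.1", "[::1]")
    .build();
// network.bind_host is unset, so the fallback's raw value is parsed instead
List<String> bindHosts = NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(settings); // [127.0.0.1, [::1]]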
private static List<String> parseableStringToList(String parsableString) {
try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(parsableString)) {
XContentParser.Token token = xContentParser.nextToken();
if (token != XContentParser.Token.START_ARRAY) {
throw new IllegalArgumentException("expected START_ARRAY but got " + token);
}
ArrayList<String> list = new ArrayList<>();
while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.VALUE_STRING) {
throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
}
list.add(xContentParser.text());
}
return list;
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse array", e);
}
}
private static String arrayToParsableString(String[] array) {
try {
XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());

View File

@ -22,6 +22,7 @@ package org.elasticsearch.common.unit;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
@ -32,7 +33,7 @@ import java.io.IOException;
* the earth ellipsoid defined in {@link GeoUtils}. The default unit used within
* this project is <code>METERS</code> which is defined by <code>DEFAULT</code>
*/
public enum DistanceUnit {
public enum DistanceUnit implements Writeable<DistanceUnit> {
INCH(0.0254, "in", "inch"),
YARD(0.9144, "yd", "yards"),
FEET(0.3048, "ft", "feet"),
@ -322,4 +323,24 @@ public enum DistanceUnit {
return new Distance(Double.parseDouble(distance), defaultUnit);
}
}
private static final DistanceUnit PROTOTYPE = DEFAULT;
@Override
public DistanceUnit readFrom(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown DistanceUnit ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
public static DistanceUnit readUnitFrom(StreamInput in) throws IOException {
return PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.ordinal());
}
}
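A hedged round-trip sketch of the new ordinal-based serialization, assuming the codebase's BytesStreamOutput and StreamInput.wrap(byte[]) stream helpers:

BytesStreamOutput out = new BytesStreamOutput();
DistanceUnit.FEET.writeTo(out);                    // writes the ordinal as a vInt
StreamInput in = StreamInput.wrap(out.bytes().toBytes());
DistanceUnit unit = DistanceUnit.readUnitFrom(in); // == DistanceUnit.FEET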

View File

@ -1,383 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.shard.ShardStateMetaData;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
*/
public class MultiDataPathUpgrader {
private final NodeEnvironment nodeEnvironment;
private final ESLogger logger = Loggers.getLogger(getClass());
/**
* Creates a new upgrader instance
* @param nodeEnvironment the node env to operate on.
*
*/
public MultiDataPathUpgrader(NodeEnvironment nodeEnvironment) {
this.nodeEnvironment = nodeEnvironment;
}
/**
* Upgrades the given shard Id from multiple shard paths into the given target path.
*
* @see #pickShardPath(org.elasticsearch.index.shard.ShardId)
*/
public void upgrade(ShardId shard, ShardPath targetPath) throws IOException {
final Path[] paths = nodeEnvironment.availableShardPaths(shard); // custom data path doesn't need upgrading
if (isTargetPathConfigured(paths, targetPath) == false) {
throw new IllegalArgumentException("shard path must be one of the shards data paths");
}
assert needsUpgrading(shard) : "Should not upgrade a path that needs no upgrading";
logger.info("{} upgrading multi data dir to {}", shard, targetPath.getDataPath());
final ShardStateMetaData loaded = ShardStateMetaData.FORMAT.loadLatestState(logger, paths);
if (loaded == null) {
throw new IllegalStateException(shard + " no shard state found in any of: " + Arrays.toString(paths) + " please check and remove them if possible");
}
logger.info("{} loaded shard state {}", shard, loaded);
ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath());
Files.createDirectories(targetPath.resolveIndex());
try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) {
try (final Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths);
} catch (LockObtainFailedException ex) {
throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex);
}
}
upgradeFiles(shard, targetPath, targetPath.resolveTranslog(), ShardPath.TRANSLOG_FOLDER_NAME, paths);
logger.info("{} wipe upgraded directories", shard);
for (Path path : paths) {
if (path.equals(targetPath.getShardStatePath()) == false) {
logger.info("{} wipe shard directories: [{}]", shard, path);
IOUtils.rm(path);
}
}
if (FileSystemUtils.files(targetPath.resolveIndex()).length == 0) {
throw new IllegalStateException("index folder [" + targetPath.resolveIndex() + "] is empty");
}
if (FileSystemUtils.files(targetPath.resolveTranslog()).length == 0) {
throw new IllegalStateException("translog folder [" + targetPath.resolveTranslog() + "] is empty");
}
}
/**
* Runs check-index on the target shard and throws an exception if it failed
*/
public void checkIndex(ShardPath targetPath) throws IOException {
BytesStreamOutput os = new BytesStreamOutput();
PrintStream out = new PrintStream(os, false, StandardCharsets.UTF_8.name());
try (Directory directory = new SimpleFSDirectory(targetPath.resolveIndex());
final CheckIndex checkIndex = new CheckIndex(directory)) {
checkIndex.setInfoStream(out);
CheckIndex.Status status = checkIndex.checkIndex();
out.flush();
if (!status.clean) {
logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), StandardCharsets.UTF_8));
throw new IllegalStateException("index check failure");
}
}
}
/**
* Returns true iff the given shard needs upgrading.
*/
public boolean needsUpgrading(ShardId shard) {
final Path[] paths = nodeEnvironment.availableShardPaths(shard);
// custom data paths don't need upgrading, nor do single-path envs
if (paths.length > 1) {
int numPathsExist = 0;
for (Path path : paths) {
if (Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) {
numPathsExist++;
if (numPathsExist > 1) {
return true;
}
}
}
}
return false;
}
/**
* Picks a target ShardPath to allocate and upgrade the given shard to. It picks the target based on a simple
* heuristic:
* <ul>
* <li>if the smallest datapath has 2x more space available than the shard's total size, the datapath with the most bytes for that shard is picked to minimize the amount of bytes to copy</li>
* <li>otherwise the largest available datapath is used as the target no matter how big of a slice of the shard it already holds.</li>
* </ul>
*/
public ShardPath pickShardPath(ShardId shard) throws IOException {
if (needsUpgrading(shard) == false) {
throw new IllegalStateException("Shard doesn't need upgrading");
}
final NodeEnvironment.NodePath[] paths = nodeEnvironment.nodePaths();
// if we need upgrading make sure we have all paths.
for (NodeEnvironment.NodePath path : paths) {
Files.createDirectories(path.resolve(shard));
}
final ShardFileInfo[] shardFileInfo = getShardFileInfo(shard, paths);
long totalBytesUsedByShard = 0;
long leastUsableSpace = Long.MAX_VALUE;
long mostUsableSpace = Long.MIN_VALUE;
assert shardFileInfo.length == nodeEnvironment.availableShardPaths(shard).length;
for (ShardFileInfo info : shardFileInfo) {
totalBytesUsedByShard += info.spaceUsedByShard;
leastUsableSpace = Math.min(leastUsableSpace, info.usableSpace + info.spaceUsedByShard);
mostUsableSpace = Math.max(mostUsableSpace, info.usableSpace + info.spaceUsedByShard);
}
if (mostUsableSpace < totalBytesUsedByShard) {
throw new IllegalStateException("Can't upgrade path available space: " + new ByteSizeValue(mostUsableSpace) + " required space: " + new ByteSizeValue(totalBytesUsedByShard));
}
ShardFileInfo target = shardFileInfo[0];
if (leastUsableSpace >= (2 * totalBytesUsedByShard)) {
for (ShardFileInfo info : shardFileInfo) {
if (info.spaceUsedByShard > target.spaceUsedByShard) {
target = info;
}
}
} else {
for (ShardFileInfo info : shardFileInfo) {
if (info.usableSpace > target.usableSpace) {
target = info;
}
}
}
return new ShardPath(false, target.path, target.path, IndexMetaData.INDEX_UUID_NA_VALUE /* we don't know */, shard);
}
private ShardFileInfo[] getShardFileInfo(ShardId shard, NodeEnvironment.NodePath[] paths) throws IOException {
final ShardFileInfo[] info = new ShardFileInfo[paths.length];
for (int i = 0; i < info.length; i++) {
Path path = paths[i].resolve(shard);
final long usableSpace = getUsableSpace(paths[i]);
info[i] = new ShardFileInfo(path, usableSpace, getSpaceUsedByShard(path));
}
return info;
}
protected long getSpaceUsedByShard(Path path) throws IOException {
final long[] spaceUsedByShard = new long[] {0};
if (Files.exists(path)) {
Files.walkFileTree(path, new FileVisitor<Path>() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
spaceUsedByShard[0] += attrs.size();
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
return FileVisitResult.CONTINUE;
}
});
}
return spaceUsedByShard[0];
}
protected long getUsableSpace(NodeEnvironment.NodePath path) throws IOException {
FileStore fileStore = path.fileStore;
return fileStore.getUsableSpace();
}
static class ShardFileInfo {
final Path path;
final long usableSpace;
final long spaceUsedByShard;
ShardFileInfo(Path path, long usableSpace, long spaceUsedByShard) {
this.path = path;
this.usableSpace = usableSpace;
this.spaceUsedByShard = spaceUsedByShard;
}
}
private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName, Path[] paths) throws IOException {
List<Path> movedFiles = new ArrayList<>();
for (Path path : paths) {
if (path.equals(targetPath.getDataPath()) == false) {
final Path sourceDir = path.resolve(folderName);
if (Files.exists(sourceDir)) {
logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
Files.createDirectories(targetDir);
for (Path file : stream) {
if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString()) || Files.isDirectory(file)) {
continue; // skip write.lock
}
logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(), Files.size(file));
final Path targetFile = targetDir.resolve(file.getFileName());
/* We are pessimistic and do a copy first to the other path and then an atomic move to rename it such that
in the worst case the file exists twice but is never lost or half written.*/
final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_", "_" + file.getFileName().toString());
Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING);
Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE); // we are on the same FS - this must work otherwise all bets are off
Files.delete(file);
movedFiles.add(targetFile);
}
}
}
}
}
if (movedFiles.isEmpty() == false) {
// fsync later it might be on disk already
logger.info("{} fsync files", shard);
for (Path moved : movedFiles) {
logger.info("{} syncing [{}]", shard, moved.getFileName());
IOUtils.fsync(moved, false);
}
logger.info("{} syncing directory [{}]", shard, targetDir);
IOUtils.fsync(targetDir, true);
}
}
/**
* Returns <code>true</code> iff the target path is one of the given paths.
*/
private boolean isTargetPathConfigured(final Path[] paths, ShardPath targetPath) {
for (Path path : paths) {
if (path.equals(targetPath.getDataPath())) {
return true;
}
}
return false;
}
/**
* Runs an upgrade on all shards located under the given node environment if there is more than 1 data.path configured;
* otherwise this method returns immediately.
*/
public static void upgradeMultiDataPath(NodeEnvironment nodeEnv, ESLogger logger) throws IOException {
if (nodeEnv.nodeDataPaths().length > 1) {
final MultiDataPathUpgrader upgrader = new MultiDataPathUpgrader(nodeEnv);
final Set<String> allIndices = nodeEnv.findAllIndices();
for (String index : allIndices) {
for (ShardId shardId : findAllShardIds(nodeEnv.indexPaths(new Index(index)))) {
try (ShardLock lock = nodeEnv.shardLock(shardId, 0)) {
if (upgrader.needsUpgrading(shardId)) {
final ShardPath shardPath = upgrader.pickShardPath(shardId);
upgrader.upgrade(shardId, shardPath);
// we have to check if the index path exists since we might
// have only upgraded the shard state that is written under /indexname/shardid/_state
// in the case we upgraded a dedicated index directory index
if (Files.exists(shardPath.resolveIndex())) {
upgrader.checkIndex(shardPath);
}
} else {
logger.debug("{} no upgrade needed - already upgraded");
}
}
}
}
}
}
private static Set<ShardId> findAllShardIds(Path... locations) throws IOException {
final Set<ShardId> shardIds = new HashSet<>();
for (final Path location : locations) {
if (Files.isDirectory(location)) {
shardIds.addAll(findAllShardsForIndex(location));
}
}
return shardIds;
}
private static Set<ShardId> findAllShardsForIndex(Path indexPath) throws IOException {
Set<ShardId> shardIds = new HashSet<>();
if (Files.isDirectory(indexPath)) {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
String currentIndex = indexPath.getFileName().toString();
for (Path shardPath : stream) {
String fileName = shardPath.getFileName().toString();
if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
int shardId = Integer.parseInt(fileName);
ShardId id = new ShardId(currentIndex, shardId);
shardIds.add(id);
}
}
}
}
return shardIds;
}
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.discovery;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
@ -36,14 +37,17 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
* A module for loading classes for node discovery.
*/
public class DiscoveryModule extends AbstractModule {
public static final String DISCOVERY_TYPE_KEY = "discovery.type";
public static final String ZEN_MASTER_SERVICE_TYPE_KEY = "discovery.zen.masterservice.type";
public static final Setting<String> DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type",
settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type",
"zen", Function.identity(), false, Setting.Scope.CLUSTER);
private final Settings settings;
private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = new ArrayList<>();
@ -93,15 +97,14 @@ public class DiscoveryModule extends AbstractModule {
@Override
protected void configure() {
String defaultType = DiscoveryNode.localNode(settings) ? "local" : "zen";
String discoveryType = settings.get(DISCOVERY_TYPE_KEY, defaultType);
String discoveryType = DISCOVERY_TYPE_SETTING.get(settings);
Class<? extends Discovery> discoveryClass = discoveryTypes.get(discoveryType);
if (discoveryClass == null) {
throw new IllegalArgumentException("Unknown Discovery type [" + discoveryType + "]");
}
if (discoveryType.equals("local") == false) {
String masterServiceTypeKey = settings.get(ZEN_MASTER_SERVICE_TYPE_KEY, "zen");
String masterServiceTypeKey = ZEN_MASTER_SERVICE_TYPE_SETTING.get(settings);
final Class<? extends ElectMasterService> masterService = masterServiceType.get(masterServiceTypeKey);
if (masterService == null) {
throw new IllegalArgumentException("Unknown master service type [" + masterServiceTypeKey + "]");
@ -121,4 +124,4 @@ public class DiscoveryModule extends AbstractModule {
bind(Discovery.class).to(discoveryClass).asEagerSingleton();
bind(DiscoveryService.class).asEagerSingleton();
}
}
}
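A minimal sketch of the constructor used above, to make the new pattern explicit: the default is computed from the Settings instance itself, so discovery.type resolves to "local" on local nodes and "zen" otherwise. The example.* key below is hypothetical and only illustrates the signature visible in this hunk.

// hypothetical key, illustrating a Settings-derived default
public static final Setting<String> EXAMPLE_TYPE_SETTING = new Setting<>(
        "example.discovery.type",
        settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", // default producer
        Function.identity(),                                             // parser: keep the raw string
        false,                                                           // not dynamically updatable
        Setting.Scope.CLUSTER);
// EXAMPLE_TYPE_SETTING.get(settings) returns the configured value,
// or the computed default when the key is absent.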

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -39,8 +40,8 @@ import java.util.concurrent.TimeUnit;
*/
public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {
public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout";
public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed";
public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
private static class InitialStateListener implements InitialStateDiscoveryListener {
@ -71,7 +72,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
super(settings);
this.discoverySettings = discoverySettings;
this.discovery = discovery;
this.initialStateTimeout = settings.getAsTime(SETTING_INITIAL_STATE_TIMEOUT, TimeValue.timeValueSeconds(30));
this.initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings);
}
public ClusterBlock getNoMasterBlock() {
@ -132,7 +133,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
}
public static String generateNodeId(Settings settings) {
Random random = Randomness.get(settings, DiscoveryService.SETTING_DISCOVERY_SEED);
Random random = Randomness.get(settings, DiscoveryService.DISCOVERY_SEED_SETTING);
return Strings.randomBase64UUID(random);
}
}
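For comparison, the read pattern before and after this migration, as a sketch rather than code from the commit: the old string-key lookup repeats its default at every call site, while the new constant owns key, default, and parsing (and, judging by its name, positiveTimeSetting rejects negative durations at parse time).

// before: default duplicated wherever the key is read
TimeValue oldStyle = settings.getAsTime("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30));

// after: the constant centralizes key, default, parsing, and validation
TimeValue timeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings);
long seed = DISCOVERY_SEED_SETTING.get(settings); // typed read, no string parsing at the call site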

View File

@ -90,15 +90,17 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout";
public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout";
public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts";
public final static String SETTING_JOIN_RETRY_DELAY = "discovery.zen.join_retry_delay";
public final static String SETTING_MAX_PINGS_FROM_ANOTHER_MASTER = "discovery.zen.max_pings_from_another_master";
public final static String SETTING_SEND_LEAVE_REQUEST = "discovery.zen.send_leave_request";
public final static String SETTING_MASTER_ELECTION_FILTER_CLIENT = "discovery.zen.master_election.filter_client";
public final static String SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT = "discovery.zen.master_election.wait_for_joins_timeout";
public final static String SETTING_MASTER_ELECTION_FILTER_DATA = "discovery.zen.master_election.filter_data";
public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);
public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
@ -164,26 +166,19 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.discoverySettings = discoverySettings;
this.pingService = pingService;
this.electMaster = electMasterService;
this.pingTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(3));
this.pingTimeout = PING_TIMEOUT_SETTING.get(settings);
this.joinTimeout = settings.getAsTime(SETTING_JOIN_TIMEOUT, TimeValue.timeValueMillis(this.pingTimeout.millis() * 20));
this.joinRetryAttempts = settings.getAsInt(SETTING_JOIN_RETRY_ATTEMPTS, 3);
this.joinRetryDelay = settings.getAsTime(SETTING_JOIN_RETRY_DELAY, TimeValue.timeValueMillis(100));
this.maxPingsFromAnotherMaster = settings.getAsInt(SETTING_MAX_PINGS_FROM_ANOTHER_MASTER, 3);
this.sendLeaveRequest = settings.getAsBoolean(SETTING_SEND_LEAVE_REQUEST, true);
this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings);
this.joinRetryAttempts = JOIN_RETRY_ATTEMPTS_SETTING.get(settings);
this.joinRetryDelay = JOIN_RETRY_DELAY_SETTING.get(settings);
this.maxPingsFromAnotherMaster = MAX_PINGS_FROM_ANOTHER_MASTER_SETTING.get(settings);
this.sendLeaveRequest = SEND_LEAVE_REQUEST_SETTING.get(settings);
this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true);
this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false);
this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2));
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);
if (this.joinRetryAttempts < 1) {
throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]");
}
if (this.maxPingsFromAnotherMaster < 1) {
throw new IllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. got [" + this.maxPingsFromAnotherMaster + "]");
}
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> {
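Note that the deleted joinRetryAttempts and maxPingsFromAnotherMaster checks are not lost: intSetting's third argument declares a minimum bound (1 here), so out-of-range values should now fail when the setting is read. A hedged sketch of that behavior, inferred from the signature (Settings.settingsBuilder() is the builder API of this era):

Settings bad = Settings.settingsBuilder()
        .put("discovery.zen.join_retry_attempts", 0) // below the declared minimum of 1
        .build();
// expected to throw IllegalArgumentException, replacing the old constructor-time check
int attempts = JOIN_RETRY_ATTEMPTS_SETTING.get(bad);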

View File

@ -21,6 +21,8 @@ package org.elasticsearch.discovery.zen.fd;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;
@ -35,11 +37,11 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/
public abstract class FaultDetection extends AbstractComponent {
public static final String SETTING_CONNECT_ON_NETWORK_DISCONNECT = "discovery.zen.fd.connect_on_network_disconnect";
public static final String SETTING_PING_INTERVAL = "discovery.zen.fd.ping_interval";
public static final String SETTING_PING_TIMEOUT = "discovery.zen.fd.ping_timeout";
public static final String SETTING_PING_RETRIES = "discovery.zen.fd.ping_retries";
public static final String SETTING_REGISTER_CONNECTION_LISTENER = "discovery.zen.fd.register_connection_listener";
public static final Setting<Boolean> CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, false, Scope.CLUSTER);
public static final Setting<TimeValue> PING_INTERVAL_SETTING = Setting.positiveTimeSetting("discovery.zen.fd.ping_interval", timeValueSeconds(1), false, Scope.CLUSTER);
public static final Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.fd.ping_timeout", timeValueSeconds(30), false, Scope.CLUSTER);
public static final Setting<Integer> PING_RETRIES_SETTING = Setting.intSetting("discovery.zen.fd.ping_retries", 3, false, Scope.CLUSTER);
public static final Setting<Boolean> REGISTER_CONNECTION_LISTENER_SETTING = Setting.boolSetting("discovery.zen.fd.register_connection_listener", true, false, Scope.CLUSTER);
protected final ThreadPool threadPool;
protected final ClusterName clusterName;
@ -60,11 +62,11 @@ public abstract class FaultDetection extends AbstractComponent {
this.transportService = transportService;
this.clusterName = clusterName;
this.connectOnNetworkDisconnect = settings.getAsBoolean(SETTING_CONNECT_ON_NETWORK_DISCONNECT, false);
this.pingInterval = settings.getAsTime(SETTING_PING_INTERVAL, timeValueSeconds(1));
this.pingRetryTimeout = settings.getAsTime(SETTING_PING_TIMEOUT, timeValueSeconds(30));
this.pingRetryCount = settings.getAsInt(SETTING_PING_RETRIES, 3);
this.registerConnectionListener = settings.getAsBoolean(SETTING_REGISTER_CONNECTION_LISTENER, true);
this.connectOnNetworkDisconnect = CONNECT_ON_NETWORK_DISCONNECT_SETTING.get(settings);
this.pingInterval = PING_INTERVAL_SETTING.get(settings);
this.pingRetryTimeout = PING_TIMEOUT_SETTING.get(settings);
this.pingRetryCount = PING_RETRIES_SETTING.get(settings);
this.registerConnectionListener = REGISTER_CONNECTION_LISTENER_SETTING.get(settings);
this.connectionListener = new FDConnectionListener();
if (registerConnectionListener) {

View File

@ -31,6 +31,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@ -58,6 +59,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@ -72,6 +74,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
@ -83,7 +86,8 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
public static final String ACTION_NAME = "internal:discovery/zen/unicast";
public static final String DISCOVERY_ZEN_PING_UNICAST_HOSTS = "discovery.zen.ping.unicast.hosts";
public static final Setting<List<String>> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = Setting.intSetting("discovery.zen.ping.unicast.concurrent_connects", 10, 0, false, Setting.Scope.CLUSTER);
// these limits are per-address
public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
@ -135,13 +139,8 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
}
}
this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
// trim the hosts
for (int i = 0; i < hostArr.length; i++) {
hostArr[i] = hostArr[i].trim();
}
List<String> hosts = CollectionUtils.arrayAsArrayList(hostArr);
this.concurrentConnects = DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING.get(settings);
List<String> hosts = DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.get(settings);
final int limitPortCounts;
if (hosts.isEmpty()) {
// if unicast hosts are not specified, fill with simple defaults on the local machine

View File

@ -23,6 +23,7 @@ import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.io.IOException;
@ -33,6 +34,9 @@ import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
import static org.elasticsearch.common.Strings.cleanPath;
@ -43,6 +47,15 @@ import static org.elasticsearch.common.Strings.cleanPath;
// TODO: move PathUtils to be package-private here instead of
// public+forbidden api!
public class Environment {
public static final Setting<String> PATH_HOME_SETTING = Setting.simpleString("path.home", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_CONF_SETTING = Setting.simpleString("path.conf", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_SCRIPTS_SETTING = Setting.simpleString("path.scripts", false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PATH_DATA_SETTING = Setting.listSetting("path.data", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_LOGS_SETTING = Setting.simpleString("path.logs", false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_PLUGINS_SETTING = Setting.simpleString("path.plugins", false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PATH_REPO_SETTING = Setting.listSetting("path.repo", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> PATH_SHARED_DATA_SETTING = Setting.simpleString("path.shared_data", false, Setting.Scope.CLUSTER);
public static final Setting<String> PIDFILE_SETTING = Setting.simpleString("pidfile", false, Setting.Scope.CLUSTER);
private final Settings settings;
@ -95,64 +108,64 @@ public class Environment {
public Environment(Settings settings) {
this.settings = settings;
final Path homeFile;
if (settings.get("path.home") != null) {
homeFile = PathUtils.get(cleanPath(settings.get("path.home")));
if (PATH_HOME_SETTING.exists(settings)) {
homeFile = PathUtils.get(cleanPath(PATH_HOME_SETTING.get(settings)));
} else {
throw new IllegalStateException("path.home is not configured");
throw new IllegalStateException(PATH_HOME_SETTING.getKey() + " is not configured");
}
if (settings.get("path.conf") != null) {
configFile = PathUtils.get(cleanPath(settings.get("path.conf")));
if (PATH_CONF_SETTING.exists(settings)) {
configFile = PathUtils.get(cleanPath(PATH_CONF_SETTING.get(settings)));
} else {
configFile = homeFile.resolve("config");
}
if (settings.get("path.scripts") != null) {
scriptsFile = PathUtils.get(cleanPath(settings.get("path.scripts")));
if (PATH_SCRIPTS_SETTING.exists(settings)) {
scriptsFile = PathUtils.get(cleanPath(PATH_SCRIPTS_SETTING.get(settings)));
} else {
scriptsFile = configFile.resolve("scripts");
}
if (settings.get("path.plugins") != null) {
pluginsFile = PathUtils.get(cleanPath(settings.get("path.plugins")));
if (PATH_PLUGINS_SETTING.exists(settings)) {
pluginsFile = PathUtils.get(cleanPath(PATH_PLUGINS_SETTING.get(settings)));
} else {
pluginsFile = homeFile.resolve("plugins");
}
String[] dataPaths = settings.getAsArray("path.data");
if (dataPaths.length > 0) {
dataFiles = new Path[dataPaths.length];
dataWithClusterFiles = new Path[dataPaths.length];
for (int i = 0; i < dataPaths.length; i++) {
dataFiles[i] = PathUtils.get(dataPaths[i]);
List<String> dataPaths = PATH_DATA_SETTING.get(settings);
if (dataPaths.isEmpty() == false) {
dataFiles = new Path[dataPaths.size()];
dataWithClusterFiles = new Path[dataPaths.size()];
for (int i = 0; i < dataPaths.size(); i++) {
dataFiles[i] = PathUtils.get(dataPaths.get(i));
dataWithClusterFiles[i] = dataFiles[i].resolve(ClusterName.clusterNameFromSettings(settings).value());
}
} else {
dataFiles = new Path[]{homeFile.resolve("data")};
dataWithClusterFiles = new Path[]{homeFile.resolve("data").resolve(ClusterName.clusterNameFromSettings(settings).value())};
}
if (settings.get("path.shared_data") != null) {
sharedDataFile = PathUtils.get(cleanPath(settings.get("path.shared_data")));
if (PATH_SHARED_DATA_SETTING.exists(settings)) {
sharedDataFile = PathUtils.get(cleanPath(PATH_SHARED_DATA_SETTING.get(settings)));
} else {
sharedDataFile = null;
}
String[] repoPaths = settings.getAsArray("path.repo");
if (repoPaths.length > 0) {
repoFiles = new Path[repoPaths.length];
for (int i = 0; i < repoPaths.length; i++) {
repoFiles[i] = PathUtils.get(repoPaths[i]);
List<String> repoPaths = PATH_REPO_SETTING.get(settings);
if (repoPaths.isEmpty() == false) {
repoFiles = new Path[repoPaths.size()];
for (int i = 0; i < repoPaths.size(); i++) {
repoFiles[i] = PathUtils.get(repoPaths.get(i));
}
} else {
repoFiles = new Path[0];
}
if (settings.get("path.logs") != null) {
logsFile = PathUtils.get(cleanPath(settings.get("path.logs")));
if (PATH_LOGS_SETTING.exists(settings)) {
logsFile = PathUtils.get(cleanPath(PATH_LOGS_SETTING.get(settings)));
} else {
logsFile = homeFile.resolve("logs");
}
if (settings.get("pidfile") != null) {
pidFile = PathUtils.get(cleanPath(settings.get("pidfile")));
if (PIDFILE_SETTING.exists(settings)) {
pidFile = PathUtils.get(cleanPath(PIDFILE_SETTING.get(settings)));
} else {
pidFile = null;
}
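Taken together, only path.home is mandatory; every other location falls back to a directory under it unless set explicitly. A hypothetical bootstrap snippet (paths are illustrative; putArray is assumed from the era's Settings builder):

Settings settings = Settings.settingsBuilder()
        .put(Environment.PATH_HOME_SETTING.getKey(), "/opt/elasticsearch")
        .putArray(Environment.PATH_DATA_SETTING.getKey(), "/mnt/data0", "/mnt/data1")
        .build();
Environment env = new Environment(settings);
// env.configFile() -> /opt/elasticsearch/config, env.logsFile() -> /opt/elasticsearch/logs;
// omitting path.home throws IllegalStateException("path.home is not configured")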

View File

@ -34,7 +34,6 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.MultiDataPathUpgrader;
import org.elasticsearch.env.NodeEnvironment;
import java.nio.file.DirectoryStream;
@ -77,7 +76,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
if (DiscoveryNode.dataNode(settings)) {
ensureNoPre019ShardState(nodeEnv);
MultiDataPathUpgrader.upgradeMultiDataPath(nodeEnv, logger);
}
if (DiscoveryNode.masterNode(settings) || DiscoveryNode.dataNode(settings)) {

View File

@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoveryService;
@ -49,6 +50,21 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
public class GatewayService extends AbstractLifecycleComponent<GatewayService> implements ClusterStateListener {
public static final Setting<Integer> EXPECTED_NODES_SETTING = Setting.intSetting(
"gateway.expected_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_DATA_NODES_SETTING = Setting.intSetting(
"gateway.expected_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> EXPECTED_MASTER_NODES_SETTING = Setting.intSetting(
"gateway.expected_master_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> RECOVER_AFTER_TIME_SETTING = Setting.positiveTimeSetting(
"gateway.recover_after_time", TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_DATA_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_data_nodes", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> RECOVER_AFTER_MASTER_NODES_SETTING = Setting.intSetting(
"gateway.recover_after_master_nodes", 0, 0, false, Setting.Scope.CLUSTER);
public static final ClusterBlock STATE_NOT_RECOVERED_BLOCK = new ClusterBlock(1, "state not recovered / initialized", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL);
public static final TimeValue DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET = TimeValue.timeValueMinutes(5);
@ -84,20 +100,26 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
this.discoveryService = discoveryService;
this.threadPool = threadPool;
// allow controlling a delay before indices get created
this.expectedNodes = this.settings.getAsInt("gateway.expected_nodes", -1);
this.expectedDataNodes = this.settings.getAsInt("gateway.expected_data_nodes", -1);
this.expectedMasterNodes = this.settings.getAsInt("gateway.expected_master_nodes", -1);
this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
this.expectedDataNodes = EXPECTED_DATA_NODES_SETTING.get(this.settings);
this.expectedMasterNodes = EXPECTED_MASTER_NODES_SETTING.get(this.settings);
TimeValue defaultRecoverAfterTime = null;
if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
defaultRecoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
if (RECOVER_AFTER_TIME_SETTING.exists(this.settings)) {
recoverAfterTime = RECOVER_AFTER_TIME_SETTING.get(this.settings);
} else if (expectedNodes >= 0 || expectedDataNodes >= 0 || expectedMasterNodes >= 0) {
recoverAfterTime = DEFAULT_RECOVER_AFTER_TIME_IF_EXPECTED_NODES_IS_SET;
} else {
recoverAfterTime = null;
}
this.recoverAfterTime = this.settings.getAsTime("gateway.recover_after_time", defaultRecoverAfterTime);
this.recoverAfterNodes = this.settings.getAsInt("gateway.recover_after_nodes", -1);
this.recoverAfterDataNodes = this.settings.getAsInt("gateway.recover_after_data_nodes", -1);
this.recoverAfterNodes = RECOVER_AFTER_NODES_SETTING.get(this.settings);
this.recoverAfterDataNodes = RECOVER_AFTER_DATA_NODES_SETTING.get(this.settings);
// default the recover after master nodes to the minimum master nodes in the discovery
this.recoverAfterMasterNodes = this.settings.getAsInt("gateway.recover_after_master_nodes", settings.getAsInt("discovery.zen.minimum_master_nodes", -1));
if (RECOVER_AFTER_MASTER_NODES_SETTING.exists(this.settings)) {
recoverAfterMasterNodes = RECOVER_AFTER_MASTER_NODES_SETTING.get(this.settings);
} else {
// TODO: change me once the minimum_master_nodes is changed too
recoverAfterMasterNodes = settings.getAsInt("discovery.zen.minimum_master_nodes", -1);
}
// Add the not-recovered block as an initial state block; we don't allow anything until the state is recovered
this.clusterService.addInitialStateBlock(STATE_NOT_RECOVERED_BLOCK);

View File

@ -51,7 +51,7 @@ import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
import static org.elasticsearch.rest.RestStatus.OK;
/**
*
* A component to serve http requests, backed by rest handlers.
*/
public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
@ -63,10 +63,6 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
private final NodeService nodeService;
private final boolean disableSites;
private final PluginSiteFilter pluginSiteFilter = new PluginSiteFilter();
@Inject
public HttpServer(Settings settings, Environment environment, HttpServerTransport transport,
RestController restController,
@ -77,9 +73,6 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
this.restController = restController;
this.nodeService = nodeService;
nodeService.setHttpServer(this);
this.disableSites = this.settings.getAsBoolean("http.disable_sites", false);
transport.httpServerAdapter(new Dispatcher(this));
}
@ -126,27 +119,13 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
}
public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) {
String rawPath = request.rawPath();
if (rawPath.startsWith("/_plugin/")) {
RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
filterChain.continueProcessing(request, channel);
return;
} else if (rawPath.equals("/favicon.ico")) {
if (request.rawPath().equals("/favicon.ico")) {
handleFavicon(request, channel);
return;
}
restController.dispatchRequest(request, channel);
}
class PluginSiteFilter extends RestFilter {
@Override
public void process(RestRequest request, RestChannel channel, RestFilterChain filterChain) throws IOException {
handlePluginSite((HttpRequest) request, (HttpChannel) channel);
}
}
void handleFavicon(HttpRequest request, HttpChannel channel) {
if (request.method() == RestRequest.Method.GET) {
try {
@ -163,129 +142,4 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
}
}
void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException {
if (disableSites) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
if (request.method() == RestRequest.Method.OPTIONS) {
// when we get an OPTIONS request, simply send OK by default (with the Access-Control-Allow-Origin header, which gets added automatically)
channel.sendResponse(new BytesRestResponse(OK));
return;
}
if (request.method() != RestRequest.Method.GET) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
// TODO for a "/_plugin" endpoint, we should have a page that lists all the plugins?
String path = request.rawPath().substring("/_plugin/".length());
int i1 = path.indexOf('/');
String pluginName;
String sitePath;
if (i1 == -1) {
pluginName = path;
sitePath = null;
// If a trailing / is missing, we redirect to the right page #2654
String redirectUrl = request.rawPath() + "/";
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.MOVED_PERMANENTLY, "text/html", "<head><meta http-equiv=\"refresh\" content=\"0; URL=" + redirectUrl + "\"></head>");
restResponse.addHeader("Location", redirectUrl);
channel.sendResponse(restResponse);
return;
} else {
pluginName = path.substring(0, i1);
sitePath = path.substring(i1 + 1);
}
// we default to index.html, or what the plugin provides (as a unix-style path)
// this is a relative path under _site configured by the plugin.
if (sitePath.length() == 0) {
sitePath = "index.html";
} else {
// remove extraneous leading slashes; it's not an absolute path.
while (sitePath.length() > 0 && sitePath.charAt(0) == '/') {
sitePath = sitePath.substring(1);
}
}
final Path siteFile = environment.pluginsFile().resolve(pluginName).resolve("_site");
final String separator = siteFile.getFileSystem().getSeparator();
// Convert file separators.
sitePath = sitePath.replace("/", separator);
Path file = siteFile.resolve(sitePath);
// return not found instead of forbidden to prevent malicious requests from finding out whether files exist
if (!Files.exists(file) || FileSystemUtils.isHidden(file) || !file.toAbsolutePath().normalize().startsWith(siteFile.toAbsolutePath().normalize())) {
channel.sendResponse(new BytesRestResponse(NOT_FOUND));
return;
}
BasicFileAttributes attributes = Files.readAttributes(file, BasicFileAttributes.class);
if (!attributes.isRegularFile()) {
// If it's not a dir, we send a 403
if (!attributes.isDirectory()) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
// We don't serve a directory, but if index.html exists in it we should serve that
file = file.resolve("index.html");
if (!Files.exists(file) || FileSystemUtils.isHidden(file) || !Files.isRegularFile(file)) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
return;
}
}
try {
byte[] data = Files.readAllBytes(file);
channel.sendResponse(new BytesRestResponse(OK, guessMimeType(sitePath), data));
} catch (IOException e) {
channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR));
}
}
// TODO: Don't respond with a mime type that violates the request's Accept header
private String guessMimeType(String path) {
int lastDot = path.lastIndexOf('.');
if (lastDot == -1) {
return "";
}
String extension = path.substring(lastDot + 1).toLowerCase(Locale.ROOT);
String mimeType = DEFAULT_MIME_TYPES.get(extension);
if (mimeType == null) {
return "";
}
return mimeType;
}
static {
// This is not an exhaustive list, just the most common types. Call registerMimeType() to add more.
Map<String, String> mimeTypes = new HashMap<>();
mimeTypes.put("txt", "text/plain");
mimeTypes.put("css", "text/css");
mimeTypes.put("csv", "text/csv");
mimeTypes.put("htm", "text/html");
mimeTypes.put("html", "text/html");
mimeTypes.put("xml", "text/xml");
mimeTypes.put("js", "text/javascript"); // Technically it should be application/javascript (RFC 4329), but IE8 struggles with that
mimeTypes.put("xhtml", "application/xhtml+xml");
mimeTypes.put("json", "application/json");
mimeTypes.put("pdf", "application/pdf");
mimeTypes.put("zip", "application/zip");
mimeTypes.put("tar", "application/x-tar");
mimeTypes.put("gif", "image/gif");
mimeTypes.put("jpeg", "image/jpeg");
mimeTypes.put("jpg", "image/jpeg");
mimeTypes.put("tiff", "image/tiff");
mimeTypes.put("tif", "image/tiff");
mimeTypes.put("png", "image/png");
mimeTypes.put("svg", "image/svg+xml");
mimeTypes.put("ico", "image/vnd.microsoft.icon");
mimeTypes.put("mp3", "audio/mpeg");
DEFAULT_MIME_TYPES = unmodifiableMap(mimeTypes);
}
public static final Map<String, String> DEFAULT_MIME_TYPES;
}

View File

@ -113,7 +113,7 @@ public class NettyHttpChannel extends HttpChannel {
resp = new DefaultHttpResponse(HttpVersion.HTTP_1_1, status);
}
if (RestUtils.isBrowser(nettyRequest.headers().get(USER_AGENT))) {
if (transport.settings().getAsBoolean(SETTING_CORS_ENABLED, false)) {
if (SETTING_CORS_ENABLED.get(transport.settings())) {
String originHeader = request.header(ORIGIN);
if (!Strings.isNullOrEmpty(originHeader)) {
if (corsPattern == null) {
@ -127,12 +127,12 @@ public class NettyHttpChannel extends HttpChannel {
}
if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
// Allow Ajax requests based on the CORS "preflight" request
resp.headers().add(ACCESS_CONTROL_MAX_AGE, transport.settings().getAsInt(SETTING_CORS_MAX_AGE, 1728000));
resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, transport.settings().get(SETTING_CORS_ALLOW_METHODS, "OPTIONS, HEAD, GET, POST, PUT, DELETE"));
resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, transport.settings().get(SETTING_CORS_ALLOW_HEADERS, "X-Requested-With, Content-Type, Content-Length"));
}
if (transport.settings().getAsBoolean(SETTING_CORS_ALLOW_CREDENTIALS, false)) {
if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {
resp.headers().add(ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.http.netty;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -27,7 +26,8 @@ import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -75,9 +75,6 @@ import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_SERVER;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_SEND_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
@ -94,19 +91,19 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
NettyUtils.setup();
}
public static final String SETTING_CORS_ENABLED = "http.cors.enabled";
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_ORIGIN = "http.cors.allow-origin";
public static final String SETTING_CORS_MAX_AGE = "http.cors.max-age";
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_METHODS = "http.cors.allow-methods";
public static final String SETTING_CORS_ALLOW_HEADERS = "http.cors.allow-headers";
public static final String SETTING_CORS_ALLOW_CREDENTIALS = "http.cors.allow-credentials";
public static final String SETTING_PIPELINING = "http.pipelining";
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final String SETTING_PIPELINING_MAX_EVENTS = "http.pipelining.max_events";
public static final String SETTING_HTTP_COMPRESSION = "http.compression";
public static final String SETTING_HTTP_COMPRESSION_LEVEL = "http.compression_level";
public static final String SETTING_HTTP_DETAILED_ERRORS_ENABLED = "http.detailed_errors.enabled";
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final boolean DEFAULT_SETTING_PIPELINING = true;
public static final int DEFAULT_SETTING_PIPELINING_MAX_EVENTS = 10000;
public static final String DEFAULT_PORT_RANGE = "9200-9300";
@ -142,8 +139,8 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
protected int publishPort;
protected final String tcpNoDelay;
protected final String tcpKeepAlive;
protected final boolean tcpNoDelay;
protected final boolean tcpKeepAlive;
protected final boolean reuseAddress;
protected final ByteSizeValue tcpSendBufferSize;
@ -186,17 +183,17 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", settings.getAsBoolean(TCP_BLOCKING_SERVER, settings.getAsBoolean(TCP_BLOCKING, false)));
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
this.port = settings.get("http.netty.port", settings.get("http.port", DEFAULT_PORT_RANGE));
this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
this.publishPort = settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0));
this.tcpNoDelay = settings.get("http.netty.tcp_no_delay", settings.get(TCP_NO_DELAY, "true"));
this.tcpKeepAlive = settings.get("http.netty.tcp_keep_alive", settings.get(TCP_KEEP_ALIVE, "true"));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
this.detailedErrorsEnabled = settings.getAsBoolean(SETTING_HTTP_DETAILED_ERRORS_ENABLED, true);
this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -216,7 +213,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false);
this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6);
this.pipelining = settings.getAsBoolean(SETTING_PIPELINING, DEFAULT_SETTING_PIPELINING);
this.pipelining = SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);
// validate max content length
@ -257,16 +254,13 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
serverBootstrap.setPipelineFactory(configureServerChannelPipelineFactory());
if (!"default".equals(tcpNoDelay)) {
serverBootstrap.setOption("child.tcpNoDelay", Booleans.parseBoolean(tcpNoDelay, null));
}
if (!"default".equals(tcpKeepAlive)) {
serverBootstrap.setOption("child.keepAlive", Booleans.parseBoolean(tcpKeepAlive, null));
}
if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.tcpNoDelay", tcpNoDelay);
serverBootstrap.setOption("child.keepAlive", tcpKeepAlive);
if (tcpSendBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.sendBufferSize", tcpSendBufferSize.bytes());
}
if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
if (tcpReceiveBufferSize.bytes() > 0) {
serverBootstrap.setOption("child.receiveBufferSize", tcpReceiveBufferSize.bytes());
}
serverBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
@ -308,7 +302,8 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
throw new BindHttpException("Publish address [" + publishInetAddress + "] does not match any of the bound addresses [" + boundAddresses + "]");
}
final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort);;
final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort);
this.boundAddress = new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[boundAddresses.size()]), new InetSocketTransportAddress(publishAddress));
}

View File

@ -1065,7 +1065,7 @@ public abstract class Engine implements Closeable {
}
}
public static class CommitId implements Writeable<CommitId> {
public static class CommitId implements Writeable {
private final byte[] id;

View File

@ -372,7 +372,10 @@ public class MatchQueryBuilder extends AbstractQueryBuilder<MatchQueryBuilder> {
return null;
}
if (query instanceof BooleanQuery) {
// If the coordination factor is disabled on a boolean query we don't apply the minimum should match.
// This is done to make sure that the minimum_should_match doesn't get applied when there is only one word
// and multiple variations of the same word in the query (synonyms for instance).
if (query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
} else if (query instanceof ExtendedCommonTermsQuery) {
((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch);
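The same comment and guard are repeated in QueryStringQueryBuilder, SimpleQueryStringBuilder, and MultiMatchQuery below. The rationale, sketched with the Lucene 5.x builder API (field and terms are illustrative): analyzers expand a single input token into a coord-disabled BooleanQuery of synonym variants, and applying minimum_should_match to that expansion would wrongly require several spellings of one word to match.

// one token "fast" expanded to synonyms: coord is disabled on the expansion
BooleanQuery.Builder builder = new BooleanQuery.Builder();
builder.setDisableCoord(true);
builder.add(new TermQuery(new Term("body", "fast")), BooleanClause.Occur.SHOULD);
builder.add(new TermQuery(new Term("body", "quick")), BooleanClause.Occur.SHOULD);
builder.add(new TermQuery(new Term("body", "rapid")), BooleanClause.Occur.SHOULD);
BooleanQuery synonyms = builder.build();

// the new guard leaves such queries alone: matching any one variant must suffice
if (synonyms.isCoordDisabled() == false) {
    // only here would Queries.applyMinimumShouldMatch(synonyms, minimumShouldMatch) be applied
}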

View File

@ -735,7 +735,10 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
}
query = Queries.fixNegativeQueryIfNeeded(query);
if (query instanceof BooleanQuery) {
// If the coordination factor is disabled on a boolean query we don't apply the minimum should match.
// This is done to make sure that the minimum_should_match doesn't get applied when there is only one word
// and multiple variations of the same word in the query (synonyms for instance).
if (query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, this.minimumShouldMatch());
}

View File

@ -285,7 +285,10 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder<SimpleQuerySt
sqp.setDefaultOperator(defaultOperator.toBooleanClauseOccur());
Query query = sqp.parse(queryText);
if (minimumShouldMatch != null && query instanceof BooleanQuery) {
// If the coordination factor is disabled on a boolean query we don't apply the minimum should match.
// This is done to make sure that the minimum_should_match doesn't get applied when there is only one word
// and multiple variations of the same word in the query (synonyms for instance).
if (minimumShouldMatch != null && query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
}
return query;

View File

@ -54,7 +54,10 @@ public class MultiMatchQuery extends MatchQuery {
private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException {
Query query = parse(type, fieldName, value);
if (query instanceof BooleanQuery) {
// If the coordination factor is disabled on a boolean query we don't apply the minimum should match.
// This is done to make sure that the minimum_should_match doesn't get applied when there is only one word
// and multiple variations of the same word in the query (synonyms for instance).
if (query instanceof BooleanQuery && !((BooleanQuery) query).isCoordDisabled()) {
query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
}
if (query != null && boostValue != null && boostValue != AbstractQueryBuilder.DEFAULT_BOOST) {

View File

@ -192,7 +192,17 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
throw e;
}
if (closed.compareAndSet(false, true)) {
return new TranslogReader(generation, channel, path, firstOperationOffset, getWrittenOffset(), operationCounter);
boolean success = false;
try {
final TranslogReader reader = new TranslogReader(generation, channel, path, firstOperationOffset, getWrittenOffset(), operationCounter);
success = true;
return reader;
} finally {
if (success == false) {
// close the channel, as we are closed and failed to create a new reader
IOUtils.closeWhileHandlingException(channel);
}
}
} else {
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "])", tragedy);
}
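The wrapped construction is a standard ownership-transfer idiom: the channel belongs to the writer until the reader is fully built, and only a failed hand-off closes it here. A standalone sketch of the same idiom with hypothetical names (openReader, MyReader):

static MyReader openReader(Path path, long generation) throws IOException {
    FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
    boolean success = false;
    try {
        MyReader reader = new MyReader(generation, channel); // takes ownership of the channel
        success = true;
        return reader;
    } finally {
        if (success == false) {
            // construction failed: close the channel ourselves, swallowing any
            // close failure so the original exception keeps propagating
            IOUtils.closeWhileHandlingException(channel);
        }
    }
}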

View File

@ -23,6 +23,7 @@ import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
@ -70,9 +71,9 @@ import java.util.function.Function;
*/
public class HunspellService extends AbstractComponent {
public final static String HUNSPELL_LAZY_LOAD = "indices.analysis.hunspell.dictionary.lazy";
public final static String HUNSPELL_IGNORE_CASE = "indices.analysis.hunspell.dictionary.ignore_case";
private final static String OLD_HUNSPELL_LOCATION = "indices.analysis.hunspell.dictionary.location";
public final static Setting<Boolean> HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> HUNSPELL_IGNORE_CASE = Setting.boolSetting("indices.analysis.hunspell.dictionary.ignore_case", Boolean.FALSE, false, Setting.Scope.CLUSTER);
public final static Setting<Settings> HUNSPELL_DICTIONARY_OPTIONS = Setting.groupSetting("indices.analysis.hunspell.dictionary.", false, Setting.Scope.CLUSTER);
private final ConcurrentHashMap<String, Dictionary> dictionaries = new ConcurrentHashMap<>();
private final Map<String, Dictionary> knownDictionaries;
private final boolean defaultIgnoreCase;
@ -82,8 +83,8 @@ public class HunspellService extends AbstractComponent {
public HunspellService(final Settings settings, final Environment env, final Map<String, Dictionary> knownDictionaries) throws IOException {
super(settings);
this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries);
this.hunspellDir = resolveHunspellDirectory(settings, env);
this.defaultIgnoreCase = settings.getAsBoolean(HUNSPELL_IGNORE_CASE, false);
this.hunspellDir = resolveHunspellDirectory(env);
this.defaultIgnoreCase = HUNSPELL_IGNORE_CASE.get(settings);
this.loadingFunction = (locale) -> {
try {
return loadDictionary(locale, settings, env);
@ -91,7 +92,7 @@ public class HunspellService extends AbstractComponent {
throw new IllegalStateException("failed to load hunspell dictionary for locale: " + locale, e);
}
};
if (!settings.getAsBoolean(HUNSPELL_LAZY_LOAD, false)) {
if (!HUNSPELL_LAZY_LOAD.get(settings)) {
scanAndLoadDictionaries();
}
@ -110,11 +111,7 @@ public class HunspellService extends AbstractComponent {
return dictionary;
}
private Path resolveHunspellDirectory(Settings settings, Environment env) {
String location = settings.get(OLD_HUNSPELL_LOCATION, null);
if (location != null) {
throw new IllegalArgumentException("please, put your hunspell dictionaries under config/hunspell !");
}
private Path resolveHunspellDirectory(Environment env) {
return env.configFile().resolve("hunspell");
}
@ -162,7 +159,8 @@ public class HunspellService extends AbstractComponent {
}
// merging node settings with hunspell dictionary specific settings
nodeSettings = loadDictionarySettings(dicDir, nodeSettings.getByPrefix("indices.analysis.hunspell.dictionary." + locale + "."));
Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale));
boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);
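HUNSPELL_DICTIONARY_OPTIONS is a group setting: it captures every key under the indices.analysis.hunspell.dictionary. prefix as one nested Settings object, replacing the hand-built prefix string above. A sketch of the intended lookup, assuming node settings such as indices.analysis.hunspell.dictionary.en_US.ignore_case: true:

Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings); // all keys under the prefix
Settings localeSettings = dictSettings.getByPrefix(locale);            // narrow to one dictionary
boolean ignoreCase = localeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);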

View File

@ -40,6 +40,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@ -80,10 +81,10 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
* since we always check against the cluster state IndexMetaData.
*/
public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", true, true, Setting.Scope.INDEX);
public static final String INDICES_CACHE_REQUEST_CLEAN_INTERVAL = "indices.requests.cache.clean_interval";
public static final Setting<TimeValue> INDICES_CACHE_REQUEST_CLEAN_INTERVAL = Setting.positiveTimeSetting("indices.requests.cache.clean_interval", TimeValue.timeValueSeconds(60), false, Setting.Scope.CLUSTER);
public static final String INDICES_CACHE_QUERY_SIZE = "indices.requests.cache.size";
public static final String INDICES_CACHE_QUERY_EXPIRE = "indices.requests.cache.expire";
public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting("indices.requests.cache.expire", new TimeValue(0), false, Setting.Scope.CLUSTER);
private static final Set<SearchType> CACHEABLE_SEARCH_TYPES = EnumSet.of(SearchType.QUERY_THEN_FETCH, SearchType.QUERY_AND_FETCH);
@ -98,7 +99,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
// TODO: make these settings configurable on the cluster level
private final String size;
private final ByteSizeValue size;
private final TimeValue expire;
private volatile Cache<Key, Value> cache;
@ -108,11 +109,11 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
super(settings);
this.clusterService = clusterService;
this.threadPool = threadPool;
this.cleanInterval = settings.getAsTime(INDICES_CACHE_REQUEST_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));
this.cleanInterval = INDICES_CACHE_REQUEST_CLEAN_INTERVAL.get(settings);
this.size = settings.get(INDICES_CACHE_QUERY_SIZE, "1%");
this.size = INDICES_CACHE_QUERY_SIZE.get(settings);
this.expire = settings.getAsTime(INDICES_CACHE_QUERY_EXPIRE, null);
this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null;
buildCache();
this.reaper = new Reaper();
@ -121,7 +122,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
private void buildCache() {
long sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size, INDICES_CACHE_QUERY_SIZE).bytes();
long sizeInBytes = size.bytes();
CacheBuilder<Key, Value> cacheBuilder = CacheBuilder.<Key, Value>builder()
.setMaximumWeight(sizeInBytes).weigher((k, v) -> k.ramBytesUsed() + v.ramBytesUsed()).removalListener(this);

View File

@ -33,6 +33,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@ -53,8 +54,8 @@ import java.util.function.ToLongBiFunction;
*/
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable> {
public static final String FIELDDATA_CLEAN_INTERVAL_SETTING = "indices.fielddata.cache.cleanup_interval";
public static final String INDICES_FIELDDATA_CACHE_SIZE_KEY = "indices.fielddata.cache.size";
public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
@ -68,18 +69,16 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
super(settings);
this.threadPool = threadPool;
this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).bytes();
CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.<Key, Accountable>builder()
.removalListener(this);
if (sizeInBytes > 0) {
cacheBuilder.setMaximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
}
logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes));
cache = cacheBuilder.build();
this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1));
this.cleanInterval = INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING.get(settings);
// Start thread that will manage cleaning the field data cache periodically
threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));

View File

@ -35,6 +35,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@ -68,7 +69,7 @@ import java.util.concurrent.atomic.AtomicInteger;
public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable {
// TODO this class can be folded into IndicesService and partially into IndicesClusterStateService; there is no need for a separate public service
public static final String INDICES_STORE_DELETE_SHARD_TIMEOUT = "indices.store.delete.shard.timeout";
public static final Setting<TimeValue> INDICES_STORE_DELETE_SHARD_TIMEOUT = Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists";
private static final EnumSet<IndexShardState> ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED);
private final IndicesService indicesService;
@ -85,7 +86,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
this.clusterService = clusterService;
this.transportService = transportService;
transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest::new, ThreadPool.Names.SAME, new ShardActiveRequestHandler());
this.deleteShardTimeout = settings.getAsTime(INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS));
this.deleteShardTimeout = INDICES_STORE_DELETE_SHARD_TIMEOUT.get(settings);
clusterService.addLast(this);
}

View File

@ -123,7 +123,7 @@ public class Node implements Releasable {
public static final Setting<Boolean> NODE_INGEST_SETTING = Setting.boolSetting("node.ingest", true, false, Setting.Scope.CLUSTER);
private static final String CLIENT_TYPE = "node";
public static final String HTTP_ENABLED = "http.enabled";
public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER);
private final Lifecycle lifecycle = new Lifecycle();
private final Injector injector;
private final Settings settings;
@ -286,7 +286,7 @@ public class Node implements Releasable {
injector.getInstance(ResourceWatcherService.class).start();
injector.getInstance(TribeService.class).start();
if (System.getProperty("es.tests.portsfile", "false").equals("true")) {
if (WRITE_PORTS_FIELD_SETTING.get(settings)) {
if (settings.getAsBoolean("http.enabled", true)) {
HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
writePortsFile("http", http.boundAddress());

View File

@ -21,7 +21,6 @@ package org.elasticsearch.node.internal;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
@ -108,8 +107,7 @@ public class InternalSettingsPreparer {
environment = new Environment(output.build());
// we put back the path.logs so we can use it in the logging configuration file
output.put("path.logs", cleanPath(environment.logsFile().toAbsolutePath().toString()));
output.put(Environment.PATH_LOGS_SETTING.getKey(), cleanPath(environment.logsFile().toAbsolutePath().toString()));
return new Environment(output.build());
}

View File

@ -20,9 +20,9 @@ package org.elasticsearch.plugins;
public class DummyPluginInfo extends PluginInfo {
private DummyPluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) {
super(name, description, site, version, jvm, classname, isolated);
private DummyPluginInfo(String name, String description, String version, String classname, boolean isolated) {
super(name, description, version, classname, isolated);
}
public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", true, "dummy_plugin_version", true, "DummyPluginName", true);
public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", "dummy_plugin_version", "DummyPluginName", true);
}

View File

@ -42,19 +42,14 @@ public class PluginInfo implements Streamable, ToXContent {
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
static final XContentBuilderString URL = new XContentBuilderString("url");
static final XContentBuilderString SITE = new XContentBuilderString("site");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString JVM = new XContentBuilderString("jvm");
static final XContentBuilderString CLASSNAME = new XContentBuilderString("classname");
static final XContentBuilderString ISOLATED = new XContentBuilderString("isolated");
}
private String name;
private String description;
private boolean site;
private String version;
private boolean jvm;
private String classname;
private boolean isolated;
@ -66,15 +61,11 @@ public class PluginInfo implements Streamable, ToXContent {
*
* @param name Its name
* @param description Its description
* @param site true if it's a site plugin
* @param jvm true if it's a jvm plugin
* @param version Version number
*/
PluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) {
PluginInfo(String name, String description, String version, String classname, boolean isolated) {
this.name = name;
this.description = description;
this.site = site;
this.jvm = jvm;
this.version = version;
this.classname = classname;
this.isolated = isolated;
@ -101,43 +92,28 @@ public class PluginInfo implements Streamable, ToXContent {
throw new IllegalArgumentException("Property [version] is missing for plugin [" + name + "]");
}
boolean jvm = Boolean.parseBoolean(props.getProperty("jvm"));
boolean site = Boolean.parseBoolean(props.getProperty("site"));
if (jvm == false && site == false) {
throw new IllegalArgumentException("Plugin [" + name + "] must be at least a jvm or site plugin");
String esVersionString = props.getProperty("elasticsearch.version");
if (esVersionString == null) {
throw new IllegalArgumentException("Property [elasticsearch.version] is missing for plugin [" + name + "]");
}
boolean isolated = true;
String classname = "NA";
if (jvm) {
String esVersionString = props.getProperty("elasticsearch.version");
if (esVersionString == null) {
throw new IllegalArgumentException("Property [elasticsearch.version] is missing for jvm plugin [" + name + "]");
}
Version esVersion = Version.fromString(esVersionString);
if (esVersion.equals(Version.CURRENT) == false) {
throw new IllegalArgumentException("Plugin [" + name + "] is incompatible with Elasticsearch [" + Version.CURRENT.toString() +
"]. Was designed for version [" + esVersionString + "]");
}
String javaVersionString = props.getProperty("java.version");
if (javaVersionString == null) {
throw new IllegalArgumentException("Property [java.version] is missing for jvm plugin [" + name + "]");
}
JarHell.checkVersionFormat(javaVersionString);
JarHell.checkJavaVersion(name, javaVersionString);
isolated = Boolean.parseBoolean(props.getProperty("isolated", "true"));
classname = props.getProperty("classname");
if (classname == null) {
throw new IllegalArgumentException("Property [classname] is missing for jvm plugin [" + name + "]");
}
Version esVersion = Version.fromString(esVersionString);
if (esVersion.equals(Version.CURRENT) == false) {
throw new IllegalArgumentException("Plugin [" + name + "] is incompatible with Elasticsearch [" + Version.CURRENT.toString() +
"]. Was designed for version [" + esVersionString + "]");
}
String javaVersionString = props.getProperty("java.version");
if (javaVersionString == null) {
throw new IllegalArgumentException("Property [java.version] is missing for plugin [" + name + "]");
}
JarHell.checkVersionFormat(javaVersionString);
JarHell.checkJavaVersion(name, javaVersionString);
boolean isolated = Boolean.parseBoolean(props.getProperty("isolated", "true"));
String classname = props.getProperty("classname");
if (classname == null) {
throw new IllegalArgumentException("Property [classname] is missing for plugin [" + name + "]");
}
if (site) {
if (!Files.exists(dir.resolve("_site"))) {
throw new IllegalArgumentException("Plugin [" + name + "] is a site plugin but has no '_site/' directory");
}
}
return new PluginInfo(name, description, site, version, jvm, classname, isolated);
return new PluginInfo(name, description, version, classname, isolated);
}
/**
@ -155,46 +131,19 @@ public class PluginInfo implements Streamable, ToXContent {
}
/**
* @return true if it's a site plugin
*/
public boolean isSite() {
return site;
}
/**
* @return true if it's a plugin running in the jvm
*/
public boolean isJvm() {
return jvm;
}
/**
* @return true if jvm plugin has isolated classloader
* @return true if plugin has isolated classloader
*/
public boolean isIsolated() {
return isolated;
}
/**
* @return jvm plugin's classname
* @return plugin's classname
*/
public String getClassname() {
return classname;
}
/**
* We compute the URL for sites: "/_plugin/" + name + "/"
*
* @return relative URL for site plugin
*/
public String getUrl() {
if (site) {
return ("/_plugin/" + name + "/");
} else {
return null;
}
}
/**
* @return Version number for the plugin
*/
@ -212,8 +161,6 @@ public class PluginInfo implements Streamable, ToXContent {
public void readFrom(StreamInput in) throws IOException {
this.name = in.readString();
this.description = in.readString();
this.site = in.readBoolean();
this.jvm = in.readBoolean();
this.version = in.readString();
this.classname = in.readString();
this.isolated = in.readBoolean();
@ -223,8 +170,6 @@ public class PluginInfo implements Streamable, ToXContent {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeString(description);
out.writeBoolean(site);
out.writeBoolean(jvm);
out.writeString(version);
out.writeString(classname);
out.writeBoolean(isolated);
@ -236,15 +181,8 @@ public class PluginInfo implements Streamable, ToXContent {
builder.field(Fields.NAME, name);
builder.field(Fields.VERSION, version);
builder.field(Fields.DESCRIPTION, description);
if (site) {
builder.field(Fields.URL, getUrl());
}
builder.field(Fields.JVM, jvm);
if (jvm) {
builder.field(Fields.CLASSNAME, classname);
builder.field(Fields.ISOLATED, isolated);
}
builder.field(Fields.SITE, site);
builder.field(Fields.CLASSNAME, classname);
builder.field(Fields.ISOLATED, isolated);
builder.endObject();
return builder;
@ -274,14 +212,9 @@ public class PluginInfo implements Streamable, ToXContent {
.append("- Plugin information:\n")
.append("Name: ").append(name).append("\n")
.append("Description: ").append(description).append("\n")
.append("Site: ").append(site).append("\n")
.append("Version: ").append(version).append("\n")
.append("JVM: ").append(jvm).append("\n");
if (jvm) {
information.append(" * Classname: ").append(classname).append("\n");
information.append(" * Isolated: ").append(isolated);
}
.append(" * Classname: ").append(classname).append("\n")
.append(" * Isolated: ").append(isolated);
return information.toString();
}
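readFrom and writeTo are edited in lockstep, so the implicit wire contract is now the five-field order: name, description, version, classname, isolated. A round-trip sketch; the no-arg construction and the BytesStreamOutput/streamInput plumbing are assumptions about the surrounding codebase, not shown in this diff:

    BytesStreamOutput out = new BytesStreamOutput();
    pluginInfo.writeTo(out);                 // name, description, version, classname, isolated
    PluginInfo copy = new PluginInfo();      // hypothetical no-arg constructor for Streamable use
    copy.readFrom(out.bytes().streamInput());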

View File

@ -259,9 +259,7 @@ public class PluginManager {
}
// check for jar hell before any copying
if (info.isJvm()) {
jarHellCheck(root, info.isIsolated());
}
jarHellCheck(root, info.isIsolated());
// read optional security policy (extra permissions)
// if it exists, confirm or warn the user

View File

@ -98,7 +98,7 @@ public class PluginsService extends AbstractComponent {
// first we load plugins that are on the classpath. this is for tests and transport clients
for (Class<? extends Plugin> pluginClass : classpathPlugins) {
Plugin plugin = loadPlugin(pluginClass, settings);
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), false, "NA", true, pluginClass.getName(), false);
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), "NA", pluginClass.getName(), false);
if (logger.isTraceEnabled()) {
logger.trace("plugin loaded from classpath [{}]", pluginInfo);
}
@ -136,18 +136,10 @@ public class PluginsService extends AbstractComponent {
plugins = Collections.unmodifiableList(pluginsLoaded);
// We need to build a List of jvm and site plugins for checking mandatory plugins
Map<String, Plugin> jvmPlugins = new HashMap<>();
List<String> sitePlugins = new ArrayList<>();
// We need to build a List of plugins for checking mandatory plugins
Set<String> pluginsNames = new HashSet<>();
for (Tuple<PluginInfo, Plugin> tuple : plugins) {
PluginInfo info = tuple.v1();
if (info.isJvm()) {
jvmPlugins.put(info.getName(), tuple.v2());
}
if (info.isSite()) {
sitePlugins.add(info.getName());
}
pluginsNames.add(tuple.v1().getName());
}
// Checking expected plugins
@ -155,7 +147,7 @@ public class PluginsService extends AbstractComponent {
if (mandatoryPlugins != null) {
Set<String> missingPlugins = new HashSet<>();
for (String mandatoryPlugin : mandatoryPlugins) {
if (!jvmPlugins.containsKey(mandatoryPlugin) && !sitePlugins.contains(mandatoryPlugin) && !missingPlugins.contains(mandatoryPlugin)) {
if (!pluginsNames.contains(mandatoryPlugin) && !missingPlugins.contains(mandatoryPlugin)) {
missingPlugins.add(mandatoryPlugin);
}
}
@ -175,10 +167,11 @@ public class PluginsService extends AbstractComponent {
jvmPluginNames.add(pluginInfo.getName());
}
logger.info("modules {}, plugins {}, sites {}", moduleNames, jvmPluginNames, sitePlugins);
logger.info("modules {}, plugins {}", moduleNames, jvmPluginNames);
Map<Plugin, List<OnModuleReference>> onModuleReferences = new HashMap<>();
for (Plugin plugin : jvmPlugins.values()) {
for (Tuple<PluginInfo, Plugin> pluginEntry : plugins) {
Plugin plugin = pluginEntry.v2();
List<OnModuleReference> list = new ArrayList<>();
for (Method method : plugin.getClass().getMethods()) {
if (!method.getName().equals("onModule")) {
@ -304,9 +297,6 @@ public class PluginsService extends AbstractComponent {
continue; // skip over .DS_Store etc
}
PluginInfo info = PluginInfo.readFromProperties(module);
if (!info.isJvm()) {
throw new IllegalStateException("modules must be jvm plugins: " + info);
}
if (!info.isIsolated()) {
throw new IllegalStateException("modules must be isolated: " + info);
}
@ -353,17 +343,14 @@ public class PluginsService extends AbstractComponent {
}
List<URL> urls = new ArrayList<>();
if (info.isJvm()) {
// a jvm plugin: gather urls for jar files
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
for (Path jar : jarStream) {
// normalize with toRealPath to get symlinks out of our hair
urls.add(jar.toRealPath().toUri().toURL());
}
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
for (Path jar : jarStream) {
// normalize with toRealPath to get symlinks out of our hair
urls.add(jar.toRealPath().toUri().toURL());
}
}
final Bundle bundle;
if (info.isJvm() && info.isIsolated() == false) {
if (info.isIsolated() == false) {
bundle = bundles.get(0); // purgatory
} else {
bundle = new Bundle();
@ -395,15 +382,10 @@ public class PluginsService extends AbstractComponent {
// create a child to load the plugins in this bundle
ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]), getClass().getClassLoader());
for (PluginInfo pluginInfo : bundle.plugins) {
final Plugin plugin;
if (pluginInfo.isJvm()) {
// reload lucene SPI with any new services from the plugin
reloadLuceneSPI(loader);
Class<? extends Plugin> pluginClass = loadPluginClass(pluginInfo.getClassname(), loader);
plugin = loadPlugin(pluginClass, settings);
} else {
plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription());
}
// reload lucene SPI with any new services from the plugin
reloadLuceneSPI(loader);
final Class<? extends Plugin> pluginClass = loadPluginClass(pluginInfo.getClassname(), loader);
final Plugin plugin = loadPlugin(pluginClass, settings);
plugins.add(new Tuple<>(pluginInfo, plugin));
}
}
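With the site branch gone, every bundle takes a single path: collect jar URLs, build a child URLClassLoader, reload Lucene's SPI so services packaged in the plugin register, then reflectively instantiate the plugin class. A condensed sketch of that sequence, names as in the hunk, exception handling elided:

    List<URL> urls = new ArrayList<>();
    try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(pluginDir, "*.jar")) {
        for (Path jar : jarStream) {
            urls.add(jar.toRealPath().toUri().toURL());   // resolve symlinks before building the loader
        }
    }
    ClassLoader loader = URLClassLoader.newInstance(urls.toArray(new URL[0]), getClass().getClassLoader());
    // reloadLuceneSPI(loader), loadPluginClass(...), loadPlugin(...) follow as in the hunk above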

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.fs.FsBlobStore;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.snapshots.IndexShardRepository;
@ -33,6 +34,7 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import java.io.IOException;
import java.nio.file.Path;
import java.util.function.Function;
/**
* Shared file system implementation of the BlobStoreRepository
@ -49,6 +51,13 @@ public class FsRepository extends BlobStoreRepository {
public final static String TYPE = "fs";
public static final Setting<String> LOCATION_SETTING = new Setting<>("location", "", Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> REPOSITORIES_LOCATION_SETTING = new Setting<>("repositories.fs.location", LOCATION_SETTING, Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", "-1", false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> REPOSITORIES_CHUNK_SIZE_SETTING = Setting.byteSizeSetting("repositories.fs.chunk_size", "-1", false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> REPOSITORIES_COMPRESS_SETTING = Setting.boolSetting("repositories.fs.compress", false, false, Setting.Scope.CLUSTER);
private final FsBlobStore blobStore;
private ByteSizeValue chunkSize;
@ -68,8 +77,8 @@ public class FsRepository extends BlobStoreRepository {
public FsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, Environment environment) throws IOException {
super(name.getName(), repositorySettings, indexShardRepository);
Path locationFile;
String location = repositorySettings.settings().get("location", settings.get("repositories.fs.location"));
if (location == null) {
String location = REPOSITORIES_LOCATION_SETTING.get(repositorySettings.settings());
if (location.isEmpty()) {
logger.warn("the repository location is missing, it should point to a shared file system location that is available on all master and data nodes");
throw new RepositoryException(name.name(), "missing location");
}
@ -85,8 +94,14 @@ public class FsRepository extends BlobStoreRepository {
}
blobStore = new FsBlobStore(settings, locationFile);
this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", settings.getAsBytesSize("repositories.fs.chunk_size", null));
this.compress = repositorySettings.settings().getAsBoolean("compress", settings.getAsBoolean("repositories.fs.compress", false));
if (CHUNK_SIZE_SETTING.exists(repositorySettings.settings())) {
this.chunkSize = CHUNK_SIZE_SETTING.get(repositorySettings.settings());
} else if (REPOSITORIES_CHUNK_SIZE_SETTING.exists(settings)) {
this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(settings);
} else {
this.chunkSize = null;
}
this.compress = COMPRESS_SETTING.exists(repositorySettings.settings()) ? COMPRESS_SETTING.get(repositorySettings.settings()) : REPOSITORIES_COMPRESS_SETTING.get(settings);
this.basePath = BlobPath.cleanPath();
}
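The chunk_size/compress wiring above is the general shape for a per-repository setting with a node-level fallback. As a hypothetical helper (not part of this commit) the pattern reads:

    private static <T> T repoOrNode(Setting<T> repoSetting, Settings repoSettings,
                                    Setting<T> nodeSetting, Settings nodeSettings) {
        return repoSetting.exists(repoSettings)
            ? repoSetting.get(repoSettings)
            : nodeSetting.get(nodeSettings);
    }

chunk_size cannot use the two-way form directly because "unset everywhere" must map to null rather than to the -1 default, hence the three-branch version above.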

View File

@ -20,11 +20,11 @@
package org.elasticsearch.repositories.uri;
import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.url.URLBlobStore;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.util.URIPattern;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.snapshots.IndexShardRepository;
@ -34,9 +34,13 @@ import org.elasticsearch.repositories.RepositorySettings;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
/**
* Read-only URL-based implementation of the BlobStoreRepository
@ -51,13 +55,21 @@ public class URLRepository extends BlobStoreRepository {
public final static String TYPE = "url";
public final static String[] DEFAULT_SUPPORTED_PROTOCOLS = {"http", "https", "ftp", "file", "jar"};
public static final Setting<List<String>> SUPPORTED_PROTOCOLS_SETTING = Setting.listSetting("repositories.url.supported_protocols",
Arrays.asList("http", "https", "ftp", "file", "jar"), Function.identity(), false, Setting.Scope.CLUSTER);
public final static String SUPPORTED_PROTOCOLS_SETTING = "repositories.url.supported_protocols";
public static final Setting<List<URIPattern>> ALLOWED_URLS_SETTING = Setting.listSetting("repositories.url.allowed_urls",
Collections.emptyList(), URIPattern::new, false, Setting.Scope.CLUSTER);
public final static String ALLOWED_URLS_SETTING = "repositories.url.allowed_urls";
public static final Setting<URL> URL_SETTING = new Setting<>("url", "http:", URLRepository::parseURL, false, Setting.Scope.CLUSTER);
public static final Setting<URL> REPOSITORIES_URL_SETTING = new Setting<>("repositories.url.url", (s) -> s.get("repositories.uri.url", "http:"),
URLRepository::parseURL, false, Setting.Scope.CLUSTER);
private final String[] supportedProtocols;
public static final Setting<Boolean> LIST_DIRECTORIES_SETTING = Setting.boolSetting("list_directories", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> REPOSITORIES_LIST_DIRECTORIES_SETTING = Setting.boolSetting("repositories.uri.list_directories", true,
false, Setting.Scope.CLUSTER);
private final List<String> supportedProtocols;
private final URIPattern[] urlWhiteList;
@ -79,21 +91,16 @@ public class URLRepository extends BlobStoreRepository {
@Inject
public URLRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, Environment environment) throws IOException {
super(name.getName(), repositorySettings, indexShardRepository);
URL url;
String path = repositorySettings.settings().get("url", settings.get("repositories.url.url", settings.get("repositories.uri.url")));
if (path == null) {
if (URL_SETTING.exists(repositorySettings.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) {
throw new RepositoryException(name.name(), "missing url");
} else {
url = new URL(path);
}
supportedProtocols = settings.getAsArray(SUPPORTED_PROTOCOLS_SETTING, DEFAULT_SUPPORTED_PROTOCOLS);
String[] urlWhiteList = settings.getAsArray(ALLOWED_URLS_SETTING, Strings.EMPTY_ARRAY);
this.urlWhiteList = new URIPattern[urlWhiteList.length];
for (int i = 0; i < urlWhiteList.length; i++) {
this.urlWhiteList[i] = new URIPattern(urlWhiteList[i]);
}
supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings);
urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{});
this.environment = environment;
listDirectories = repositorySettings.settings().getAsBoolean("list_directories", settings.getAsBoolean("repositories.uri.list_directories", true));
listDirectories = LIST_DIRECTORIES_SETTING.exists(repositorySettings.settings()) ? LIST_DIRECTORIES_SETTING.get(repositorySettings.settings()) : REPOSITORIES_LIST_DIRECTORIES_SETTING.get(settings);
URL url = URL_SETTING.exists(repositorySettings.settings()) ? URL_SETTING.get(repositorySettings.settings()) : REPOSITORIES_URL_SETTING.get(settings);
URL normalizedURL = checkURL(url);
blobStore = new URLBlobStore(settings, normalizedURL);
basePath = BlobPath.cleanPath();
@ -147,8 +154,8 @@ public class URLRepository extends BlobStoreRepository {
// We didn't match white list - try to resolve against path.repo
URL normalizedUrl = environment.resolveRepoURL(url);
if (normalizedUrl == null) {
logger.warn("The specified url [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] or by repositories.url.allowed_urls setting: [{}] ", url, environment.repoFiles());
throw new RepositoryException(repositoryName, "file url [" + url + "] doesn't match any of the locations specified by path.repo or repositories.url.allowed_urls");
logger.warn("The specified url [{}] doesn't start with any repository paths specified by the path.repo setting or by {} setting: [{}] ", url, ALLOWED_URLS_SETTING.getKey(), environment.repoFiles());
throw new RepositoryException(repositoryName, "file url [" + url + "] doesn't match any of the locations specified by path.repo or " + ALLOWED_URLS_SETTING.getKey());
}
return normalizedUrl;
}
@ -161,4 +168,11 @@ public class URLRepository extends BlobStoreRepository {
return true;
}
private static URL parseURL(String s) {
try {
return new URL(s);
} catch (MalformedURLException e) {
throw new IllegalArgumentException("Unable to parse URL repository setting", e);
}
}
}
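Moving URL parsing into the Setting means a malformed value now fails when the setting is read, as a uniform IllegalArgumentException, instead of a MalformedURLException escaping from the constructor. A hypothetical illustration (the builder name is assumed from this era of the codebase):

    Settings repoSettings = Settings.settingsBuilder().put("url", "not a url").build();
    // throws IllegalArgumentException("Unable to parse URL repository setting")
    URL url = URL_SETTING.get(repoSettings);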

View File

@ -84,8 +84,6 @@ public class RestPluginsAction extends AbstractCatAction {
table.addCell("name", "alias:n;desc:node name");
table.addCell("component", "alias:c;desc:component");
table.addCell("version", "alias:v;desc:component version");
table.addCell("type", "alias:t;desc:type (j for JVM, s for Site)");
table.addCell("url", "alias:u;desc:url for site plugins");
table.addCell("description", "alias:d;default:false;desc:plugin details");
table.endHeaders();
return table;
@ -104,22 +102,6 @@ public class RestPluginsAction extends AbstractCatAction {
table.addCell(node.name());
table.addCell(pluginInfo.getName());
table.addCell(pluginInfo.getVersion());
String type;
if (pluginInfo.isSite()) {
if (pluginInfo.isJvm()) {
type = "j/s";
} else {
type = "s";
}
} else {
if (pluginInfo.isJvm()) {
type = "j";
} else {
type = "";
}
}
table.addCell(type);
table.addCell(pluginInfo.getUrl());
table.addCell(pluginInfo.getDescription());
table.endRow();
}

View File

@ -46,6 +46,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@ -84,8 +85,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic";
public static final String DEFAULT_SCRIPTING_LANGUAGE_SETTING = "script.default_lang";
public static final String SCRIPT_CACHE_SIZE_SETTING = "script.cache.max_size";
public static final int SCRIPT_CACHE_SIZE_DEFAULT = 100;
public static final Setting<Integer> SCRIPT_CACHE_SIZE_SETTING = Setting.intSetting("script.cache.max_size", 100, 0, false, Setting.Scope.CLUSTER);
public static final String SCRIPT_CACHE_EXPIRE_SETTING = "script.cache.expire";
public static final String SCRIPT_INDEX = ".scripts";
public static final String DEFAULT_LANG = "groovy";
@ -148,7 +148,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
this.scriptEngines = scriptEngines;
this.scriptContextRegistry = scriptContextRegistry;
int cacheMaxSize = settings.getAsInt(SCRIPT_CACHE_SIZE_SETTING, SCRIPT_CACHE_SIZE_DEFAULT);
int cacheMaxSize = SCRIPT_CACHE_SIZE_SETTING.get(settings);
TimeValue cacheExpire = settings.getAsTime(SCRIPT_CACHE_EXPIRE_SETTING, null);
logger.debug("using script cache with max_size [{}], expire [{}]", cacheMaxSize, cacheExpire);

View File

@ -122,8 +122,9 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
public class SearchService extends AbstractLifecycleComponent<SearchService> implements IndexEventListener {
public static final Setting<Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", Loading.LAZY.toString(), (s) -> Loading.parse(s, Loading.LAZY), false, Setting.Scope.INDEX);
public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive";
public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval";
// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), false, Setting.Scope.CLUSTER);
public static final TimeValue NO_TIMEOUT = timeValueMillis(-1);
public static final Setting<TimeValue> DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER);
@ -183,9 +184,8 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
this.fetchPhase = fetchPhase;
this.indicesQueryCache = indicesQueryCache;
TimeValue keepAliveInterval = settings.getAsTime(KEEPALIVE_INTERVAL_KEY, timeValueMinutes(1));
// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
this.defaultKeepAlive = settings.getAsTime(DEFAULT_KEEPALIVE_KEY, timeValueMinutes(5)).millis();
TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings);
this.defaultKeepAlive = DEFAULT_KEEPALIVE_SETTING.get(settings).millis();
Map<String, SearchParseElement> elementParsers = new HashMap<>();
elementParsers.putAll(dfsPhase.parseElements());

View File

@ -62,8 +62,7 @@ public class GeoHashGridParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<ValuesSource.GeoPoint> vsParser = ValuesSourceParser.geoPoint(aggregationName, InternalGeoHashGrid.TYPE, context)
.build();
ValuesSourceParser vsParser = ValuesSourceParser.geoPoint(aggregationName, InternalGeoHashGrid.TYPE, context).build();
int precision = GeoHashGridParams.DEFAULT_PRECISION;
int requiredSize = GeoHashGridParams.DEFAULT_MAX_NUM_CELLS;
@ -132,7 +131,6 @@ public class GeoHashGridParser implements Aggregator.Parser {
final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize,
Collections.<InternalGeoHashGrid.Bucket> emptyList(), pipelineAggregators, metaData);
return new NonCollectingAggregator(name, aggregationContext, parent, pipelineAggregators, metaData) {
@Override
public InternalAggregation buildEmptyAggregation() {
return aggregation;
}

View File

@ -28,7 +28,6 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -79,7 +78,7 @@ public class DateHistogramParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<Numeric> vsParser = ValuesSourceParser.numeric(aggregationName, InternalDateHistogram.TYPE, context)
ValuesSourceParser vsParser = ValuesSourceParser.numeric(aggregationName, InternalDateHistogram.TYPE, context)
.targetValueType(ValueType.DATE)
.formattable(true)
.timezoneAware(true)
@ -191,7 +190,7 @@ public class DateHistogramParser implements Aggregator.Parser {
.timeZone(vsParser.input().timezone())
.offset(offset).build();
ValuesSourceConfig<Numeric> config = vsParser.config();
ValuesSourceConfig config = vsParser.config();
return new HistogramAggregator.Factory(aggregationName, config, rounding, order, keyed, minDocCount, extendedBounds,
new InternalDateHistogram.Factory());

View File

@ -25,7 +25,6 @@ import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.aggregations.support.format.ValueParser;
import org.elasticsearch.search.internal.SearchContext;
@ -47,7 +46,7 @@ public class HistogramParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<Numeric> vsParser = ValuesSourceParser.numeric(aggregationName, InternalHistogram.TYPE, context)
ValuesSourceParser vsParser = ValuesSourceParser.numeric(aggregationName, InternalHistogram.TYPE, context)
.targetValueType(ValueType.NUMERIC)
.formattable(true)
.build();
@ -128,7 +127,7 @@ public class HistogramParser implements Aggregator.Parser {
Rounding rounding = new Rounding.Interval(interval);
if (offset != 0) {
rounding = new Rounding.OffsetRounding(rounding, offset);
rounding = new Rounding.OffsetRounding((Rounding.Interval) rounding, offset);
}
if (extendedBounds != null) {
@ -137,7 +136,7 @@ public class HistogramParser implements Aggregator.Parser {
}
return new HistogramAggregator.Factory(aggregationName, vsParser.config(), rounding, order, keyed, minDocCount, extendedBounds,
new InternalHistogram.Factory<>());
new InternalHistogram.Factory());
}

View File

@ -81,9 +81,9 @@ public class MissingAggregator extends SingleBucketAggregator {
return new InternalMissing(name, 0, buildEmptySubAggregations(), pipelineAggregators(), metaData());
}
public static class Factory extends ValuesSourceAggregatorFactory<ValuesSource> {
public static class Factory extends ValuesSourceAggregatorFactory<ValuesSource> {
public Factory(String name, ValuesSourceConfig<ValuesSource> valueSourceConfig) {
public Factory(String name, ValuesSourceConfig valueSourceConfig) {
super(name, InternalMissing.TYPE.name(), valueSourceConfig);
}

View File

@ -22,7 +22,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -40,7 +39,8 @@ public class MissingParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, InternalMissing.TYPE, context)
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, InternalMissing.TYPE, context)
.scriptable(false)
.build();

View File

@ -203,8 +203,7 @@ public class SamplerAggregator extends SingleBucketAggregator {
private int maxDocsPerValue;
private String executionHint;
public DiversifiedFactory(String name, int shardSize, String executionHint, ValuesSourceConfig<ValuesSource> vsConfig,
int maxDocsPerValue) {
public DiversifiedFactory(String name, int shardSize, String executionHint, ValuesSourceConfig vsConfig, int maxDocsPerValue) {
super(name, InternalSampler.TYPE.name(), vsConfig);
this.shardSize = shardSize;
this.maxDocsPerValue = maxDocsPerValue;

View File

@ -23,7 +23,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -56,10 +55,10 @@ public class SamplerParser implements Aggregator.Parser {
String executionHint = null;
int shardSize = DEFAULT_SHARD_SAMPLE_SIZE;
int maxDocsPerValue = MAX_DOCS_PER_VALUE_DEFAULT;
ValuesSourceParser vsParser = null;
boolean diversityChoiceMade = false;
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, InternalSampler.TYPE, context).scriptable(true)
.formattable(false).build();
vsParser = ValuesSourceParser.any(aggregationName, InternalSampler.TYPE, context).scriptable(true).formattable(false).build();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
@ -89,7 +88,7 @@ public class SamplerParser implements Aggregator.Parser {
}
}
ValuesSourceConfig<ValuesSource> vsConfig = vsParser.config();
ValuesSourceConfig vsConfig = vsParser.config();
if (vsConfig.valid()) {
return new SamplerAggregator.DiversifiedFactory(aggregationName, shardSize, executionHint, vsConfig, maxDocsPerValue);
} else {

View File

@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations.bucket.significant;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
@ -79,6 +80,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) valuesSource;
IndexSearcher indexSearcher = aggregationContext.searchContext().searcher();
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsSignificantTermsAggregator(name, factories,
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext,
@ -95,8 +98,9 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext, parent,
termsAggregatorFactory, pipelineAggregators, metaData);
(ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter,
aggregationContext,
parent, termsAggregatorFactory, pipelineAggregators, metaData);
}
};
@ -139,7 +143,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
return new TermsAggregator.BucketCountThresholds(bucketCountThresholds);
}
public SignificantTermsAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> valueSourceConfig, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
public SignificantTermsAggregatorFactory(String name, ValuesSourceConfig valueSourceConfig, TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
String executionHint, Query filter, SignificanceHeuristic significanceHeuristic) {
super(name, SignificantStringTerms.TYPE.name(), valueSourceConfig);

View File

@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParserMapper;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -54,7 +53,7 @@ public class SignificantTermsParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
SignificantTermsParametersParser aggParser = new SignificantTermsParametersParser(significanceHeuristicParserMapper);
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, SignificantStringTerms.TYPE, context)
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, SignificantStringTerms.TYPE, context)
.scriptable(false)
.formattable(true)
.build();

View File

@ -36,13 +36,13 @@ public abstract class AbstractTermsParametersParser {
public static final ParseField SHARD_MIN_DOC_COUNT_FIELD_NAME = new ParseField("shard_min_doc_count");
public static final ParseField REQUIRED_SIZE_FIELD_NAME = new ParseField("size");
public static final ParseField SHOW_TERM_DOC_COUNT_ERROR = new ParseField("show_term_doc_count_error");
//These are the results of the parsing.
private TermsAggregator.BucketCountThresholds bucketCountThresholds = new TermsAggregator.BucketCountThresholds();
private String executionHint = null;
private SubAggCollectionMode collectMode = SubAggCollectionMode.DEPTH_FIRST;
@ -59,12 +59,12 @@ public abstract class AbstractTermsParametersParser {
public IncludeExclude getIncludeExclude() {
return includeExclude;
}
public SubAggCollectionMode getCollectionMode() {
return collectMode;
}
public void parse(String aggregationName, XContentParser parser, SearchContext context, ValuesSourceParser<?> vsParser, IncludeExclude.Parser incExcParser) throws IOException {
public void parse(String aggregationName, XContentParser parser, SearchContext context, ValuesSourceParser vsParser, IncludeExclude.Parser incExcParser) throws IOException {
bucketCountThresholds = getDefaultBucketCountThresholds();
XContentParser.Token token;
String currentFieldName = null;

View File

@ -165,7 +165,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
private final boolean showTermDocCountError;
public TermsAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, Terms.Order order,
public TermsAggregatorFactory(String name, ValuesSourceConfig config, Terms.Order order,
TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude, String executionHint,
SubAggCollectionMode executionMode, boolean showTermDocCountError) {
super(name, StringTerms.TYPE.name(), config);

View File

@ -25,7 +25,6 @@ import org.elasticsearch.search.aggregations.bucket.BucketUtils;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order;
import org.elasticsearch.search.aggregations.bucket.terms.TermsParametersParser.OrderElement;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -46,8 +45,7 @@ public class TermsParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
TermsParametersParser aggParser = new TermsParametersParser();
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context).scriptable(true)
.formattable(true).build();
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, StringTerms.TYPE, context).scriptable(true).formattable(true).build();
IncludeExclude.Parser incExcParser = new IncludeExclude.Parser();
aggParser.parse(aggregationName, parser, context, vsParser, incExcParser);

View File

@ -62,7 +62,6 @@ public abstract class ValuesSourceMetricsAggregationBuilder<B extends ValuesSour
/**
* Configure the value to use when documents miss a value.
*/
@SuppressWarnings("unchecked")
public B missing(Object missingValue) {
this.missing = missingValue;
return (B) this;
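With the annotation removed, the unchecked cast in return (B) this now produces a compiler warning. The conventional self-typed-builder idiom keeps the suppression at method scope; as a sketch:

    @SuppressWarnings("unchecked")   // safe: each concrete builder declares itself as B
    public B missing(Object missingValue) {
        this.missing = missingValue;
        return (B) this;
    }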

View File

@ -35,7 +35,7 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory.L
private final long precisionThreshold;
CardinalityAggregatorFactory(String name, ValuesSourceConfig<ValuesSource> config, long precisionThreshold) {
CardinalityAggregatorFactory(String name, ValuesSourceConfig config, long precisionThreshold) {
super(name, InternalCardinality.TYPE.name(), config);
this.precisionThreshold = precisionThreshold;
}

View File

@ -24,7 +24,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceParser;
import org.elasticsearch.search.internal.SearchContext;
@ -44,7 +43,7 @@ public class CardinalityParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String name, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<ValuesSource> vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
ValuesSourceParser<?> vsParser = ValuesSourceParser.any(name, InternalCardinality.TYPE, context).formattable(false).build();
long precisionThreshold = -1;

View File

@ -40,7 +40,7 @@ public class ValueCountParser implements Aggregator.Parser {
@Override
public AggregatorFactory parse(String aggregationName, XContentParser parser, SearchContext context) throws IOException {
ValuesSourceParser<?> vsParser = ValuesSourceParser.any(aggregationName, InternalValueCount.TYPE, context)
ValuesSourceParser vsParser = ValuesSourceParser.any(aggregationName, InternalValueCount.TYPE, context)
.build();
XContentParser.Token token;
@ -54,6 +54,6 @@ public class ValueCountParser implements Aggregator.Parser {
}
}
return new ValueCountAggregator.Factory<>(aggregationName, vsParser.config());
return new ValueCountAggregator.Factory(aggregationName, vsParser.config());
}
}

View File

@ -53,9 +53,6 @@ import org.elasticsearch.search.aggregations.support.values.ScriptLongValues;
import java.io.IOException;
/**
* How to load values for an aggregation.
*/
public abstract class ValuesSource {
/**
@ -531,7 +528,6 @@ public abstract class ValuesSource {
return indexFieldData.load(context).getBytesValues();
}
@Override
public org.elasticsearch.index.fielddata.MultiGeoPointValues geoPointValues(LeafReaderContext context) {
return indexFieldData.load(context).getGeoPointValues();
}

View File

@ -78,20 +78,19 @@ public abstract class ValuesSourceAggregatorFactory<VS extends ValuesSource> ext
boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException;
@SuppressWarnings("unchecked") // Safe because we check the types with isAssignableFrom
private void resolveValuesSourceConfigFromAncestors(String aggName, AggregatorFactory parent, Class<VS> requiredValuesSourceType) {
ValuesSourceConfig<?> config;
ValuesSourceConfig config;
while (parent != null) {
if (parent instanceof ValuesSourceAggregatorFactory) {
config = ((ValuesSourceAggregatorFactory<?>) parent).config;
config = ((ValuesSourceAggregatorFactory) parent).config;
if (config != null && config.valid()) {
if (requiredValuesSourceType == null || requiredValuesSourceType.isAssignableFrom(config.valueSourceType)) {
ValueFormat format = config.format;
this.config = (ValuesSourceConfig<VS>) config;
this.config = config;
// if the user explicitly defined a format pattern, we'll do our best to keep it even when we inherit the
// value source form one of the ancestor aggregations
if (this.config.formatPattern != null && format != null && format instanceof ValueFormat.Patternable) {
this.config.format = ((ValueFormat.Patternable<?>) format).create(this.config.formatPattern);
this.config.format = ((ValueFormat.Patternable) format).create(this.config.formatPattern);
}
return;
}

View File

@ -48,16 +48,13 @@ import java.util.HashMap;
import java.util.Map;
/**
* Parses a description of where to load the value sent by a user into a
* ValuesSourceConfig which can be used to work with the values in various ways,
* one of which is to create an actual ValueSource (done with the help of
* AggregationContext).
*
*/
public class ValuesSourceParser<VS extends ValuesSource> {
static final ParseField TIME_ZONE = new ParseField("time_zone");
public static Builder<ValuesSource> any(String aggName, InternalAggregation.Type aggType, SearchContext context) {
public static Builder any(String aggName, InternalAggregation.Type aggType, SearchContext context) {
return new Builder<>(aggName, aggType, context, ValuesSource.class);
}

View File

@ -36,7 +36,7 @@ import java.util.Locale;
* Collectors used in the search. Children CollectorResult's may be
* embedded inside of a parent CollectorResult
*/
public class CollectorResult implements ToXContent, Writeable<CollectorResult> {
public class CollectorResult implements ToXContent, Writeable {
public static final String REASON_SEARCH_COUNT = "search_count";
public static final String REASON_SEARCH_TOP_HITS = "search_top_hits";
@ -125,7 +125,7 @@ public class CollectorResult implements ToXContent, Writeable<CollectorResult> {
builder = builder.startObject()
.field(NAME.getPreferredName(), getName())
.field(REASON.getPreferredName(), getReason())
.field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", getTime() / 1000000.0));
.field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0)));
if (!children.isEmpty()) {
builder = builder.startArray(CHILDREN.getPreferredName());
@ -150,7 +150,7 @@ public class CollectorResult implements ToXContent, Writeable<CollectorResult> {
}
@Override
public CollectorResult readFrom(StreamInput in) throws IOException {
public Object readFrom(StreamInput in) throws IOException {
return new CollectorResult(in);
}
}
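Widening readFrom to return Object drops the covariance that the Writeable<CollectorResult> type parameter provided, so call sites need an explicit cast again (illustrative):

    CollectorResult result = (CollectorResult) prototype.readFrom(in);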

View File

@ -19,12 +19,19 @@
package org.elasticsearch.search.sort;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.Locale;
/**
* A sorting order.
*
*/
public enum SortOrder {
public enum SortOrder implements Writeable<SortOrder> {
/**
* Ascending order.
*/
@ -42,5 +49,30 @@ public enum SortOrder {
public String toString() {
return "desc";
}
};
public static final SortOrder DEFAULT = DESC;
private static final SortOrder PROTOTYPE = DEFAULT;
@Override
public SortOrder readFrom(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown SortOrder ordinal [" + ordinal + "]");
}
return values()[ordinal];
}
public static SortOrder readOrderFrom(StreamInput in) throws IOException {
return PROTOTYPE.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.ordinal());
}
public static SortOrder fromString(String op) {
return valueOf(op.toUpperCase(Locale.ROOT));
}
}
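Encoding the enum as its ordinal ties the wire format to declaration order: ASC (0) and DESC (1) must never be reordered or preceded by new constants. A round-trip sketch under the same stream assumptions as elsewhere in this diff:

    BytesStreamOutput out = new BytesStreamOutput();
    SortOrder.DESC.writeTo(out);                               // writes vInt 1
    SortOrder back = SortOrder.readOrderFrom(out.bytes().streamInput());
    assert back == SortOrder.DESC;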

View File

@ -117,13 +117,9 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_CLIENT;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_SERVER;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_CONNECT_TIMEOUT;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_DEFAULT_SEND_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
@ -221,8 +217,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
this.workerCount = settings.getAsInt(WORKER_COUNT, EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingClient = settings.getAsBoolean("transport.netty.transport.tcp.blocking_client", settings.getAsBoolean(TCP_BLOCKING_CLIENT, settings.getAsBoolean(TCP_BLOCKING, false)));
this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT)));
this.blockingClient = settings.getAsBoolean("transport.netty.transport.tcp.blocking_client", TCP_BLOCKING_CLIENT.get(settings));
this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", TCP_CONNECT_TIMEOUT.get(settings)));
this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1);
this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);
@ -362,29 +358,25 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory());
clientBootstrap.setOption("connectTimeoutMillis", connectTimeout.millis());
String tcpNoDelay = settings.get("transport.netty.tcp_no_delay", settings.get(TCP_NO_DELAY, "true"));
if (!"default".equals(tcpNoDelay)) {
clientBootstrap.setOption("tcpNoDelay", Booleans.parseBoolean(tcpNoDelay, null));
}
boolean tcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
clientBootstrap.setOption("tcpNoDelay", tcpNoDelay);
String tcpKeepAlive = settings.get("transport.netty.tcp_keep_alive", settings.get(TCP_KEEP_ALIVE, "true"));
if (!"default".equals(tcpKeepAlive)) {
clientBootstrap.setOption("keepAlive", Booleans.parseBoolean(tcpKeepAlive, null));
}
boolean tcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
clientBootstrap.setOption("keepAlive", tcpKeepAlive);
ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
if (tcpSendBufferSize != null && tcpSendBufferSize.bytes() > 0) {
ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
if (tcpSendBufferSize.bytes() > 0) {
clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.bytes());
}
ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
if (tcpReceiveBufferSize != null && tcpReceiveBufferSize.bytes() > 0) {
ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
if (tcpReceiveBufferSize.bytes() > 0) {
clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.bytes());
}
clientBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
boolean reuseAddress = settings.getAsBoolean("transport.netty.reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
boolean reuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
clientBootstrap.setOption("reuseAddress", reuseAddress);
return clientBootstrap;
@ -403,26 +395,22 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
fallbackSettingsBuilder.put("publish_host", fallbackPublishHost);
}
String fallbackTcpNoDelay = settings.get("transport.netty.tcp_no_delay", settings.get(TCP_NO_DELAY, "true"));
if (fallbackTcpNoDelay != null) {
fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay);
}
boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay);
String fallbackTcpKeepAlive = settings.get("transport.netty.tcp_keep_alive", settings.get(TCP_KEEP_ALIVE, "true"));
if (fallbackTcpKeepAlive != null) {
boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive);
}
boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", settings.getAsBoolean(TCP_REUSE_ADDRESS, NetworkUtils.defaultReuseAddress()));
boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress);
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", settings.getAsBytesSize(TCP_SEND_BUFFER_SIZE, TCP_DEFAULT_SEND_BUFFER_SIZE));
if (fallbackTcpSendBufferSize != null) {
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
if (fallbackTcpSendBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
}
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", settings.getAsBytesSize(TCP_RECEIVE_BUFFER_SIZE, TCP_DEFAULT_RECEIVE_BUFFER_SIZE));
if (fallbackTcpBufferSize != null) {
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
if (fallbackTcpBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
}
@ -552,15 +540,15 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
private void createServerBootstrap(String name, Settings settings) {
boolean blockingServer = settings.getAsBoolean("transport.tcp.blocking_server", this.settings.getAsBoolean(TCP_BLOCKING_SERVER, this.settings.getAsBoolean(TCP_BLOCKING, false)));
boolean blockingServer = settings.getAsBoolean("transport.tcp.blocking_server", TCP_BLOCKING_SERVER.get(settings));
String port = settings.get("port");
String bindHost = settings.get("bind_host");
String publishHost = settings.get("publish_host");
String tcpNoDelay = settings.get("tcp_no_delay");
String tcpKeepAlive = settings.get("tcp_keep_alive");
boolean reuseAddress = settings.getAsBoolean("reuse_address", NetworkUtils.defaultReuseAddress());
ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("tcp_send_buffer_size", TCP_DEFAULT_SEND_BUFFER_SIZE);
ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("tcp_receive_buffer_size", TCP_DEFAULT_RECEIVE_BUFFER_SIZE);
ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.getDefault(settings));
ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.getDefault(settings));
logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax);
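
All the NettyTransport hunks above apply one pattern: the nested settings.getAsBoolean(...)/getAsBytesSize(...) fallback chains are replaced by shared Setting&lt;T&gt; constants (TCP_NO_DELAY, TCP_KEEP_ALIVE, TCP_SEND_BUFFER_SIZE, ...), so each default and its legacy key are declared once. A minimal sketch of the pattern, assuming a Setting.boolSetting factory like the ones exercised later in this diff; the declaration here is illustrative, not the production constant:

    // Hypothetical declaration of the shared constant consumed above; the real
    // TCP_NO_DELAY lives elsewhere in the transport code.
    Setting<Boolean> TCP_NO_DELAY =
            Setting.boolSetting("transport.tcp_no_delay", true, false, Setting.Scope.CLUSTER);

    // The netty-specific key still wins if present; otherwise the typed default applies.
    boolean tcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay",
            TCP_NO_DELAY.get(settings));
    clientBootstrap.setOption("tcpNoDelay", tcpNoDelay);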

View File

@ -44,7 +44,9 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.node.Node;
import org.elasticsearch.rest.RestStatus;
@ -100,8 +102,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
// it's a tribe-configured node..., force settings
Settings.Builder sb = Settings.builder().put(settings);
sb.put("node.client", true); // this node should just act as a node client
sb.put("discovery.type", "local"); // a tribe node should not use zen discovery
sb.put("discovery.initial_state_timeout", 0); // nothing is going to be discovered, since no master will be elected
sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); // nothing is going to be discovered, since no master will be elected
if (sb.get("cluster.name") == null) {
sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM
}
@ -132,7 +134,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) {
Settings.Builder sb = Settings.builder().put(entry.getValue());
sb.put("name", settings.get("name") + "/" + entry.getKey());
sb.put("path.home", settings.get("path.home")); // pass through ES home dir
sb.put(Environment.PATH_HOME_SETTING.getKey(), Environment.PATH_HOME_SETTING.get(settings)); // pass through ES home dir
sb.put(TRIBE_NAME, entry.getKey());
if (sb.get("http.enabled") == null) {
sb.put("http.enabled", false);

View File

@ -47,7 +47,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
@Override
public void setUp() throws Exception {
super.setUp();
Settings settings = Settings.builder().put("path.home", createTempDir().toString()).build();
Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build();
Settings indexSettings = settingsBuilder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)

View File

@ -31,6 +31,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Arrays;
@ -156,7 +157,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception {
//we create a transport client with no nodes to make sure it throws NoNodeAvailableException
Settings settings = Settings.builder()
.put("path.home", createTempDir().toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
Client transportClient = TransportClient.builder().settings(settings).build();

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.search;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESTestCase;
@ -38,7 +39,7 @@ public class SearchRequestBuilderTests extends ESTestCase {
//this client will not be hit by any request, but it needs to be a non null proper client
//that is why we create it but we don't add any transport address to it
Settings settings = Settings.builder()
.put("path.home", createTempDir().toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
client = TransportClient.builder().settings(settings).build();
}

View File

@ -60,8 +60,8 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase {
logger.info("--> start 4 nodes, 3 master, 1 data");
final Settings sharedSettings = Settings.builder()
.put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly
.put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
.put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)

View File

@ -39,11 +39,11 @@ import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.MultiDataPathUpgrader;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexSettings;
@ -143,13 +143,13 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
Path baseTempDir = createTempDir();
// start single data path node
Settings.Builder nodeSettings = Settings.builder()
.put("path.data", baseTempDir.resolve("single-path").toAbsolutePath())
.put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("single-path").toAbsolutePath())
.put("node.master", false); // workaround for dangling index loading issue when node is master
InternalTestCluster.Async<String> singleDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
// start multi data path node
nodeSettings = Settings.builder()
.put("path.data", baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir.resolve("multi-path2").toAbsolutePath())
.put(Environment.PATH_DATA_SETTING.getKey(), baseTempDir.resolve("multi-path1").toAbsolutePath() + "," + baseTempDir.resolve("multi-path2").toAbsolutePath())
.put("node.master", false); // workaround for dangling index loading issue when node is master
InternalTestCluster.Async<String> multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
@ -208,10 +208,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
void importIndex(String indexName) throws IOException {
final Iterable<NodeEnvironment> instances = internalCluster().getInstances(NodeEnvironment.class);
for (NodeEnvironment nodeEnv : instances) { // upgrade multidata path
MultiDataPathUpgrader.upgradeMultiDataPath(nodeEnv, logger);
}
// force reloading dangling indices with a cluster state republish
client().admin().cluster().prepareReroute().get();
ensureGreen(indexName);
@ -219,6 +215,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
// randomly distribute the files from src over dests paths
public static void copyIndex(final ESLogger logger, final Path src, final String indexName, final Path... dests) throws IOException {
Path destinationDataPath = dests[randomInt(dests.length - 1)];
for (Path dest : dests) {
Path indexDir = dest.resolve(indexName);
assertFalse(Files.exists(indexDir));
@ -244,7 +241,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
Path relativeFile = src.relativize(file);
Path destFile = dests[randomInt(dests.length - 1)].resolve(indexName).resolve(relativeFile);
Path destFile = destinationDataPath.resolve(indexName).resolve(relativeFile);
logger.trace("--> Moving " + relativeFile.toString() + " to " + destFile.toString());
Files.move(file, destFile);
assertFalse(Files.exists(file));

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.bwcompat;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.Node;
@ -28,7 +29,7 @@ public class RecoveryWithUnsupportedIndicesIT extends StaticIndexBackwardCompati
String indexName = "unsupported-0.20.6";
logger.info("Checking static index " + indexName);
Settings nodeSettings = prepareBackwardsDataDir(getBwcIndicesPath().resolve(indexName + ".zip"), Node.HTTP_ENABLED, true);
Settings nodeSettings = prepareBackwardsDataDir(getBwcIndicesPath().resolve(indexName + ".zip"), NetworkModule.HTTP_ENABLED.getKey(), true);
try {
internalCluster().startNode(nodeSettings);
fail();

View File

@ -1,3 +1,4 @@
/*
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@ -27,6 +28,8 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.repositories.uri.URLRepository;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;
import org.elasticsearch.snapshots.RestoreInfo;
@ -64,7 +67,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {
// Configure using path.repo
return settingsBuilder()
.put(super.nodeSettings(nodeOrdinal))
.put("path.repo", getBwcIndicesPath())
.put(Environment.PATH_REPO_SETTING.getKey(), getBwcIndicesPath())
.build();
} else {
// Configure using url white list
@ -72,7 +75,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {
URI repoJarPatternUri = new URI("jar:" + getBwcIndicesPath().toUri().toString() + "*.zip!/repo/");
return settingsBuilder()
.put(super.nodeSettings(nodeOrdinal))
.putArray("repositories.url.allowed_urls", repoJarPatternUri.toString())
.putArray(URLRepository.ALLOWED_URLS_SETTING.getKey(), repoJarPatternUri.toString())
.build();
} catch (URISyntaxException ex) {
throw new IllegalArgumentException(ex);

View File

@ -48,6 +48,7 @@ import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportMessage;
@ -90,7 +91,7 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
public void initClient() {
Settings settings = Settings.builder()
.put(HEADER_SETTINGS)
.put("path.home", createTempDir().toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
threadPool = new ThreadPool("test-" + getTestName());
client = buildClient(settings, ACTIONS);

View File

@ -36,6 +36,7 @@ import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
@ -83,7 +84,7 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {
.put("node.name", "transport_client_" + this.getTestName() + "_1")
.put("client.transport.nodes_sampler_interval", "1s")
.put(HEADER_SETTINGS)
.put("path.home", createTempDir().toString()).build())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build())
.addPlugin(InternalTransportService.TestPlugin.class)
.build();

View File

@ -24,6 +24,7 @@ import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.test.ESIntegTestCase;
@ -52,7 +53,7 @@ public class TransportClientIT extends ESIntegTestCase {
TransportClientNodesService nodeService = client.nodeService();
Node node = new Node(Settings.builder()
.put(internalCluster().getDefaultSettings())
.put("path.home", createTempDir())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put("node.name", "testNodeVersionIsUpdated")
.put("http.enabled", false)
.put("node.data", false)
@ -89,7 +90,10 @@ public class TransportClientIT extends ESIntegTestCase {
}
public void testThatTransportClientSettingCannotBeChanged() {
Settings baseSettings = settingsBuilder().put(Client.CLIENT_TYPE_SETTING, "anything").put("path.home", createTempDir()).build();
Settings baseSettings = settingsBuilder()
.put(Client.CLIENT_TYPE_SETTING, "anything")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.build();
try (TransportClient client = TransportClient.builder().settings(baseSettings).build()) {
Settings settings = client.injector.getInstance(Settings.class);
assertThat(settings.get(Client.CLIENT_TYPE_SETTING), is("transport"));

View File

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@ -57,7 +58,7 @@ public class TransportClientRetryIT extends ESIntegTestCase {
.put("node.mode", internalCluster().getNodeMode())
.put(ClusterName.SETTING, internalCluster().getClusterName())
.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
.put("path.home", createTempDir());
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir());
try (TransportClient transportClient = TransportClient.builder().settings(builder.build()).build()) {
transportClient.addTransportAddresses(addresses);

View File

@ -642,7 +642,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 1)
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "400ms")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
.put("discovery.initial_state_timeout", "500ms")
.build();

View File

@ -77,7 +77,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 2)
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -189,7 +189,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put("discovery.zen.minimum_master_nodes", 3)
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "1s")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -264,7 +264,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
public void testDynamicUpdateMinimumMasterNodes() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "400ms")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
.put("discovery.initial_state_timeout", "500ms")
.build();
@ -322,7 +322,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
int nodeCount = scaledRandomIntBetween(1, 5);
Settings.Builder settings = settingsBuilder()
.put("discovery.type", "zen")
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put("discovery.initial_state_timeout", "500ms");
// set an initial value which is at least quorum to avoid split brains during initial startup
@ -361,8 +361,8 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
public void testCanNotPublishWithoutMinMastNodes() throws Exception {
Settings settings = settingsBuilder()
.put("discovery.type", "zen")
.put(FaultDetection.SETTING_PING_TIMEOUT, "1h") // disable it
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1h") // disable it
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up
.build();

View File

@ -65,7 +65,7 @@ public class NoMasterNodeIT extends ESIntegTestCase {
.put("discovery.type", "zen")
.put("action.auto_create_index", autoCreateIndex)
.put("discovery.zen.minimum_master_nodes", 2)
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put("discovery.initial_state_timeout", "500ms")
.put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all")
.build();
@ -217,7 +217,7 @@ public class NoMasterNodeIT extends ESIntegTestCase {
.put("discovery.type", "zen")
.put("action.auto_create_index", false)
.put("discovery.zen.minimum_master_nodes", 2)
.put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms")
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "200ms")
.put("discovery.initial_state_timeout", "500ms")
.put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "write")
.build();

View File

@ -108,7 +108,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
.put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b")
.put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 3)
.put(ZenDiscovery.SETTING_JOIN_TIMEOUT, "10s")
.put(ZenDiscovery.JOIN_TIMEOUT_SETTING.getKey(), "10s")
.build();
logger.info("--> starting 4 nodes on different zones");

View File

@ -0,0 +1,93 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.metadata;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import java.util.Collections;
public class MetaDataIndexUpgradeServiceTests extends ESTestCase {
public void testArchiveBrokenIndexSettings() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.EMPTY);
IndexMetaData indexMetaData = service.archiveBrokenIndexSettings(src);
assertSame(indexMetaData, src);
src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
indexMetaData = service.archiveBrokenIndexSettings(src);
assertNotSame(indexMetaData, src);
assertEquals("-200", indexMetaData.getSettings().get("archived.index.refresh_interval"));
src = newIndexMeta("foo", Settings.builder().put("index.codec", "best_compression1").build());
indexMetaData = service.archiveBrokenIndexSettings(src);
assertNotSame(indexMetaData, src);
assertEquals("best_compression1", indexMetaData.getSettings().get("archived.index.codec"));
src = newIndexMeta("foo", Settings.builder().put("index.refresh.interval", "-1").build());
indexMetaData = service.archiveBrokenIndexSettings(src);
assertNotSame(indexMetaData, src);
assertEquals("-1", indexMetaData.getSettings().get("archived.index.refresh.interval"));
src = newIndexMeta("foo", indexMetaData.getSettings()); // double archive?
indexMetaData = service.archiveBrokenIndexSettings(src);
assertSame(indexMetaData, src);
}
public void testUpgrade() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
src = service.upgradeIndexMetaData(src);
assertTrue(service.isUpgraded(src));
assertEquals("-200", src.getSettings().get("archived.index.refresh_interval"));
assertNull(src.getSettings().get("index.refresh_interval"));
assertSame(src, service.upgradeIndexMetaData(src)); // no double upgrade
}
public void testIsUpgraded() {
MetaDataIndexUpgradeService service = new MetaDataIndexUpgradeService(Settings.EMPTY, new MapperRegistry(Collections.emptyMap(), Collections.emptyMap()), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
IndexMetaData src = newIndexMeta("foo", Settings.builder().put("index.refresh_interval", "-200").build());
assertFalse(service.isUpgraded(src));
Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), VersionUtils.getPreviousVersion());
src = newIndexMeta("foo", Settings.builder().put(IndexMetaData.SETTING_VERSION_UPGRADED, version).build());
assertFalse(service.isUpgraded(src));
src = newIndexMeta("foo", Settings.builder().put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build());
assertTrue(service.isUpgraded(src));
}
public static IndexMetaData newIndexMeta(String name, Settings indexSettings) {
Settings build = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_CREATION_DATE, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, "BOOM")
.put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.V_0_18_1_ID)
.put(indexSettings)
.build();
IndexMetaData metaData = IndexMetaData.builder(name).settings(build).build();
return metaData;
}
}
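
The behaviour this new test class pins down, in a rough sketch inferred from the assertions above (an assumption, not the production code): index settings that fail validation are parked under an "archived." prefix instead of failing the upgrade, and already-archived metadata passes through unchanged.

    // Assumed shape of the archiving step exercised above.
    // (imports: java.util.Map, java.util.function.Predicate)
    static Settings archiveBroken(Settings in, Predicate<String> isValidSetting) {
        Settings.Builder out = Settings.builder();
        for (Map.Entry<String, String> e : in.getAsMap().entrySet()) {
            String key = isValidSetting.test(e.getKey())
                    ? e.getKey()
                    : "archived." + e.getKey(); // park the broken setting
            out.put(key, e.getValue());
        }
        return out.build();
    }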

View File

@ -27,6 +27,7 @@ import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
@ -53,8 +54,8 @@ public class Log4jESLoggerTests extends ESTestCase {
Path configDir = getDataPath("config");
// Need to set custom path.conf so we can use a custom logging.yml file for the test
Settings settings = Settings.builder()
.put("path.conf", configDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
LogConfigurator.configure(settings, true);

View File

@ -54,8 +54,8 @@ public class LoggingConfigurationTests extends ESTestCase {
try {
Path configDir = getDataPath("config");
Settings settings = Settings.builder()
.put("path.conf", configDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), configDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
LogConfigurator.configure(settings, true);
@ -84,8 +84,8 @@ public class LoggingConfigurationTests extends ESTestCase {
Files.write(loggingConf, "{\"json\": \"foo\"}".getBytes(StandardCharsets.UTF_8));
Environment environment = new Environment(
Settings.builder()
.put("path.conf", tmpDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build());
Settings.Builder builder = Settings.builder();
@ -101,8 +101,8 @@ public class LoggingConfigurationTests extends ESTestCase {
Files.write(loggingConf, "key: value".getBytes(StandardCharsets.UTF_8));
Environment environment = new Environment(
Settings.builder()
.put("path.conf", tmpDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build());
Settings.Builder builder = Settings.builder();
@ -120,8 +120,8 @@ public class LoggingConfigurationTests extends ESTestCase {
Files.write(loggingConf2, "yaml: bar".getBytes(StandardCharsets.UTF_8));
Environment environment = new Environment(
Settings.builder()
.put("path.conf", tmpDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build());
Settings.Builder builder = Settings.builder();
@ -138,8 +138,8 @@ public class LoggingConfigurationTests extends ESTestCase {
Files.write(invalidSuffix, "yml: bar".getBytes(StandardCharsets.UTF_8));
Environment environment = new Environment(
Settings.builder()
.put("path.conf", invalidSuffix.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), invalidSuffix.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build());
Settings.Builder builder = Settings.builder();
@ -157,8 +157,8 @@ public class LoggingConfigurationTests extends ESTestCase {
Files.write(loggingConf, "appender.file.type: file\n".getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND);
Environment environment = InternalSettingsPreparer.prepareEnvironment(
Settings.builder()
.put("path.conf", tmpDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.put("logger.test_resolve_order", "TRACE, console")
.put("appender.console.type", "console")
.put("appender.console.layout.type", "consolePattern")
@ -186,8 +186,8 @@ public class LoggingConfigurationTests extends ESTestCase {
StandardCharsets.UTF_8);
Environment environment = InternalSettingsPreparer.prepareEnvironment(
Settings.builder()
.put("path.conf", tmpDir.toAbsolutePath())
.put("path.home", createTempDir().toString())
.put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build(), new CliToolTestCase.MockTerminal());
LogConfigurator.configure(environment.settings(), false);
ESLogger esLogger = Log4jESLoggerFactory.getLogger("test_config_not_read");

View File

@ -143,7 +143,7 @@ public class NetworkModuleTests extends ModuleTestCase {
}
// not added if http is disabled
settings = Settings.builder().put(NetworkModule.HTTP_ENABLED, false).build();
settings = Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false).build();
module = new NetworkModule(new NetworkService(settings), settings, false, null);
assertNotBound(module, HttpServerTransport.class);
}

View File

@ -104,13 +104,17 @@ public class SettingTests extends ESTestCase {
TimeValue defaultValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000));
Setting<TimeValue> setting = Setting.positiveTimeSetting("my.time.value", defaultValue, randomBoolean(), Setting.Scope.CLUSTER);
assertFalse(setting.isGroupSetting());
String aDefault = setting.getDefault(Settings.EMPTY);
String aDefault = setting.getDefaultRaw(Settings.EMPTY);
assertEquals(defaultValue.millis() + "ms", aDefault);
assertEquals(defaultValue.millis(), setting.get(Settings.EMPTY).millis());
assertEquals(defaultValue, setting.getDefault(Settings.EMPTY));
Setting<String> secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.CLUSTER);
assertEquals("some_default", secondaryDefault.get(Settings.EMPTY));
assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build()));
Setting<String> secondaryDefaultViaSettings = new Setting<>("foo.bar", secondaryDefault, (s) -> s, randomBoolean(), Setting.Scope.CLUSTER);
assertEquals("some_default", secondaryDefaultViaSettings.get(Settings.EMPTY));
assertEquals("42", secondaryDefaultViaSettings.get(Settings.builder().put("old.foo.bar", 42).build()));
}
public void testComplexType() {
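
The rename in the hunk above splits the old getDefault into two accessors. In short, as exercised by the assertions (an editor's gloss, using the positiveTimeSetting factory from the test itself):

    Setting<TimeValue> t = Setting.positiveTimeSetting("my.time.value",
            TimeValue.timeValueMillis(500), false, Setting.Scope.CLUSTER);
    String raw = t.getDefaultRaw(Settings.EMPTY);   // "500ms": the default's string form
    TimeValue typed = t.getDefault(Settings.EMPTY); // the parsed TimeValue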
@ -298,6 +302,26 @@ public class SettingTests extends ESTestCase {
for (int i = 0; i < intValues.size(); i++) {
assertEquals(i, intValues.get(i).intValue());
}
Setting<List<String>> settingWithFallback = Setting.listSetting("foo.baz", listSetting, s -> s, true, Setting.Scope.CLUSTER);
value = settingWithFallback.get(Settings.EMPTY);
assertEquals(1, value.size());
assertEquals("foo,bar", value.get(0));
value = settingWithFallback.get(Settings.builder().putArray("foo.bar", "1", "2").build());
assertEquals(2, value.size());
assertEquals("1", value.get(0));
assertEquals("2", value.get(1));
value = settingWithFallback.get(Settings.builder().putArray("foo.baz", "3", "4").build());
assertEquals(2, value.size());
assertEquals("3", value.get(0));
assertEquals("4", value.get(1));
value = settingWithFallback.get(Settings.builder().putArray("foo.baz", "3", "4").putArray("foo.bar", "1", "2").build());
assertEquals(2, value.size());
assertEquals("3", value.get(0));
assertEquals("4", value.get(1));
}
public void testListSettingAcceptsNumberSyntax() {
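
The new assertions cover the fallback variant of Setting.listSetting: when the primary key is absent, both value and default come from the setting it wraps. A condensed sketch (the default-list factory signature is assumed from the surrounding test; imports: java.util.Arrays, java.util.List):

    Setting<List<String>> primary = Setting.listSetting(
            "foo.bar", Arrays.asList("foo,bar"), s -> s, true, Setting.Scope.CLUSTER);
    Setting<List<String>> withFallback = Setting.listSetting(
            "foo.baz", primary, s -> s, true, Setting.Scope.CLUSTER);

    withFallback.get(Settings.EMPTY);                                               // ["foo,bar"] via primary's default
    withFallback.get(Settings.builder().putArray("foo.bar", "1", "2").build());     // ["1", "2"] via fallback
    withFallback.get(Settings.builder().putArray("foo.baz", "3", "4").build());     // ["3", "4"]: own key wins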

View File

@ -19,6 +19,10 @@
package org.elasticsearch.common.unit;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.closeTo;
@ -73,4 +77,21 @@ public class DistanceUnitTests extends ESTestCase {
assertEquals(7, DistanceUnit.MILES.ordinal());
assertEquals(8, DistanceUnit.METERS.ordinal());
}
public void testReadWrite() throws Exception {
for (DistanceUnit unit : DistanceUnit.values()) {
try (BytesStreamOutput out = new BytesStreamOutput()) {
unit.writeTo(out);
try (StreamInput in = StreamInput.wrap(out.bytes())) {
assertThat("Roundtrip serialisation failed.", DistanceUnit.readDistanceUnit(in), equalTo(unit));
}
}
}
}
public void testFromString() {
for (DistanceUnit unit : DistanceUnit.values()) {
assertThat("Roundtrip string parsing failed.", DistanceUnit.fromString(unit.toString()), equalTo(unit));
}
}
}
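
Read together with the ordinal assertions above it, the new roundtrip test pins the wire format: presumably the enum ordinal is what goes over the stream, so reordering DistanceUnit would break serialisation. A sketch of that assumed implementation:

    public void writeTo(StreamOutput out) throws IOException {
        out.writeVInt(this.ordinal()); // ordinal is the wire format (assumed)
    }

    public static DistanceUnit readDistanceUnit(StreamInput in) throws IOException {
        return DistanceUnit.values()[in.readVInt()];
    }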

View File

@ -1,297 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util;
import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityIT;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.shard.ShardStateMetaData;
import org.elasticsearch.test.ESTestCase;
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
/**
*/
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
public class MultiDataPathUpgraderTests extends ESTestCase {
public void testUpgradeRandomPaths() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
final String uuid = Strings.base64UUID();
final ShardId shardId = new ShardId("foo", 0);
final Path[] shardDataPaths = nodeEnvironment.availableShardPaths(shardId);
if (nodeEnvironment.nodeDataPaths().length == 1) {
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
assertFalse(helper.needsUpgrading(shardId));
return;
}
int numIdxFiles = 0;
int numTranslogFiles = 0;
int metaStateVersion = 0;
for (Path shardPath : shardDataPaths) {
final Path translog = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
final Path idx = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
Files.createDirectories(translog);
Files.createDirectories(idx);
int numFiles = randomIntBetween(1, 10);
for (int i = 0; i < numFiles; i++, numIdxFiles++) {
String filename = Integer.toString(numIdxFiles);
try (BufferedWriter w = Files.newBufferedWriter(idx.resolve(filename + ".tst"), StandardCharsets.UTF_8)) {
w.write(filename);
}
}
numFiles = randomIntBetween(1, 10);
for (int i = 0; i < numFiles; i++, numTranslogFiles++) {
String filename = Integer.toString(numTranslogFiles);
try (BufferedWriter w = Files.newBufferedWriter(translog.resolve(filename + ".translog"), StandardCharsets.UTF_8)) {
w.write(filename);
}
}
++metaStateVersion;
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(metaStateVersion, true, uuid, AllocationId.newInitializing()), metaStateVersion, shardDataPaths);
}
final Path path = randomFrom(shardDataPaths);
ShardPath targetPath = new ShardPath(false, path, path, uuid, new ShardId("foo", 0));
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
helper.upgrade(shardId, targetPath);
assertFalse(helper.needsUpgrading(shardId));
if (shardDataPaths.length > 1) {
for (Path shardPath : shardDataPaths) {
if (shardPath.equals(targetPath.getDataPath())) {
continue;
}
final Path translog = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
final Path idx = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
final Path state = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
assertFalse(Files.exists(translog));
assertFalse(Files.exists(idx));
assertFalse(Files.exists(state));
assertFalse(Files.exists(shardPath));
}
}
final ShardStateMetaData stateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, targetPath.getShardStatePath());
assertEquals(metaStateVersion, stateMetaData.version);
assertTrue(stateMetaData.primary);
assertEquals(uuid, stateMetaData.indexUUID);
final Path translog = targetPath.getDataPath().resolve(ShardPath.TRANSLOG_FOLDER_NAME);
final Path idx = targetPath.getDataPath().resolve(ShardPath.INDEX_FOLDER_NAME);
Files.deleteIfExists(idx.resolve("write.lock"));
assertEquals(numTranslogFiles, FileSystemUtils.files(translog).length);
assertEquals(numIdxFiles, FileSystemUtils.files(idx).length);
final HashSet<Path> translogFiles = Sets.newHashSet(FileSystemUtils.files(translog));
for (int i = 0; i < numTranslogFiles; i++) {
final String name = Integer.toString(i);
translogFiles.contains(translog.resolve(name + ".translog"));
byte[] content = Files.readAllBytes(translog.resolve(name + ".translog"));
assertEquals(name , new String(content, StandardCharsets.UTF_8));
}
final HashSet<Path> idxFiles = Sets.newHashSet(FileSystemUtils.files(idx));
for (int i = 0; i < numIdxFiles; i++) {
final String name = Integer.toString(i);
idxFiles.contains(idx.resolve(name + ".tst"));
byte[] content = Files.readAllBytes(idx.resolve(name + ".tst"));
assertEquals(name , new String(content, StandardCharsets.UTF_8));
}
}
}
/**
* Run upgrade on a real bwc index
*/
public void testUpgradeRealIndex() throws IOException, URISyntaxException {
List<Path> indexes = new ArrayList<>();
try (DirectoryStream<Path> stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) {
for (Path path : stream) {
indexes.add(path);
}
}
CollectionUtil.introSort(indexes, new Comparator<Path>() {
@Override
public int compare(Path o1, Path o2) {
return o1.getFileName().compareTo(o2.getFileName());
}
});
final ShardId shardId = new ShardId("test", 0);
final Path path = randomFrom(indexes);
final Path indexFile = path;
final String indexName = indexFile.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
if (nodeEnvironment.nodeDataPaths().length == 1) {
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
assertFalse(helper.needsUpgrading(shardId));
return;
}
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
// decompress the index
try (InputStream stream = Files.newInputStream(indexFile)) {
TestUtil.unzip(stream, unzipDir);
}
// check it is unique
assertTrue(Files.exists(unzipDataDir));
Path[] list = FileSystemUtils.files(unzipDataDir);
if (list.length != 1) {
throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length);
}
// the bwc scripts packs the indices under this path
Path src = list[0].resolve("nodes/0/indices/" + indexName);
assertTrue("[" + indexFile + "] missing index dir: " + src.toString(), Files.exists(src));
Path[] multiDataPath = new Path[nodeEnvironment.nodeDataPaths().length];
int i = 0;
for (NodeEnvironment.NodePath nodePath : nodeEnvironment.nodePaths()) {
multiDataPath[i++] = nodePath.indicesPath;
}
logger.info("--> injecting index [{}] into multiple data paths", indexName);
OldIndexBackwardsCompatibilityIT.copyIndex(logger, src, indexName, multiDataPath);
final ShardPath shardPath = new ShardPath(false, nodeEnvironment.availableShardPaths(new ShardId(indexName, 0))[0], nodeEnvironment.availableShardPaths(new ShardId(indexName, 0))[0], IndexMetaData.INDEX_UUID_NA_VALUE, new ShardId(indexName, 0));
logger.info("{}", (Object)FileSystemUtils.files(shardPath.resolveIndex()));
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
helper.upgrade(new ShardId(indexName, 0), shardPath);
helper.checkIndex(shardPath);
assertFalse(helper.needsUpgrading(new ShardId(indexName, 0)));
}
}
public void testNeedsUpgrade() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
String uuid = Strings.randomBase64UUID();
final ShardId shardId = new ShardId("foo", 0);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(1, true, uuid, AllocationId.newInitializing()), 1, nodeEnvironment.availableShardPaths(shardId));
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
boolean multiDataPaths = nodeEnvironment.nodeDataPaths().length > 1;
boolean needsUpgrading = helper.needsUpgrading(shardId);
if (multiDataPaths) {
assertTrue(needsUpgrading);
} else {
assertFalse(needsUpgrading);
}
}
}
public void testPickTargetShardPath() throws IOException {
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
final ShardId shard = new ShardId("foo", 0);
final Path[] paths = nodeEnvironment.availableShardPaths(shard);
if (paths.length == 1) {
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment);
try {
helper.pickShardPath(new ShardId("foo", 0));
fail("one path needs no upgrading");
} catch (IllegalStateException ex) {
// only one path
}
} else {
final Map<Path, Tuple<Long, Long>> pathToSpace = new HashMap<>();
final Path expectedPath;
if (randomBoolean()) { // path with most of the file bytes
expectedPath = randomFrom(paths);
long[] used = new long[paths.length];
long sumSpaceUsed = 0;
for (int i = 0; i < used.length; i++) {
long spaceUsed = paths[i] == expectedPath ? randomIntBetween(101, 200) : randomIntBetween(10, 100);
sumSpaceUsed += spaceUsed;
used[i] = spaceUsed;
}
for (int i = 0; i < used.length; i++) {
long availalbe = randomIntBetween((int)(2*sumSpaceUsed-used[i]), 4 * (int)sumSpaceUsed);
pathToSpace.put(paths[i], new Tuple<>(availalbe, used[i]));
}
} else { // path with largest available space
expectedPath = randomFrom(paths);
long[] used = new long[paths.length];
long sumSpaceUsed = 0;
for (int i = 0; i < used.length; i++) {
long spaceUsed = randomIntBetween(10, 100);
sumSpaceUsed += spaceUsed;
used[i] = spaceUsed;
}
for (int i = 0; i < used.length; i++) {
long availalbe = paths[i] == expectedPath ? randomIntBetween((int)(sumSpaceUsed), (int)(2*sumSpaceUsed)) : randomIntBetween(0, (int)(sumSpaceUsed) - 1) ;
pathToSpace.put(paths[i], new Tuple<>(availalbe, used[i]));
}
}
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment) {
@Override
protected long getUsabelSpace(NodeEnvironment.NodePath path) throws IOException {
return pathToSpace.get(path.resolve(shard)).v1();
}
@Override
protected long getSpaceUsedByShard(Path path) throws IOException {
return pathToSpace.get(path).v2();
}
};
String uuid = Strings.randomBase64UUID();
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(1, true, uuid, AllocationId.newInitializing()), 1, paths);
final ShardPath shardPath = helper.pickShardPath(new ShardId("foo", 0));
assertEquals(expectedPath, shardPath.getDataPath());
assertEquals(expectedPath, shardPath.getShardStatePath());
}
MultiDataPathUpgrader helper = new MultiDataPathUpgrader(nodeEnvironment) {
@Override
protected long getUsabelSpace(NodeEnvironment.NodePath path) throws IOException {
return randomIntBetween(0, 10);
}
@Override
protected long getSpaceUsedByShard(Path path) throws IOException {
return randomIntBetween(11, 20);
}
};
try {
helper.pickShardPath(new ShardId("foo", 0));
fail("not enough space");
} catch (IllegalStateException ex) {
// not enough space
}
}
}
}

View File

@ -46,7 +46,7 @@ public class DiscoveryModuleTests extends ModuleTestCase {
public void testRegisterMasterElectionService() {
Settings settings = Settings.builder().put("node.local", false).
put(DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_KEY, "custom").build();
put(DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING.getKey(), "custom").build();
DiscoveryModule module = new DiscoveryModule(settings);
module.addElectMasterService("custom", DummyMasterElectionService.class);
assertBinding(module, ElectMasterService.class, DummyMasterElectionService.class);
@ -55,7 +55,7 @@ public class DiscoveryModuleTests extends ModuleTestCase {
public void testLoadUnregisteredMasterElectionService() {
Settings settings = Settings.builder().put("node.local", false).
put(DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_KEY, "foobar").build();
put(DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING.getKey(), "foobar").build();
DiscoveryModule module = new DiscoveryModule(settings);
module.addElectMasterService("custom", DummyMasterElectionService.class);
assertBindingFailure(module, "Unknown master service type [foobar]");
@ -71,7 +71,7 @@ public class DiscoveryModuleTests extends ModuleTestCase {
public void testRegisterDiscovery() {
boolean local = randomBoolean();
Settings settings = Settings.builder().put("node.local", local).
put(DiscoveryModule.DISCOVERY_TYPE_KEY, "custom").build();
put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "custom").build();
DiscoveryModule module = new DiscoveryModule(settings);
module.addDiscoveryType("custom", DummyDisco.class);
assertBinding(module, Discovery.class, DummyDisco.class);

View File

@ -45,6 +45,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.math.MathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.zen.ZenDiscovery;
@ -163,8 +164,8 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
}
final static Settings DEFAULT_SETTINGS = Settings.builder()
.put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly
.put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s") // for hitting simulated network failures quickly
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1") // for hitting simulated network failures quickly
.put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
.put("http.enabled", false) // just to make test quicker
@ -465,7 +466,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
logger.info("[{}] Acquired semaphore and it has {} permits left", name, semaphore.availablePermits());
try {
id = Integer.toString(idGenerator.incrementAndGet());
int shard = Murmur3HashFunction.hash(id) % numPrimaries;
int shard = MathUtils.mod(Murmur3HashFunction.hash(id), numPrimaries);
logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
IndexResponse response = client.prepareIndex("test", "type", id).setSource("{}").setTimeout("1s").get();
assertThat(response.getVersion(), equalTo(1l));
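
The switch to MathUtils.mod fixes shard targeting for negative hashes: Java's % keeps the dividend's sign, so a negative Murmur3 hash modulo numPrimaries could yield a negative shard id. For example (the non-negative remainder shown is assumed to match what MathUtils.mod computes):

    int hash = -7, numPrimaries = 5;
    int wrong = hash % numPrimaries;                                    // -2: not a valid shard
    int right = ((hash % numPrimaries) + numPrimaries) % numPrimaries;  // 3: always in [0, numPrimaries)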
@ -961,7 +962,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
// don't wait for initial state, we want to add the disruption while the cluster is forming..
internalCluster().startNodesAsync(3,
Settings.builder()
.put(DiscoveryService.SETTING_INITIAL_STATE_TIMEOUT, "1ms")
.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "1ms")
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s")
.build()).get();

View File

@ -131,8 +131,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
Settings.Builder settings = Settings.builder();
boolean shouldRetry = randomBoolean();
// make sure we don't ping again after the initial ping
settings.put(FaultDetection.SETTING_CONNECT_ON_NETWORK_DISCONNECT, shouldRetry)
.put(FaultDetection.SETTING_PING_INTERVAL, "5m");
settings.put(FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING.getKey(), shouldRetry)
.put(FaultDetection.PING_INTERVAL_SETTING.getKey(), "5m");
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(buildNodesForA(true)).build();
NodesFaultDetection nodesFDA = new NodesFaultDetection(settings.build(), threadPool, serviceA, clusterState.getClusterName());
nodesFDA.setLocalNode(nodeA);
@ -179,8 +179,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
Settings.Builder settings = Settings.builder();
boolean shouldRetry = randomBoolean();
// make sure we don't ping
settings.put(FaultDetection.SETTING_CONNECT_ON_NETWORK_DISCONNECT, shouldRetry)
.put(FaultDetection.SETTING_PING_INTERVAL, "5m");
settings.put(FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING.getKey(), shouldRetry)
.put(FaultDetection.PING_INTERVAL_SETTING.getKey(), "5m");
ClusterName clusterName = new ClusterName(randomAsciiOfLengthBetween(3, 20));
final ClusterState state = ClusterState.builder(clusterName).nodes(buildNodesForA(false)).build();
MasterFaultDetection masterFD = new MasterFaultDetection(settings.build(), threadPool, serviceA, clusterName,

View File

@ -95,8 +95,8 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
Settings defaultSettings = Settings.builder()
.put(FaultDetection.SETTING_PING_TIMEOUT, "1s")
.put(FaultDetection.SETTING_PING_RETRIES, "1")
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s")
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1")
.put("discovery.type", "zen")
.build();
@ -142,8 +142,8 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
@TestLogging(value = "action.admin.cluster.health:TRACE")
public void testNodeFailuresAreProcessedOnce() throws ExecutionException, InterruptedException, IOException {
Settings defaultSettings = Settings.builder()
.put(FaultDetection.SETTING_PING_TIMEOUT, "1s")
.put(FaultDetection.SETTING_PING_RETRIES, "1")
.put(FaultDetection.PING_TIMEOUT_SETTING.getKey(), "1s")
.put(FaultDetection.PING_RETRIES_SETTING.getKey(), "1")
.put("discovery.type", "zen")
.build();

View File

@ -40,8 +40,8 @@ public class EnvironmentTests extends ESTestCase {
public Environment newEnvironment(Settings settings) throws IOException {
Settings build = Settings.builder()
.put(settings)
.put("path.home", createTempDir().toAbsolutePath())
.putArray("path.data", tmpPaths()).build();
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath())
.putArray(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).build();
return new Environment(build);
}
@ -49,7 +49,7 @@ public class EnvironmentTests extends ESTestCase {
Environment environment = newEnvironment();
assertThat(environment.resolveRepoFile("/test/repos/repo1"), nullValue());
assertThat(environment.resolveRepoFile("test/repos/repo1"), nullValue());
environment = newEnvironment(settingsBuilder().putArray("path.repo", "/test/repos", "/another/repos", "/test/repos/../other").build());
environment = newEnvironment(settingsBuilder().putArray(Environment.PATH_REPO_SETTING.getKey(), "/test/repos", "/another/repos", "/test/repos/../other").build());
assertThat(environment.resolveRepoFile("/test/repos/repo1"), notNullValue());
assertThat(environment.resolveRepoFile("test/repos/repo1"), notNullValue());
assertThat(environment.resolveRepoFile("/another/repos/repo1"), notNullValue());

Some files were not shown because too many files have changed in this diff