Merge branch 'master' into feature-suggest-refactoring

commit 186fa2f755
@@ -27,7 +27,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.http.netty.NettyHttpServerTransport;
 import org.elasticsearch.plugins.PluginInfo;
-import org.elasticsearch.transport.netty.NettyTransport;
+import org.elasticsearch.transport.TransportSettings;

 import java.io.FilePermission;
 import java.io.IOException;
@@ -277,10 +277,10 @@ final class Security {
 // see SocketPermission implies() code
 policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));
 // transport is waaaay overengineered
-Map<String, Settings> profiles = settings.getGroups("transport.profiles", true);
-if (!profiles.containsKey(NettyTransport.DEFAULT_PROFILE)) {
+Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings).getAsGroups();
+if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
 profiles = new HashMap<>(profiles);
-profiles.put(NettyTransport.DEFAULT_PROFILE, Settings.EMPTY);
+profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY);
 }

 // loop through all profiles and add permissions for each one, if its valid.
@@ -288,12 +288,10 @@ final class Security {
 for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
 Settings profileSettings = entry.getValue();
 String name = entry.getKey();
-String transportRange = profileSettings.get("port",
-settings.get("transport.tcp.port",
-NettyTransport.DEFAULT_PORT_RANGE));
+String transportRange = profileSettings.get("port", TransportSettings.PORT.get(settings));

 // a profile is only valid if its the default profile, or if it has an actual name and specifies a port
-boolean valid = NettyTransport.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
+boolean valid = TransportSettings.DEFAULT_PROFILE.equals(name) || (Strings.hasLength(name) && profileSettings.get("port") != null);
 if (valid) {
 // listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
 // see SocketPermission implies() code
@@ -110,9 +110,9 @@ public class TransportClient extends AbstractClient {

 private PluginsService newPluginService(final Settings settings) {
 final Settings.Builder settingsBuilder = settingsBuilder()
-.put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval
+.put(NettyTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
 .put(InternalSettingsPreparer.prepareSettings(settings))
-.put("network.server", false)
+.put(NettyTransport.NETWORK_SERVER.getKey(), false)
 .put(Node.NODE_CLIENT_SETTING.getKey(), true)
 .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
 return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
@@ -41,18 +41,19 @@ import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.elasticsearch.cluster.service.InternalClusterService;
 import org.elasticsearch.common.logging.ESLoggerFactory;
-import org.elasticsearch.common.util.concurrent.EsExecutors;
-import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.discovery.DiscoveryService;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.DiscoveryService;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.discovery.zen.elect.ElectMasterService;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.discovery.zen.fd.FaultDetection;
 import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.gateway.GatewayService;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
 import org.elasticsearch.http.netty.NettyHttpServerTransport;
 import org.elasticsearch.index.IndexSettings;
@@ -75,6 +76,8 @@ import org.elasticsearch.search.SearchService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportSettings;
+import org.elasticsearch.transport.netty.NettyTransport;

 import java.util.Arrays;
 import java.util.Collections;
@@ -124,7 +127,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
 }


-public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
+public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
+Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
 TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
 TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
 TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
@@ -202,8 +206,25 @@ public final class ClusterSettings extends AbstractScopedSettings {
 InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
 HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
 HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
-Transport.TRANSPORT_PROFILES_SETTING,
 Transport.TRANSPORT_TCP_COMPRESS,
+TransportSettings.TRANSPORT_PROFILES_SETTING,
+TransportSettings.PORT,
+NettyTransport.WORKER_COUNT,
+NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
+NettyTransport.CONNECTIONS_PER_NODE_BULK,
+NettyTransport.CONNECTIONS_PER_NODE_REG,
+NettyTransport.CONNECTIONS_PER_NODE_STATE,
+NettyTransport.CONNECTIONS_PER_NODE_PING,
+NettyTransport.PING_SCHEDULE,
+NettyTransport.TCP_BLOCKING_CLIENT,
+NettyTransport.TCP_CONNECT_TIMEOUT,
+NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
+NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
+NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
+NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
+NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
+NettyTransport.NETWORK_SERVER,
+NettyTransport.NETTY_BOSS_COUNT,
 NetworkService.GLOBAL_NETWORK_HOST_SETTING,
 NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
 NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
@@ -224,6 +245,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
 IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
 IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
 IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
+IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
 HunspellService.HUNSPELL_LAZY_LOAD,
 HunspellService.HUNSPELL_IGNORE_CASE,
 HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
@@ -277,5 +299,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
 Client.CLIENT_TYPE_SETTING_S,
 InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING,
 ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING,
-EsExecutors.PROCESSORS_SETTING)));
+EsExecutors.PROCESSORS_SETTING,
+ThreadContext.DEFAULT_HEADERS_SETTING)));
 }
@@ -368,7 +368,15 @@ public class Setting<T> extends ToXContentToBytes {
 }

 public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) {
-return new Setting<>(key, (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope);
+return byteSizeSetting(key, (s) -> value.toString(), dynamic, scope);
+}
+
+public static Setting<ByteSizeValue> byteSizeSetting(String key, Setting<ByteSizeValue> fallbackSettings, boolean dynamic, Scope scope) {
+return byteSizeSetting(key, fallbackSettings::getRaw, dynamic, scope);
+}
+
+public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue, boolean dynamic, Scope scope) {
+return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope);
 }

 public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
@@ -521,7 +529,11 @@ public class Setting<T> extends ToXContentToBytes {
 }

 public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
-return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope);
+return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope);
+}
+
+public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, boolean dynamic, Scope scope) {
+return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope);
 }

 public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) {
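Note: the overloads above let one setting's default be another setting's raw value. A minimal illustrative sketch of that fallback chaining, using made-up "example.*" keys rather than anything from this change:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

class FallbackSettingSketch {
    // Base setting with an explicit default.
    static final Setting<ByteSizeValue> BUFFER_SIZE =
        Setting.byteSizeSetting("example.buffer_size", new ByteSizeValue(512, ByteSizeUnit.KB), false, Setting.Scope.CLUSTER);

    // Uses the new byteSizeSetting(key, fallbackSettings, dynamic, scope) overload:
    // if "example.buffer_size.min" is unset, it resolves to whatever BUFFER_SIZE resolves to.
    static final Setting<ByteSizeValue> MIN_BUFFER_SIZE =
        Setting.byteSizeSetting("example.buffer_size.min", BUFFER_SIZE, false, Setting.Scope.CLUSTER);

    static ByteSizeValue resolveMin(Settings settings) {
        return MIN_BUFFER_SIZE.get(settings); // explicit value > fallback's value > fallback's default (512kb)
    }
}

This is the same pattern NETTY_RECEIVE_PREDICTOR_MIN and NETTY_RECEIVE_PREDICTOR_MAX use further down, both falling back to NETTY_RECEIVE_PREDICTOR_SIZE.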
@@ -250,6 +250,12 @@ public class TimeValue implements Streamable {
 }
 }

+public static TimeValue parseTimeValue(String sValue, String settingName) {
+Objects.requireNonNull(settingName);
+Objects.requireNonNull(sValue);
+return parseTimeValue(sValue, null, settingName);
+}
+
 public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) {
 settingName = Objects.requireNonNull(settingName);
 if (sValue == null) {
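Note: the new two-argument overload requires a non-null value and setting name, while the existing three-argument form keeps returning the default for null input. Illustrative usage:

// Illustrative only.
TimeValue pingInterval = TimeValue.parseTimeValue("30s", "transport.ping_schedule");                 // 30 seconds
TimeValue fallback = TimeValue.parseTimeValue(null, TimeValue.timeValueSeconds(5), "some.setting");  // returns the 5s default
// TimeValue.parseTimeValue(null, "transport.ping_schedule") would throw a NullPointerException.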
@@ -23,6 +23,7 @@ import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;

 import java.io.Closeable;
@@ -62,6 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 public final class ThreadContext implements Closeable, Writeable<ThreadContext.ThreadContextStruct>{

 public static final String PREFIX = "request.headers";
+public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", false, Setting.Scope.CLUSTER);
 private final Map<String, String> defaultHeader;
 private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(Collections.emptyMap());
 private final ContextThreadLocal threadLocal;
@@ -71,7 +73,7 @@ public final class ThreadContext implements Closeable, Writeable<ThreadContext.ThreadContextStruct>{
 * @param settings the settings to read the default request headers from
 */
 public ThreadContext(Settings settings) {
-Settings headers = settings.getAsSettings(PREFIX);
+Settings headers = DEFAULT_HEADERS_SETTING.get(settings);
 if (headers == null) {
 this.defaultHeader = Collections.emptyMap();
 } else {
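Note: DEFAULT_HEADERS_SETTING keeps the existing request.headers.* configuration shape; it just reads the group through the Setting infrastructure. A small sketch, with an illustrative header name:

// Illustrative only: any key under "request.headers." becomes a default request header.
Settings settings = Settings.builder()
    .put("request.headers.X-Example-Header", "example-value")
    .build();
ThreadContext threadContext = new ThreadContext(settings); // defaultHeader now contains X-Example-Header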
@@ -854,6 +854,10 @@ public class IndexShard extends AbstractIndexShardComponent {
 if (state != IndexShardState.RECOVERING) {
 throw new IndexShardNotRecoveringException(shardId, state);
 }
+// We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive,
+// we still invoke any onShardInactive listeners ... we won't sync'd flush in this case because we only do that on primary and this
+// is a replica
+active.set(true);
 return engineConfig.getTranslogRecoveryPerformer().performBatchRecovery(getEngine(), operations);
 }

@@ -883,6 +887,11 @@ public class IndexShard extends AbstractIndexShardComponent {
 // but we need to make sure we don't loose deletes until we are done recovering
 engineConfig.setEnableGcDeletes(false);
 engineConfig.setCreate(indexExists == false);
+if (skipTranslogRecovery == false) {
+// We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive,
+// we still give sync'd flush a chance to run:
+active.set(true);
+}
 createNewEngine(skipTranslogRecovery, engineConfig);

 }
@@ -1043,6 +1052,10 @@ public class IndexShard extends AbstractIndexShardComponent {
 MetaDataStateFormat.deleteMetaState(shardPath().getDataPath());
 }

+public boolean isActive() {
+return active.get();
+}
+
 public ShardPath shardPath() {
 return path;
 }
@@ -1302,6 +1315,15 @@ public class IndexShard extends AbstractIndexShardComponent {
 assert this.currentEngineReference.get() == null;
 this.currentEngineReference.set(newEngine(skipTranslogRecovery, config));
 }
+
+// time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during which
+// settings changes could possibly have happened, so here we forcefully push any config changes to the new engine:
+Engine engine = getEngineOrNull();
+
+// engine could perhaps be null if we were e.g. concurrently closed:
+if (engine != null) {
+engine.onSettingsChanged();
+}
 }

 protected Engine newEngine(boolean skipTranslogRecovery, EngineConfig config) {
@@ -80,7 +80,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalListener
 * A setting to enable or disable request caching on an index level. Its dynamic by default
 * since we are checking on the cluster state IndexMetaData always.
 */
-public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", true, true, Setting.Scope.INDEX);
+public static final Setting<Boolean> INDEX_CACHE_REQUEST_ENABLED_SETTING = Setting.boolSetting("index.requests.cache.enable", false, true, Setting.Scope.INDEX);
 public static final Setting<TimeValue> INDICES_CACHE_REQUEST_CLEAN_INTERVAL = Setting.positiveTimeSetting("indices.requests.cache.clean_interval", TimeValue.timeValueSeconds(60), false, Setting.Scope.CLUSTER);

 public static final Setting<ByteSizeValue> INDICES_CACHE_QUERY_SIZE = Setting.byteSizeSetting("indices.requests.cache.size", "1%", false, Setting.Scope.CLUSTER);
@@ -22,7 +22,6 @@ package org.elasticsearch.transport;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;

@@ -36,7 +35,6 @@ import java.util.Map;
 public interface Transport extends LifecycleComponent<Transport> {


-Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER);
 Setting<Boolean> TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER);

 void transportServiceAdapter(TransportServiceAdapter service);
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport;
+
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * a collection of settings related to transport components, which are also needed in org.elasticsearch.bootstrap.Security
+ * This class should only contain static code which is *safe* to load before the security manager is enforced.
+ */
+final public class TransportSettings {
+
+public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Setting.Scope.CLUSTER);
+public static final String DEFAULT_PROFILE = "default";
+public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER);
+
+private TransportSettings() {
+
+}
+}
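Note: TransportSettings centralizes the port and profile keys so Security and NettyTransport resolve them the same way. A minimal sketch of reading them, mirroring the call sites elsewhere in this merge:

// Illustrative only.
Settings settings = Settings.builder().put("transport.tcp.port", "9500-9600").build();
String portRange = TransportSettings.PORT.get(settings);                         // "9500-9600"; "9300-9400" when unset
Settings profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings);  // empty unless transport.profiles.* keys are set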
@@ -24,7 +24,6 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.compress.CompressorFactory;
@@ -40,7 +39,9 @@ import org.elasticsearch.common.netty.OpenChannelsHandler;
 import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
 import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.network.NetworkService;
+import org.elasticsearch.common.network.NetworkService.TcpSettings;
 import org.elasticsearch.common.network.NetworkUtils;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
@@ -63,6 +64,7 @@ import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportServiceAdapter;
+import org.elasticsearch.transport.TransportSettings;
 import org.elasticsearch.transport.support.TransportStatus;
 import org.jboss.netty.bootstrap.ClientBootstrap;
 import org.jboss.netty.bootstrap.ServerBootstrap;
@@ -117,9 +119,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;

 import static java.util.Collections.unmodifiableMap;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_CLIENT;
 import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_SERVER;
-import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT;
 import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
 import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
 import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
@@ -148,16 +148,38 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 public static final String TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX = "transport_client_worker";
 public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";

-public static final String WORKER_COUNT = "transport.netty.worker_count";
-public static final String CONNECTIONS_PER_NODE_RECOVERY = "transport.connections_per_node.recovery";
-public static final String CONNECTIONS_PER_NODE_BULK = "transport.connections_per_node.bulk";
-public static final String CONNECTIONS_PER_NODE_REG = "transport.connections_per_node.reg";
-public static final String CONNECTIONS_PER_NODE_STATE = "transport.connections_per_node.state";
-public static final String CONNECTIONS_PER_NODE_PING = "transport.connections_per_node.ping";
-public static final String PING_SCHEDULE = "transport.ping_schedule"; // the scheduled internal ping interval setting
-public static final TimeValue DEFAULT_PING_SCHEDULE = TimeValue.timeValueMillis(-1); // the default ping schedule, defaults to disabled (-1)
-public static final String DEFAULT_PORT_RANGE = "9300-9400";
-public static final String DEFAULT_PROFILE = "default";
+public static final Setting<Integer> WORKER_COUNT = new Setting<>("transport.netty.worker_count",
+(s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"),
+false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY = Setting.intSetting("transport.connections_per_node.recovery", 2, 1, false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> CONNECTIONS_PER_NODE_BULK = Setting.intSetting("transport.connections_per_node.bulk", 3, 1, false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> CONNECTIONS_PER_NODE_REG = Setting.intSetting("transport.connections_per_node.reg", 6, 1, false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> CONNECTIONS_PER_NODE_STATE = Setting.intSetting("transport.connections_per_node.state", 1, 1, false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = Setting.intSetting("transport.connections_per_node.ping", 1, 1, false, Setting.Scope.CLUSTER);
+// the scheduled internal ping interval setting, defaults to disabled (-1)
+public static final Setting<TimeValue> PING_SCHEDULE = Setting.timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER);
+public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport." + TcpSettings.TCP_BLOCKING_CLIENT.getKey(), TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER);
+public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport." + TcpSettings.TCP_CONNECT_TIMEOUT.getKey(), TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
+public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER);
+
+
+// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
+public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("transport.netty.receive_predictor_size",
+settings -> {
+long defaultReceiverPredictor = 512 * 1024;
+if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
+// we can guess a better default...
+long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / WORKER_COUNT.get(settings));
+defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
+}
+return new ByteSizeValue(defaultReceiverPredictor).toString();
+}, false, Setting.Scope.CLUSTER);
+public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN = Setting.byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
+public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = Setting.byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
+public static final Setting<Integer> NETTY_BOSS_COUNT = Setting.intSetting("transport.netty.boss_count", 1, 1, false, Setting.Scope.CLUSTER);
+
+public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);

 protected final NetworkService networkService;
 protected final Version version;
@@ -203,8 +225,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 final ScheduledPing scheduledPing;

 @Inject
-@SuppressForbidden(reason = "sets org.jboss.netty.epollBugWorkaround based on netty.epollBugWorkaround")
-// TODO: why be confusing like this? just let the user do it with the netty parameter instead!
 public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version, NamedWriteableRegistry namedWriteableRegistry) {
 super(settings);
 this.threadPool = threadPool;
@@ -212,44 +232,22 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 this.bigArrays = bigArrays;
 this.version = version;

-if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
-System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
-}
-
-this.workerCount = settings.getAsInt(WORKER_COUNT, EsExecutors.boundedNumberOfProcessors(settings) * 2);
-this.blockingClient = settings.getAsBoolean("transport.netty.transport.tcp.blocking_client", TCP_BLOCKING_CLIENT.get(settings));
-this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", TCP_CONNECT_TIMEOUT.get(settings)));
-this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null);
-this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1);
+this.workerCount = WORKER_COUNT.get(settings);
+this.blockingClient = TCP_BLOCKING_CLIENT.get(settings);
+this.connectTimeout = TCP_CONNECT_TIMEOUT.get(settings);
+this.maxCumulationBufferCapacity = NETTY_MAX_CUMULATION_BUFFER_CAPACITY.get(settings);
+this.maxCompositeBufferComponents = NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
 this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings);

-this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery", settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2));
-this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk", settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3));
-this.connectionsPerNodeReg = this.settings.getAsInt("transport.netty.connections_per_node.reg", settings.getAsInt(CONNECTIONS_PER_NODE_REG, 6));
-this.connectionsPerNodeState = this.settings.getAsInt("transport.netty.connections_per_node.high", settings.getAsInt(CONNECTIONS_PER_NODE_STATE, 1));
-this.connectionsPerNodePing = this.settings.getAsInt("transport.netty.connections_per_node.ping", settings.getAsInt(CONNECTIONS_PER_NODE_PING, 1));
-
-// we want to have at least 1 for reg/state/ping
-if (this.connectionsPerNodeReg == 0) {
-throw new IllegalArgumentException("can't set [connection_per_node.reg] to 0");
-}
-if (this.connectionsPerNodePing == 0) {
-throw new IllegalArgumentException("can't set [connection_per_node.ping] to 0");
-}
-if (this.connectionsPerNodeState == 0) {
-throw new IllegalArgumentException("can't set [connection_per_node.state] to 0");
-}
-
-long defaultReceiverPredictor = 512 * 1024;
-if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
-// we can guess a better default...
-long l = (long) ((0.3 * JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes()) / workerCount);
-defaultReceiverPredictor = Math.min(defaultReceiverPredictor, Math.max(l, 64 * 1024));
-}
+this.connectionsPerNodeRecovery = CONNECTIONS_PER_NODE_RECOVERY.get(settings);
+this.connectionsPerNodeBulk = CONNECTIONS_PER_NODE_BULK.get(settings);
+this.connectionsPerNodeReg = CONNECTIONS_PER_NODE_REG.get(settings);
+this.connectionsPerNodeState = CONNECTIONS_PER_NODE_STATE.get(settings);
+this.connectionsPerNodePing = CONNECTIONS_PER_NODE_PING.get(settings);

 // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
-this.receivePredictorMin = this.settings.getAsBytesSize("transport.netty.receive_predictor_min", this.settings.getAsBytesSize("transport.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
-this.receivePredictorMax = this.settings.getAsBytesSize("transport.netty.receive_predictor_max", this.settings.getAsBytesSize("transport.netty.receive_predictor_size", new ByteSizeValue(defaultReceiverPredictor)));
+this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
+this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings);
 if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
 receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
 } else {
@@ -257,7 +255,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 }

 this.scheduledPing = new ScheduledPing();
-this.pingSchedule = settings.getAsTime(PING_SCHEDULE, DEFAULT_PING_SCHEDULE);
+this.pingSchedule = PING_SCHEDULE.get(settings);
 if (pingSchedule.millis() > 0) {
 threadPool.schedule(pingSchedule, ThreadPool.Names.GENERIC, scheduledPing);
 }
@@ -286,19 +284,19 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 boolean success = false;
 try {
 clientBootstrap = createClientBootstrap();
-if (settings.getAsBoolean("network.server", true)) {
+if (NETWORK_SERVER.get(settings)) {
 final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger);
 this.serverOpenChannels = openChannels;

 // extract default profile first and create standard bootstrap
-Map<String, Settings> profiles = TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true);
-if (!profiles.containsKey(DEFAULT_PROFILE)) {
+Map<String, Settings> profiles = TransportSettings.TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true);
+if (!profiles.containsKey(TransportSettings.DEFAULT_PROFILE)) {
 profiles = new HashMap<>(profiles);
-profiles.put(DEFAULT_PROFILE, Settings.EMPTY);
+profiles.put(TransportSettings.DEFAULT_PROFILE, Settings.EMPTY);
 }

 Settings fallbackSettings = createFallbackSettings();
-Settings defaultSettings = profiles.get(DEFAULT_PROFILE);
+Settings defaultSettings = profiles.get(TransportSettings.DEFAULT_PROFILE);

 // loop through all profiles and start them up, special handling for default one
 for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
@@ -308,10 +306,10 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 if (!Strings.hasLength(name)) {
 logger.info("transport profile configured without a name. skipping profile with settings [{}]", profileSettings.toDelimitedString(','));
 continue;
-} else if (DEFAULT_PROFILE.equals(name)) {
+} else if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
 profileSettings = settingsBuilder()
 .put(profileSettings)
-.put("port", profileSettings.get("port", this.settings.get("transport.tcp.port", DEFAULT_PORT_RANGE)))
+.put("port", profileSettings.get("port", TransportSettings.PORT.get(this.settings)))
 .build();
 } else if (profileSettings.get("port") == null) {
 // if profile does not have a port, skip it
@@ -348,7 +346,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 if (blockingClient) {
 clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX))));
 } else {
-int bossCount = settings.getAsInt("transport.netty.boss_count", 1);
+int bossCount = NETTY_BOSS_COUNT.get(settings);
 clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(
 Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)),
 bossCount,
@@ -443,7 +441,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {

 final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(name, settings, boundAddresses);

-if (DEFAULT_PROFILE.equals(name)) {
+if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
 this.boundAddress = boundTransportAddress;
 } else {
 profileBoundAddresses.put(name, boundTransportAddress);
@@ -496,7 +494,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 }

 final String[] publishHosts;
-if (DEFAULT_PROFILE.equals(name)) {
+if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
 publishHosts = settings.getAsArray("transport.netty.publish_host", settings.getAsArray("transport.publish_host", settings.getAsArray("transport.host", null)));
 } else {
 publishHosts = profileSettings.getAsArray("publish_host", boundAddressesHostStrings);
@@ -510,7 +508,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 }

 Integer publishPort;
-if (DEFAULT_PROFILE.equals(name)) {
+if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
 publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", null));
 } else {
 publishPort = profileSettings.getAsInt("publish_port", null);
@@ -667,10 +665,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {

 @Override
 public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
-return parse(address, settings.get("transport.profiles.default.port",
-settings.get("transport.netty.port",
-settings.get("transport.tcp.port",
-DEFAULT_PORT_RANGE))), perAddressLimit);
+return parse(address, settings.get("transport.profiles.default.port", TransportSettings.PORT.get(settings)), perAddressLimit);
 }

 // this code is a take on guava's HostAndPort, like a HostAndPortRange
@@ -1140,7 +1135,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 public ChannelPipeline getPipeline() throws Exception {
 ChannelPipeline channelPipeline = Channels.pipeline();
 SizeHeaderFrameDecoder sizeHeader = new SizeHeaderFrameDecoder();
-if (nettyTransport.maxCumulationBufferCapacity != null) {
+if (nettyTransport.maxCumulationBufferCapacity.bytes() >= 0) {
 if (nettyTransport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
 sizeHeader.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
 } else {
@@ -1178,7 +1173,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
 ChannelPipeline channelPipeline = Channels.pipeline();
 channelPipeline.addLast("openChannels", nettyTransport.serverOpenChannels);
 SizeHeaderFrameDecoder sizeHeader = new SizeHeaderFrameDecoder();
-if (nettyTransport.maxCumulationBufferCapacity != null) {
+if (nettyTransport.maxCumulationBufferCapacity.bytes() > 0) {
 if (nettyTransport.maxCumulationBufferCapacity.bytes() > Integer.MAX_VALUE) {
 sizeHeader.setMaxCumulationBufferCapacity(Integer.MAX_VALUE);
 } else {
@@ -22,6 +22,7 @@ package org.elasticsearch.bwcompat;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESBackcompatTestCase;
+import org.elasticsearch.transport.TransportSettings;

 import static org.hamcrest.Matchers.equalTo;

@@ -30,7 +31,7 @@ public class UnicastBackwardsCompatibilityIT extends ESBackcompatTestCase {
 protected Settings nodeSettings(int nodeOrdinal) {
 return Settings.builder()
 .put(super.nodeSettings(nodeOrdinal))
-.put("transport.tcp.port", 9380 + nodeOrdinal)
+.put(TransportSettings.PORT.getKey(), 9380 + nodeOrdinal)
 .put("discovery.zen.ping.unicast.hosts", "localhost:9380,localhost:9381,localhost:9390,localhost:9391")
 .build();
 }
@@ -39,7 +40,7 @@ public class UnicastBackwardsCompatibilityIT extends ESBackcompatTestCase {
 protected Settings externalNodeSettings(int nodeOrdinal) {
 return Settings.settingsBuilder()
 .put(super.externalNodeSettings(nodeOrdinal))
-.put("transport.tcp.port", 9390 + nodeOrdinal)
+.put(TransportSettings.PORT.getKey(), 9390 + nodeOrdinal)
 .put("discovery.zen.ping.unicast.hosts", "localhost:9380,localhost:9381,localhost:9390,localhost:9391")
 .build();
 }
@@ -44,6 +44,12 @@ public class SettingTests extends ESTestCase {
 assertFalse(byteSizeValueSetting.isGroupSetting());
 ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
 assertEquals(byteSizeValue.bytes(), 1024);
+
+byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", true, Setting.Scope.CLUSTER);
+byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
+assertEquals(byteSizeValue.bytes(), 2048);
+

 AtomicReference<ByteSizeValue> value = new AtomicReference<>(null);
 ClusterSettings.SettingUpdater<ByteSizeValue> settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger);
 try {
@@ -170,7 +170,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
 .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out
 .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly
 .put("http.enabled", false) // just to make test quicker
-.put("gateway.local.list_timeout", "10s") // still long to induce failures but to long so test won't time out
 .build();

 @Override
@@ -37,6 +37,7 @@ import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportSettings;
 import org.elasticsearch.transport.netty.NettyTransport;

 import java.net.InetSocketAddress;
@@ -48,7 +49,7 @@ public class UnicastZenPingIT extends ESTestCase {
         Settings settings = Settings.EMPTY;
         int startPort = 11000 + randomIntBetween(0, 1000);
         int endPort = startPort + 10;
-        settings = Settings.builder().put(settings).put("transport.tcp.port", startPort + "-" + endPort).build();
+        settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), startPort + "-" + endPort).build();
 
         ThreadPool threadPool = new ThreadPool(getClass().getName());
         ClusterName clusterName = new ClusterName("test");
@@ -1087,4 +1087,65 @@ public class IndexShardTests extends ESSingleNodeTestCase {
         newShard.performBatchRecovery(operations);
         assertFalse(newShard.getTranslog().syncNeeded());
     }
+
+    public void testIndexingBufferDuringInternalRecovery() throws IOException {
+        createIndex("index");
+        client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject()
+                .startObject("testtype")
+                .startObject("properties")
+                .startObject("foo")
+                .field("type", "string")
+                .endObject()
+                .endObject().endObject().endObject()).get();
+        ensureGreen();
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService test = indicesService.indexService("index");
+        IndexShard shard = test.getShardOrNull(0);
+        ShardRouting routing = new ShardRouting(shard.routingEntry());
+        test.removeShard(0, "b/c britta says so");
+        IndexShard newShard = test.createShard(routing);
+        newShard.shardRouting = routing;
+        DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
+        newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode));
+        // Shard is still inactive since we haven't started recovering yet
+        assertFalse(newShard.isActive());
+        newShard.prepareForIndexRecovery();
+        // Shard is still inactive since we haven't started recovering yet
+        assertFalse(newShard.isActive());
+        newShard.performTranslogRecovery(true);
+        // Shard should now be active since we did recover:
+        assertTrue(newShard.isActive());
+    }
+
+    public void testIndexingBufferDuringPeerRecovery() throws IOException {
+        createIndex("index");
+        client().admin().indices().preparePutMapping("index").setType("testtype").setSource(jsonBuilder().startObject()
+                .startObject("testtype")
+                .startObject("properties")
+                .startObject("foo")
+                .field("type", "string")
+                .endObject()
+                .endObject().endObject().endObject()).get();
+        ensureGreen();
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService test = indicesService.indexService("index");
+        IndexShard shard = test.getShardOrNull(0);
+        ShardRouting routing = new ShardRouting(shard.routingEntry());
+        test.removeShard(0, "b/c britta says so");
+        IndexShard newShard = test.createShard(routing);
+        newShard.shardRouting = routing;
+        DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
+        newShard.markAsRecovering("for testing", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.REPLICA, localNode, localNode));
+        // Shard is still inactive since we haven't started recovering yet
+        assertFalse(newShard.isActive());
+        List<Translog.Operation> operations = new ArrayList<>();
+        operations.add(new Translog.Index("testtype", "1", jsonBuilder().startObject().field("foo", "bar").endObject().bytes().toBytes()));
+        newShard.prepareForIndexRecovery();
+        newShard.skipTranslogRecovery();
+        // Shard is still inactive since we haven't started recovering yet
+        assertFalse(newShard.isActive());
+        newShard.performBatchRecovery(operations);
+        // Shard should now be active since we did recover:
+        assertTrue(newShard.isActive());
+    }
 }
@@ -53,7 +53,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase {
     private final Settings settings = settingsBuilder()
             .put("name", "foo")
             .put("transport.host", "127.0.0.1")
-            .put("transport.tcp.port", "0")
+            .put(TransportSettings.PORT.getKey(), "0")
             .build();
 
     private ThreadPool threadPool;
@@ -36,6 +36,7 @@ import org.elasticsearch.transport.TransportRequestHandler;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportResponseOptions;
+import org.elasticsearch.transport.TransportSettings;
 
 import java.io.IOException;
 
@@ -49,7 +50,7 @@ public class NettyScheduledPingTests extends ESTestCase {
     public void testScheduledPing() throws Exception {
         ThreadPool threadPool = new ThreadPool(getClass().getName());
 
-        Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE, "5ms").put("transport.tcp.port", 0).build();
+        Settings settings = Settings.builder().put(NettyTransport.PING_SCHEDULE.getKey(), "5ms").put(TransportSettings.PORT.getKey(), 0).build();
 
         final NettyTransport nettyA = new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, new NamedWriteableRegistry());
         MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
@@ -43,6 +43,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ActionNotFoundTransportException;
 import org.elasticsearch.transport.RequestHandlerRegistry;
 import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportSettings;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelPipeline;
 import org.jboss.netty.channel.ChannelPipelineFactory;
@@ -85,7 +86,7 @@ public class NettyTransportIT extends ESIntegTestCase {
             fail("Expected exception, but didnt happen");
         } catch (ElasticsearchException e) {
             assertThat(e.getMessage(), containsString("MY MESSAGE"));
-            assertThat(channelProfileName, is(NettyTransport.DEFAULT_PROFILE));
+            assertThat(channelProfileName, is(TransportSettings.DEFAULT_PROFILE));
         }
     }
 
@@ -127,7 +128,7 @@ public class NettyTransportIT extends ESIntegTestCase {
         @Override
         public ChannelPipeline getPipeline() throws Exception {
             ChannelPipeline pipeline = super.getPipeline();
-            pipeline.replace("dispatcher", "dispatcher", new MessageChannelHandler(nettyTransport, logger, NettyTransport.DEFAULT_PROFILE) {
+            pipeline.replace("dispatcher", "dispatcher", new MessageChannelHandler(nettyTransport, logger, TransportSettings.DEFAULT_PROFILE) {
 
                 @Override
                 protected String handleRequest(Channel channel, StreamInput buffer, long requestId, Version version) throws IOException {
@@ -31,6 +31,7 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportSettings;
 import org.junit.Before;
 
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -52,7 +53,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
     public void testThatNettyCanBindToMultiplePorts() throws Exception {
         Settings settings = settingsBuilder()
                 .put("network.host", host)
-                .put("transport.tcp.port", 22) // will not actually bind to this
+                .put(TransportSettings.PORT.getKey(), 22) // will not actually bind to this
                 .put("transport.profiles.default.port", 0)
                 .put("transport.profiles.client1.port", 0)
                 .build();
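This test and the ones that follow combine the typed PORT key with per-profile keys under transport.profiles.*. A minimal sketch of that shape, assuming only the settings keys shown in these hunks; the host and profile values are illustrative.

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.transport.TransportSettings;

    // Sketch of the profile-style configuration built by the multi-port tests:
    // the default profile gets the top-level port, a named profile gets its own.
    public class MultiPortSettingsSketch {
        public static void main(String[] args) {
            Settings settings = Settings.settingsBuilder()
                    .put("network.host", "127.0.0.1")           // hypothetical host
                    .put(TransportSettings.PORT.getKey(), 0)    // default profile, ephemeral port
                    .put("transport.profiles.client1.port", 0)  // extra profile with its own port
                    .build();
            System.out.println(settings.getAsMap());
        }
    }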
@@ -69,7 +70,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
     public void testThatDefaultProfileInheritsFromStandardSettings() throws Exception {
         Settings settings = settingsBuilder()
                 .put("network.host", host)
-                .put("transport.tcp.port", 0)
+                .put(TransportSettings.PORT.getKey(), 0)
                 .put("transport.profiles.client1.port", 0)
                 .build();
 
@@ -86,7 +87,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
 
         Settings settings = settingsBuilder()
                 .put("network.host", host)
-                .put("transport.tcp.port", 0)
+                .put(TransportSettings.PORT.getKey(), 0)
                 .put("transport.profiles.client1.whatever", "foo")
                 .build();
 
@@ -102,8 +103,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
     public void testThatDefaultProfilePortOverridesGeneralConfiguration() throws Exception {
         Settings settings = settingsBuilder()
                 .put("network.host", host)
-                .put("transport.tcp.port", 22) // will not actually bind to this
-                .put("transport.netty.port", 23) // will not actually bind to this
+                .put(TransportSettings.PORT.getKey(), 22) // will not actually bind to this
                 .put("transport.profiles.default.port", 0)
                 .build();
 
@@ -119,7 +119,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
     public void testThatProfileWithoutValidNameIsIgnored() throws Exception {
         Settings settings = settingsBuilder()
                 .put("network.host", host)
-                .put("transport.tcp.port", 0)
+                .put(TransportSettings.PORT.getKey(), 0)
                 // mimics someone trying to define a profile for .local which is the profile for a node request to itself
                 .put("transport.profiles." + TransportService.DIRECT_RESPONSE_PROFILE + ".port", 22) // will not actually bind to this
                 .put("transport.profiles..port", 23) // will not actually bind to this
@@ -29,6 +29,7 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.transport.AbstractSimpleTransportTestCase;
 import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.TransportSettings;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
@@ -39,7 +40,7 @@ public class SimpleNettyTransportTests extends AbstractSimpleTransportTestCase {
 
     @Override
     protected MockTransportService build(Settings settings, Version version, NamedWriteableRegistry namedWriteableRegistry) {
-        settings = Settings.builder().put(settings).put("transport.tcp.port", "0").build();
+        settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build();
         MockTransportService transportService = new MockTransportService(settings, new NettyTransport(settings, threadPool, new NetworkService(settings), BigArrays.NON_RECYCLING_INSTANCE, version, namedWriteableRegistry), threadPool);
         transportService.start();
         return transportService;
@@ -49,6 +49,7 @@ A number of analysis plugins have been contributed by our community:
 * https://github.com/imotov/elasticsearch-analysis-morphology[Russian and English Morphological Analysis Plugin] (by Igor Motov)
 * https://github.com/medcl/elasticsearch-analysis-pinyin[Pinyin Analysis Plugin] (by Medcl)
 * https://github.com/duydo/elasticsearch-analysis-vietnamese[Vietnamese Analysis Plugin] (by Duy Do)
+* https://github.com/ofir123/elasticsearch-network-analysis[Network Addresses Analysis Plugin] (by Ofir123)
 
 These community plugins appear to have been abandoned:
 
@@ -74,6 +74,7 @@ compound:: Whether the segment is stored in a compound file. When true, this
                means that Lucene merged all files from the segment in a single
                one in order to save file descriptors.
 
+[float]
 === Verbose mode
 
 To add additional information that can be used for debugging, use the `verbose` flag.
@@ -89,6 +89,7 @@ import org.elasticsearch.test.transport.AssertingLocalTransport;
 import org.elasticsearch.test.transport.MockTransportService;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.TransportSettings;
 import org.elasticsearch.transport.netty.NettyTransport;
 import org.junit.Assert;
 
@@ -288,7 +289,7 @@ public final class InternalTestCluster extends TestCluster {
         builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom"));
         builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
         builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));
-        builder.put("transport.tcp.port", TRANSPORT_BASE_PORT + "-" + (TRANSPORT_BASE_PORT + PORTS_PER_CLUSTER));
+        builder.put(TransportSettings.PORT.getKey(), TRANSPORT_BASE_PORT + "-" + (TRANSPORT_BASE_PORT + PORTS_PER_CLUSTER));
         builder.put("http.port", HTTP_BASE_PORT + "-" + (HTTP_BASE_PORT + PORTS_PER_CLUSTER));
         builder.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING.getKey(), true);
         builder.put(Node.NODE_MODE_SETTING.getKey(), nodeMode);
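InternalTestCluster passes a port range ("base-max") rather than a single port. A minimal sketch of that form, assuming TransportSettings.PORT.getKey() and a plain range string; the base port and range width are illustrative stand-ins for TRANSPORT_BASE_PORT and PORTS_PER_CLUSTER.

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.transport.TransportSettings;

    // Sketch of the port-range form used above; the constants are hypothetical.
    public class PortRangeSketch {
        public static void main(String[] args) {
            int basePort = 9300;       // assumption: illustrative base port
            int portsPerCluster = 20;  // assumption: illustrative range width
            Settings settings = Settings.builder()
                    .put(TransportSettings.PORT.getKey(), basePort + "-" + (basePort + portsPerCluster))
                    .build();
            System.out.println(settings.get(TransportSettings.PORT.getKey())); // "9300-9320"
        }
    }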
@@ -401,10 +402,10 @@ public final class InternalTestCluster extends TestCluster {
 
         // randomize netty settings
         if (random.nextBoolean()) {
-            builder.put(NettyTransport.WORKER_COUNT, random.nextInt(3) + 1);
-            builder.put(NettyTransport.CONNECTIONS_PER_NODE_RECOVERY, random.nextInt(2) + 1);
-            builder.put(NettyTransport.CONNECTIONS_PER_NODE_BULK, random.nextInt(3) + 1);
-            builder.put(NettyTransport.CONNECTIONS_PER_NODE_REG, random.nextInt(6) + 1);
+            builder.put(NettyTransport.WORKER_COUNT.getKey(), random.nextInt(3) + 1);
+            builder.put(NettyTransport.CONNECTIONS_PER_NODE_RECOVERY.getKey(), random.nextInt(2) + 1);
+            builder.put(NettyTransport.CONNECTIONS_PER_NODE_BULK.getKey(), random.nextInt(3) + 1);
+            builder.put(NettyTransport.CONNECTIONS_PER_NODE_REG.getKey(), random.nextInt(6) + 1);
         }
 
         if (random.nextBoolean()) {
@@ -444,7 +445,7 @@ public final class InternalTestCluster extends TestCluster {
         }
 
         if (random.nextBoolean()) {
-            builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms");
+            builder.put(NettyTransport.PING_SCHEDULE.getKey(), RandomInts.randomIntBetween(random, 100, 2000) + "ms");
         }
 
         if (random.nextBoolean()) {
@@ -27,6 +27,7 @@ import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.NodeConfigurationSource;
+import org.elasticsearch.transport.TransportSettings;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -113,7 +114,7 @@ public class ClusterDiscoveryConfiguration extends NodeConfigurationSource {
             throw new ElasticsearchException("nodeOrdinal [" + nodeOrdinal + "] is greater than the number unicast ports [" + unicastHostPorts.length + "]");
         } else {
             // we need to pin the node port & host so we'd know where to point things
-            builder.put("transport.tcp.port", unicastHostPorts[nodeOrdinal]);
+            builder.put(TransportSettings.PORT.getKey(), unicastHostPorts[nodeOrdinal]);
             builder.put("transport.host", IP_ADDR); // only bind on one IF we use v4 here by default
             builder.put("transport.bind_host", IP_ADDR);
             builder.put("transport.publish_host", IP_ADDR);
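ClusterDiscoveryConfiguration pins each node's transport port by ordinal so the unicast host list can point at known addresses. A minimal sketch of the same idea with hypothetical ports; the port array and host are examples, not the defaults used by the test framework.

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.transport.TransportSettings;

    // Sketch of pinning one node's transport port by ordinal.
    public class PinnedPortSketch {
        public static void main(String[] args) {
            int[] unicastHostPorts = {9380, 9381, 9382}; // assumption: example ports
            int nodeOrdinal = 1;
            Settings settings = Settings.builder()
                    .put(TransportSettings.PORT.getKey(), unicastHostPorts[nodeOrdinal])
                    .put("transport.host", "127.0.0.1") // bind a single known interface
                    .build();
            System.out.println(settings.getAsMap());
        }
    }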
@@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.NodeConfigurationSource;
+import org.elasticsearch.transport.TransportSettings;
 
 import java.nio.file.Path;
 import java.util.Collections;
@@ -72,8 +73,7 @@ public class InternalTestClusterTests extends ESTestCase {
 
     static {
         clusterUniqueSettings.add(ClusterName.CLUSTER_NAME_SETTING.getKey());
-        clusterUniqueSettings.add("transport.tcp.port");
-        clusterUniqueSettings.add("http.port");
+        clusterUniqueSettings.add(TransportSettings.PORT.getKey());
         clusterUniqueSettings.add("http.port");
     }
 