Merge branch 'master' into index-lifecycle

commit 3736097e19
@@ -21,6 +21,7 @@ package org.elasticsearch.gradle.precommit
import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask
import org.gradle.api.JavaVersion
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.file.FileCollection

@@ -101,6 +102,11 @@ class PrecommitTasks {
                signaturesURLs = project.forbiddenApis.signaturesURLs +
                    [ getClass().getResource('/forbidden/es-server-signatures.txt') ]
            }
            // forbidden apis doesn't support Java 11, so stop at 10
            String targetMajorVersion = (project.compilerJavaVersion.compareTo(JavaVersion.VERSION_1_10) > 0 ?
                JavaVersion.VERSION_1_10 :
                project.compilerJavaVersion).getMajorVersion()
            targetCompatibility = Integer.parseInt(targetMajorVersion) >= 9 ? targetMajorVersion : "1.${targetMajorVersion}"
        }
        Task forbiddenApis = project.tasks.findByName('forbiddenApis')
        forbiddenApis.group = "" // clear group, so this does not show up under verification tasks

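The hunk above caps the forbidden-apis target at Java 10 and converts the result into the compatibility string the plugin expects. A hedged, stand-alone Java rendering of the same clamping rule (names are illustrative, not part of the build):

// Hypothetical Java rendering of the Groovy clamping rule above.
final class ForbiddenApisTarget {
    static String targetFor(int compilerMajorVersion) {
        // forbidden-apis does not support Java 11 yet, so clamp to 10
        int target = Math.min(compilerMajorVersion, 10);
        // pre-9 versions are spelled "1.x" (e.g. "1.8"); 9 and later are plain ("10")
        return target >= 9 ? String.valueOf(target) : "1." + target;
    }
}

For example, targetFor(11) yields "10" while targetFor(8) yields "1.8", matching the targetCompatibility values the build computes.
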
@@ -23,7 +23,7 @@ There are a few concepts that are core to Elasticsearch. Understanding these con

[float]
=== Near Realtime (NRT)

Elasticsearch is a near real time search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable.
Elasticsearch is a near-realtime search platform. What this means is there is a slight latency (normally one second) from the time you index a document until the time it becomes searchable.

[float]
=== Cluster

@@ -59,7 +59,7 @@ In a single cluster, you can define as many indexes as you want.

deprecated[6.0.0,See <<removal-of-types>>]

A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, eg one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <<removal-of-types>> for more.
A type used to be a logical category/partition of your index to allow you to store different types of documents in the same index, e.g. one type for users, another type for blog posts. It is no longer possible to create multiple types in an index, and the whole concept of types will be removed in a later version. See <<removal-of-types>> for more.

[float]
=== Document

@@ -1066,7 +1066,7 @@ In the previous section, we skipped over a little detail called the document sco

But queries do not always need to produce scores, in particular when they are only used for "filtering" the document set. Elasticsearch detects these situations and automatically optimizes query execution in order not to compute useless scores.

The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering.
The {ref}/query-dsl-bool-query.html[`bool` query] that we introduced in the previous section also supports `filter` clauses which allow us to use a query to restrict the documents that will be matched by other clauses, without changing how scores are computed. As an example, let's introduce the {ref}/query-dsl-range-query.html[`range` query], which allows us to filter documents by a range of values. This is generally used for numeric or date filtering.

This example uses a bool query to return all accounts with balances between 20000 and 30000, inclusive. In other words, we want to find accounts with a balance that is greater than or equal to 20000 and less than or equal to 30000.

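The JSON body of that example is elided from this hunk. As a sketch, the equivalent query built with the Java QueryBuilders helpers (assuming the standard org.elasticsearch.index.query API) would be:

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

// bool query whose filter clause restricts matches to balances in
// [20000, 30000] without affecting scoring
BoolQueryBuilder query = QueryBuilders.boolQuery()
    .must(QueryBuilders.matchAllQuery())
    .filter(QueryBuilders.rangeQuery("balance").gte(20000).lte(30000));

Because the range clause sits in filter rather than must, it only restricts the document set; all matching documents keep the neutral score produced by the match-all clause.
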
@@ -503,3 +503,31 @@ guide to the {painless}/index.html[Painless Scripting Language].

See the {painless}/painless-api-reference.html[Painless API Reference] in
the guide to the {painless}/index.html[Painless Scripting Language].

[role="exclude",id="security-api-roles"]
=== Role management APIs

You can use the following APIs to add, remove, and retrieve roles in the native realm:

* <<security-api-put-role,Create role>>, <<security-api-delete-role,Delete role>>
* <<security-api-clear-role-cache,Clear roles cache>>
* <<security-api-get-role,Get roles>>

[role="exclude",id="security-api-tokens"]
=== Token management APIs

You can use the following APIs to create and invalidate bearer tokens for access
without requiring basic authentication:

* <<security-api-get-token,Get token>>, <<security-api-invalidate-token,Invalidate token>>

[role="exclude",id="security-api-users"]
=== User management APIs

You can use the following APIs to create, read, update, and delete users from the
native realm:

* <<security-api-put-user,Create users>>, <<security-api-delete-user,Delete users>>
* <<security-api-enable-user,Enable users>>, <<security-api-disable-user,Disable users>>
* <<security-api-change-password,Change passwords>>
* <<security-api-get-user,Get users>>

@@ -1056,6 +1056,33 @@ Specifies the supported protocols for TLS/SSL.

Specifies the
cipher suites that should be supported.

[float]
[[ref-kerberos-settings]]
===== Kerberos realm settings

For a Kerberos realm, the `type` must be set to `kerberos`. In addition to the
<<ref-realm-settings,settings that are valid for all realms>>, you can specify
the following settings:

`keytab.path`:: Specifies the path to the Kerberos keytab file that contains the
service principal used by this {es} node. This must be a location within the
{es} configuration directory and the file must have read permissions. Required.

`remove_realm_name`:: Set to `true` to remove the realm part of principal names.
Principal names in Kerberos have the form `user/instance@REALM`. If this option
is `true`, the realm part (`@REALM`) will not be included in the username.
Defaults to `false`.

`krb.debug`:: Set to `true` to enable debug logs for the Java login module that
provides support for Kerberos authentication. Defaults to `false`.

`cache.ttl`:: The time-to-live for cached user entries. A user is cached for
this period of time. Specify the time period using the standard {es}
<<time-units,time units>>. Defaults to `20m`.

`cache.max_users`:: The maximum number of user entries that can live in the
cache at any given time. Defaults to 100,000.
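A hedged sketch of a realm wired with these settings, expressed as node settings in Java; the xpack.security.authc.realms.<name> key prefix and the realm name kerb1 are assumptions for illustration, while the trailing setting names are the ones documented above:

import org.elasticsearch.common.settings.Settings;

// Hypothetical Kerberos realm configuration for a realm named "kerb1".
static Settings kerberosRealmSettings() {
    return Settings.builder()
        .put("xpack.security.authc.realms.kerb1.type", "kerberos")
        .put("xpack.security.authc.realms.kerb1.keytab.path", "es.keytab")
        .put("xpack.security.authc.realms.kerb1.remove_realm_name", false)
        .put("xpack.security.authc.realms.kerb1.krb.debug", false)
        .put("xpack.security.authc.realms.kerb1.cache.ttl", "20m")
        .put("xpack.security.authc.realms.kerb1.cache.max_users", 100000)
        .build();
}
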
[float]
[[load-balancing]]
===== Load balancing and failover

@@ -53,7 +53,7 @@ public class InitializerTests extends ScriptTestCase {
            "Object[] x = new Object[] {y, z, 1 + s, s + 'aaa'}; return x;");

        assertEquals(4, objects.length);
        assertEquals(new Integer(2), objects[0]);
        assertEquals(Integer.valueOf(2), objects[0]);
        assertEquals(new ArrayList(), objects[1]);
        assertEquals("1aaa", objects[2]);
        assertEquals("aaaaaa", objects[3]);

@@ -85,7 +85,7 @@ public class InitializerTests extends ScriptTestCase {
        list = (List)exec("int y = 2; List z = new ArrayList(); String s = 'aaa'; List x = [y, z, 1 + s, s + 'aaa']; return x;");

        assertEquals(4, list.size());
        assertEquals(new Integer(2), list.get(0));
        assertEquals(Integer.valueOf(2), list.get(0));
        assertEquals(new ArrayList(), list.get(1));
        assertEquals("1aaa", list.get(2));
        assertEquals("aaaaaa", list.get(3));

@@ -100,15 +100,15 @@ public class InitializerTests extends ScriptTestCase {
        map = (Map)exec("[5 : 7, -1 : 14]");

        assertEquals(2, map.size());
        assertEquals(new Integer(7), map.get(5));
        assertEquals(new Integer(14), map.get(-1));
        assertEquals(Integer.valueOf(7), map.get(5));
        assertEquals(Integer.valueOf(14), map.get(-1));

        map = (Map)exec("int y = 2; int z = 3; Map x = [y*z : y + z, y - z : y, z : z]; return x;");

        assertEquals(3, map.size());
        assertEquals(new Integer(5), map.get(6));
        assertEquals(new Integer(2), map.get(-1));
        assertEquals(new Integer(3), map.get(3));
        assertEquals(Integer.valueOf(5), map.get(6));
        assertEquals(Integer.valueOf(2), map.get(-1));
        assertEquals(Integer.valueOf(3), map.get(3));

        map = (Map)exec("int y = 2; List z = new ArrayList(); String s = 'aaa';" +
            "def x = [y : z, 1 + s : s + 'aaa']; return x;");

@@ -139,7 +139,7 @@ public class InitializerTests extends ScriptTestCase {
        list3.add(9);

        assertEquals(3, map.size());
        assertEquals(new Integer(5), map.get(6));
        assertEquals(Integer.valueOf(5), map.get(6));
        assertEquals(list2, map.get("s"));
        assertEquals(list3, map.get(3));
    }

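These hunks (and the test changes further down) swap the deprecated boxed-primitive constructors for the static valueOf factories, which may serve cached instances instead of allocating. A small self-contained demonstration of the difference:

// new Integer(...) always allocates; Integer.valueOf(...) may return a
// cached instance for values in [-128, 127].
class BoxingDemo {
    @SuppressWarnings("deprecation")
    public static void main(String[] args) {
        Integer a = Integer.valueOf(2);
        Integer b = Integer.valueOf(2);
        System.out.println(a == b);       // true: small values come from the cache
        Integer c = new Integer(2);       // deprecated since Java 9
        System.out.println(c.equals(a));  // true: same value
        System.out.println(c == a);       // false: fresh allocation
    }
}
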
@@ -22,7 +22,6 @@ package org.elasticsearch.transport.netty4;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.CloseableChannel;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

@@ -87,13 +86,6 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
        return transportService;
    }

    @Override
    protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
        final Netty4Transport t = (Netty4Transport) transport;
        final TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection;
        CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
    }

    public void testConnectException() throws UnknownHostException {
        try {
            serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876),

@@ -22,7 +22,6 @@ package org.elasticsearch.transport.nio;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.CloseableChannel;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

@@ -93,12 +92,6 @@ public class SimpleNioTransportTests extends AbstractSimpleTransportTestCase {
        return transportService;
    }

    @Override
    protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
        TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection;
        CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
    }

    public void testConnectException() throws UnknownHostException {
        try {
            serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876),

@@ -16,6 +16,8 @@
package org.elasticsearch.common.inject.matcher;

import org.elasticsearch.common.SuppressForbidden;

import java.lang.annotation.Annotation;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

@@ -327,7 +329,9 @@ public class Matchers {
            return "inPackage(" + targetPackage.getName() + ")";
        }

        @SuppressForbidden(reason = "ClassLoader.getDefinedPackage not available yet")
        public Object readResolve() {
            // TODO minJava >= 9 : use ClassLoader.getDefinedPackage and remove @SuppressForbidden
            return inPackage(Package.getPackage(packageName));
        }
    }

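The TODO in readResolve can be retired once the minimum Java version is 9, since ClassLoader.getDefinedPackage(String), added in Java 9, replaces the caller-sensitive Package.getPackage. A hedged sketch of the eventual replacement, reusing the packageName field from the surrounding class:

// Hypothetical Java 9+ replacement for Package.getPackage(packageName).
// getDefinedPackage only consults the given loader, so no @SuppressForbidden
// would be needed once the build targets Java 9 or later.
private static Package findPackage(String packageName) {
    return Matchers.class.getClassLoader().getDefinedPackage(packageName);
}
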
@@ -91,7 +91,8 @@ public class ConnectionManager implements Closeable {
    }

    public Transport.Connection openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) {
        return transport.openConnection(node, ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile));
        ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile);
        return internalOpenConnection(node, resolvedProfile);
    }

    /**

@@ -115,7 +116,7 @@ public class ConnectionManager implements Closeable {
        }
        boolean success = false;
        try {
            connection = transport.openConnection(node, resolvedProfile);
            connection = internalOpenConnection(node, resolvedProfile);
            connectionValidator.accept(connection, resolvedProfile);
            // we acquire a connection lock, so no way there is an existing connection
            connectedNodes.put(node, connection);

@@ -227,6 +228,19 @@ public class ConnectionManager implements Closeable {
        }
    }

    private Transport.Connection internalOpenConnection(DiscoveryNode node, ConnectionProfile connectionProfile) {
        Transport.Connection connection = transport.openConnection(node, connectionProfile);
        try {
            connectionListener.onConnectionOpened(connection);
        } finally {
            connection.addCloseListener(ActionListener.wrap(() -> connectionListener.onConnectionClosed(connection)));
        }
        if (connection.isClosed()) {
            throw new ConnectTransportException(node, "a channel closed while connecting");
        }
        return connection;
    }

    private void ensureOpen() {
        if (lifecycle.started() == false) {
            throw new IllegalStateException("connection manager is closed");

@@ -289,6 +303,20 @@ public class ConnectionManager implements Closeable {
            listener.onNodeConnected(node);
        }
    }

    @Override
    public void onConnectionOpened(Transport.Connection connection) {
        for (TransportConnectionListener listener : listeners) {
            listener.onConnectionOpened(connection);
        }
    }

    @Override
    public void onConnectionClosed(Transport.Connection connection) {
        for (TransportConnectionListener listener : listeners) {
            listener.onConnectionClosed(connection);
        }
    }
    }

    static ConnectionProfile buildDefaultConnectionProfile(Settings settings) {

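Note the ordering inside internalOpenConnection: listeners are told about the open, the close listener is attached in a finally block, and only then is isClosed() checked, so a channel that dies mid-registration still yields a paired close notification plus a ConnectTransportException. A self-contained sketch of that open-notify-verify pattern (all names hypothetical):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class OpenNotifyVerify {
    interface Conn {
        boolean isClosed();
        void addCloseListener(Runnable onClose);
    }

    private final List<Consumer<Conn>> openedListeners = new ArrayList<>();
    private final List<Consumer<Conn>> closedListeners = new ArrayList<>();

    Conn open(Conn connection) {
        try {
            openedListeners.forEach(l -> l.accept(connection)); // may throw
        } finally {
            // always pair the opened notification with a closed notification
            connection.addCloseListener(
                () -> closedListeners.forEach(l -> l.accept(connection)));
        }
        if (connection.isClosed()) {
            // the channel died while listeners were being notified
            throw new IllegalStateException("a channel closed while connecting");
        }
        return connection;
    }
}
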
@@ -18,6 +18,7 @@
 */
package org.elasticsearch.transport;

import java.util.function.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -48,9 +49,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
    /**
     * A list of initial seed nodes to discover eligible nodes from the remote cluster
     */
    public static final Setting.AffixSetting<List<InetSocketAddress>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting("search.remote.",
        "seeds", (key) -> Setting.listSetting(key, Collections.emptyList(), RemoteClusterAware::parseSeedAddress,
            Setting.Property.NodeScope, Setting.Property.Dynamic));
    public static final Setting.AffixSetting<List<String>> REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting(
        "search.remote.",
        "seeds",
        key -> Setting.listSetting(
            key, Collections.emptyList(),
            s -> {
                // validate seed address
                parsePort(s);
                return s;
            },
            Setting.Property.NodeScope,
            Setting.Property.Dynamic
        )
    );
    public static final char REMOTE_CLUSTER_INDEX_SEPARATOR = ':';
    public static final String LOCAL_CLUSTER_GROUP_KEY = "";

@@ -65,18 +77,20 @@ public abstract class RemoteClusterAware extends AbstractComponent {
        this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
    }

    protected static Map<String, List<DiscoveryNode>> buildRemoteClustersSeeds(Settings settings) {
        Stream<Setting<List<InetSocketAddress>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
    protected static Map<String, List<Supplier<DiscoveryNode>>> buildRemoteClustersSeeds(Settings settings) {
        Stream<Setting<List<String>>> allConcreteSettings = REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(settings);
        return allConcreteSettings.collect(
            Collectors.toMap(REMOTE_CLUSTERS_SEEDS::getNamespace, concreteSetting -> {
                String clusterName = REMOTE_CLUSTERS_SEEDS.getNamespace(concreteSetting);
                List<DiscoveryNode> nodes = new ArrayList<>();
                for (InetSocketAddress address : concreteSetting.get(settings)) {
                    TransportAddress transportAddress = new TransportAddress(address);
                    DiscoveryNode node = new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
                        transportAddress,
                        Version.CURRENT.minimumCompatibilityVersion());
                    nodes.add(node);
                List<String> addresses = concreteSetting.get(settings);
                List<Supplier<DiscoveryNode>> nodes = new ArrayList<>(addresses.size());
                for (String address : addresses) {
                    nodes.add(() -> {
                        TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
                        return new DiscoveryNode(clusterName + "#" + transportAddress.toString(),
                            transportAddress,
                            Version.CURRENT.minimumCompatibilityVersion());
                    });
                }
                return nodes;
            }));

@@ -128,7 +142,7 @@ public abstract class RemoteClusterAware extends AbstractComponent {
     * Subclasses must implement this to receive information about updated cluster aliases. If the given address list is
     * empty the cluster alias is unregistered and should be removed.
     */
    protected abstract void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses);
    protected abstract void updateRemoteCluster(String clusterAlias, List<String> addresses);

    /**
     * Registers this instance to listen to updates on the cluster settings.

@@ -138,29 +152,37 @@ public abstract class RemoteClusterAware extends AbstractComponent {
            (namespace, value) -> {});
    }

    private static InetSocketAddress parseSeedAddress(String remoteHost) {
        int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
        if (portSeparator == -1 || portSeparator == remoteHost.length()) {
            throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
        }
        String host = remoteHost.substring(0, portSeparator);
    protected static InetSocketAddress parseSeedAddress(String remoteHost) {
        String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost));
        InetAddress hostAddress;
        try {
            hostAddress = InetAddress.getByName(host);
        } catch (UnknownHostException e) {
            throw new IllegalArgumentException("unknown host [" + host + "]", e);
        }
        return new InetSocketAddress(hostAddress, parsePort(remoteHost));
    }

    private static int parsePort(String remoteHost) {
        try {
            int port = Integer.valueOf(remoteHost.substring(portSeparator + 1));
            int port = Integer.valueOf(remoteHost.substring(indexOfPortSeparator(remoteHost) + 1));
            if (port <= 0) {
                throw new IllegalArgumentException("port number must be > 0 but was: [" + port + "]");
            }
            return new InetSocketAddress(hostAddress, port);
            return port;
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("port must be a number", e);
            throw new IllegalArgumentException("failed to parse port", e);
        }
    }

    private static int indexOfPortSeparator(String remoteHost) {
        int portSeparator = remoteHost.lastIndexOf(':'); // in case we have a IPv6 address ie. [::1]:9300
        if (portSeparator == -1 || portSeparator == remoteHost.length()) {
            throw new IllegalArgumentException("remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
        }
        return portSeparator;
    }

    public static String buildRemoteIndexName(String clusterAlias, String indexName) {
        return clusterAlias != null ? clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + indexName : indexName;
    }

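The net effect of this hunk: search.remote.*.seeds is now parsed as raw host:port strings (validated eagerly via parsePort), and DiscoveryNode creation is deferred behind a Supplier, so DNS lookups happen at connect time instead of settings-parse time. A self-contained sketch of the host:port splitting, including the bracketed IPv6 case the comment mentions (method name is illustrative):

// Split on the LAST ':' so IPv6 literals such as [::1]:9300 keep their colons.
static String[] splitSeedAddress(String remoteHost) {
    int portSeparator = remoteHost.lastIndexOf(':');
    if (portSeparator == -1 || portSeparator == remoteHost.length() - 1) {
        throw new IllegalArgumentException(
            "remote hosts need to be configured as [host:port], found [" + remoteHost + "] instead");
    }
    return new String[] {
        remoteHost.substring(0, portSeparator),  // e.g. "[::1]" or "seed.example.org"
        remoteHost.substring(portSeparator + 1)  // e.g. "9300"
    };
}
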
@@ -18,6 +18,7 @@
 */
package org.elasticsearch.transport;

import java.util.function.Supplier;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.SetOnce;

@@ -84,7 +85,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
    private final String clusterAlias;
    private final int maxNumRemoteConnections;
    private final Predicate<DiscoveryNode> nodePredicate;
    private volatile List<DiscoveryNode> seedNodes;
    private volatile List<Supplier<DiscoveryNode>> seedNodes;
    private volatile boolean skipUnavailable;
    private final ConnectHandler connectHandler;
    private SetOnce<ClusterName> remoteClusterName = new SetOnce<>();

@@ -99,7 +100,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
     * @param maxNumRemoteConnections the maximum number of connections to the remote cluster
     * @param nodePredicate a predicate to filter eligible remote nodes to connect to
     */
    RemoteClusterConnection(Settings settings, String clusterAlias, List<DiscoveryNode> seedNodes,
    RemoteClusterConnection(Settings settings, String clusterAlias, List<Supplier<DiscoveryNode>> seedNodes,
                            TransportService transportService, int maxNumRemoteConnections, Predicate<DiscoveryNode> nodePredicate) {
        super(settings);
        this.localClusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);

@@ -127,7 +128,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
    /**
     * Updates the list of seed nodes for this cluster connection
     */
    synchronized void updateSeedNodes(List<DiscoveryNode> seedNodes, ActionListener<Void> connectListener) {
    synchronized void updateSeedNodes(List<Supplier<DiscoveryNode>> seedNodes, ActionListener<Void> connectListener) {
        this.seedNodes = Collections.unmodifiableList(new ArrayList<>(seedNodes));
        connectHandler.connect(connectListener);
    }

@@ -456,7 +457,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
            });
        }

        void collectRemoteNodes(Iterator<DiscoveryNode> seedNodes,
        private void collectRemoteNodes(Iterator<Supplier<DiscoveryNode>> seedNodes,
                final TransportService transportService, ActionListener<Void> listener) {
            if (Thread.currentThread().isInterrupted()) {
                listener.onFailure(new InterruptedException("remote connect thread got interrupted"));

@@ -464,7 +465,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
            try {
                if (seedNodes.hasNext()) {
                    cancellableThreads.executeIO(() -> {
                        final DiscoveryNode seedNode = seedNodes.next();
                        final DiscoveryNode seedNode = seedNodes.next().get();
                        final TransportService.HandshakeResponse handshakeResponse;
                        Transport.Connection connection = transportService.openConnection(seedNode,
                            ConnectionProfile.buildSingleChannelProfile(TransportRequestOptions.Type.REG, null, null));

@@ -554,11 +555,11 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
        private final TransportService transportService;
        private final Transport.Connection connection;
        private final ActionListener<Void> listener;
        private final Iterator<DiscoveryNode> seedNodes;
        private final Iterator<Supplier<DiscoveryNode>> seedNodes;
        private final CancellableThreads cancellableThreads;

        SniffClusterStateResponseHandler(TransportService transportService, Transport.Connection connection,
                                         ActionListener<Void> listener, Iterator<DiscoveryNode> seedNodes,
                                         ActionListener<Void> listener, Iterator<Supplier<DiscoveryNode>> seedNodes,
                                         CancellableThreads cancellableThreads) {
            this.transportService = transportService;
            this.connection = connection;

@@ -651,7 +652,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
     * Get the information about remote nodes to be rendered on {@code _remote/info} requests.
     */
    public RemoteConnectionInfo getConnectionInfo() {
        List<TransportAddress> seedNodeAddresses = seedNodes.stream().map(DiscoveryNode::getAddress).collect(Collectors.toList());
        List<TransportAddress> seedNodeAddresses = seedNodes.stream().map(node -> node.get().getAddress()).collect(Collectors.toList());
        TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
        return new RemoteConnectionInfo(clusterAlias, seedNodeAddresses, maxNumRemoteConnections, connectedNodes.size(),
            initialConnectionTimeout, skipUnavailable);

@@ -18,6 +18,7 @@
 */
package org.elasticsearch.transport;

import java.util.function.Supplier;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;

@@ -40,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;

@@ -115,7 +115,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
     * @param seeds a cluster alias to discovery node mapping representing the remote clusters seeds nodes
     * @param connectionListener a listener invoked once every configured cluster has been connected to
     */
    private synchronized void updateRemoteClusters(Map<String, List<DiscoveryNode>> seeds, ActionListener<Void> connectionListener) {
    private synchronized void updateRemoteClusters(Map<String, List<Supplier<DiscoveryNode>>> seeds,
                                                   ActionListener<Void> connectionListener) {
        if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) {
            throw new IllegalArgumentException("remote clusters must not have the empty string as its key");
        }

@@ -125,7 +126,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
        } else {
            CountDown countDown = new CountDown(seeds.size());
            remoteClusters.putAll(this.remoteClusters);
            for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {
            for (Map.Entry<String, List<Supplier<DiscoveryNode>>> entry : seeds.entrySet()) {
                RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey());
                if (entry.getValue().isEmpty()) { // with no seed nodes we just remove the connection
                    try {

@@ -310,16 +311,17 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
        }
    }

    protected void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses) {
    @Override
    protected void updateRemoteCluster(String clusterAlias, List<String> addresses) {
        updateRemoteCluster(clusterAlias, addresses, ActionListener.wrap((x) -> {}, (x) -> {}));
    }

    void updateRemoteCluster(
            final String clusterAlias,
            final List<InetSocketAddress> addresses,
            final List<String> addresses,
            final ActionListener<Void> connectionListener) {
        final List<DiscoveryNode> nodes = addresses.stream().map(address -> {
            final TransportAddress transportAddress = new TransportAddress(address);
        final List<Supplier<DiscoveryNode>> nodes = addresses.stream().<Supplier<DiscoveryNode>>map(address -> () -> {
            final TransportAddress transportAddress = new TransportAddress(RemoteClusterAware.parseSeedAddress(address));
            final String id = clusterAlias + "#" + transportAddress.toString();
            final Version version = Version.CURRENT.minimumCompatibilityVersion();
            return new DiscoveryNode(id, transportAddress, version);

@@ -334,7 +336,7 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
    void initializeRemoteClusters() {
        final TimeValue timeValue = REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings);
        final PlainActionFuture<Void> future = new PlainActionFuture<>();
        Map<String, List<DiscoveryNode>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings);
        Map<String, List<Supplier<DiscoveryNode>>> seeds = RemoteClusterAware.buildRemoteClustersSeeds(settings);
        updateRemoteClusters(seeds, future);
        try {
            future.get(timeValue.millis(), TimeUnit.MILLISECONDS);

|
|||
protected final NetworkService networkService;
|
||||
protected final Set<ProfileSettings> profileSettings;
|
||||
|
||||
private final DelegatingTransportConnectionListener transportListener = new DelegatingTransportConnectionListener();
|
||||
private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener();
|
||||
|
||||
private final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
|
||||
private final Map<String, List<TcpServerChannel>> serverChannels = newConcurrentMap();
|
||||
|
@ -248,14 +248,12 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
protected void doStart() {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addConnectionListener(TransportConnectionListener listener) {
|
||||
transportListener.listeners.add(listener);
|
||||
public void addMessageListener(TransportMessageListener listener) {
|
||||
messageListener.listeners.add(listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean removeConnectionListener(TransportConnectionListener listener) {
|
||||
return transportListener.listeners.remove(listener);
|
||||
public boolean removeMessageListener(TransportMessageListener listener) {
|
||||
return messageListener.listeners.remove(listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -344,10 +342,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
return connectionTypeHandle.getChannel(channels);
|
||||
}
|
||||
|
||||
boolean allChannelsOpen() {
|
||||
return channels.stream().allMatch(TcpChannel::isOpen);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean sendPing() {
|
||||
for (TcpChannel channel : channels) {
|
||||
|
@ -481,11 +475,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
// underlying channels.
|
||||
nodeChannels = new NodeChannels(node, channels, connectionProfile, version);
|
||||
final NodeChannels finalNodeChannels = nodeChannels;
|
||||
try {
|
||||
transportListener.onConnectionOpened(nodeChannels);
|
||||
} finally {
|
||||
nodeChannels.addCloseListener(ActionListener.wrap(() -> transportListener.onConnectionClosed(finalNodeChannels)));
|
||||
}
|
||||
|
||||
Consumer<TcpChannel> onClose = c -> {
|
||||
assert c.isOpen() == false : "channel is still open when onClose is called";
|
||||
|
@ -493,10 +482,6 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
};
|
||||
|
||||
nodeChannels.channels.forEach(ch -> ch.addCloseListener(ActionListener.wrap(() -> onClose.accept(ch))));
|
||||
|
||||
if (nodeChannels.allChannelsOpen() == false) {
|
||||
throw new ConnectTransportException(node, "a channel closed while connecting");
|
||||
}
|
||||
success = true;
|
||||
return nodeChannels;
|
||||
} catch (ConnectTransportException e) {
|
||||
|
@ -907,7 +892,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
final TransportRequestOptions finalOptions = options;
|
||||
// this might be called in a different thread
|
||||
SendListener onRequestSent = new SendListener(channel, stream,
|
||||
() -> transportListener.onRequestSent(node, requestId, action, request, finalOptions), message.length());
|
||||
() -> messageListener.onRequestSent(node, requestId, action, request, finalOptions), message.length());
|
||||
internalSendMessage(channel, message, onRequestSent);
|
||||
addedReleaseListener = true;
|
||||
} finally {
|
||||
|
@ -961,7 +946,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length());
|
||||
CompositeBytesReference message = new CompositeBytesReference(header, bytes);
|
||||
SendListener onResponseSent = new SendListener(channel, null,
|
||||
() -> transportListener.onResponseSent(requestId, action, error), message.length());
|
||||
() -> messageListener.onResponseSent(requestId, action, error), message.length());
|
||||
internalSendMessage(channel, message, onResponseSent);
|
||||
}
|
||||
}
|
||||
|
@ -1010,7 +995,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
final TransportResponseOptions finalOptions = options;
|
||||
// this might be called in a different thread
|
||||
SendListener listener = new SendListener(channel, stream,
|
||||
() -> transportListener.onResponseSent(requestId, action, response, finalOptions), message.length());
|
||||
() -> messageListener.onResponseSent(requestId, action, response, finalOptions), message.length());
|
||||
internalSendMessage(channel, message, listener);
|
||||
addedReleaseListener = true;
|
||||
} finally {
|
||||
|
@ -1266,7 +1251,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
if (isHandshake) {
|
||||
handler = pendingHandshakes.remove(requestId);
|
||||
} else {
|
||||
TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, transportListener);
|
||||
TransportResponseHandler theHandler = responseHandlers.onResponseReceived(requestId, messageListener);
|
||||
if (theHandler == null && TransportStatus.isError(status)) {
|
||||
handler = pendingHandshakes.remove(requestId);
|
||||
} else {
|
||||
|
@ -1373,7 +1358,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
features = Collections.emptySet();
|
||||
}
|
||||
final String action = stream.readString();
|
||||
transportListener.onRequestReceived(requestId, action);
|
||||
messageListener.onRequestReceived(requestId, action);
|
||||
TransportChannel transportChannel = null;
|
||||
try {
|
||||
if (TransportStatus.isHandshake(status)) {
|
||||
|
@ -1682,26 +1667,27 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
}
|
||||
}
|
||||
|
||||
private static final class DelegatingTransportConnectionListener implements TransportConnectionListener {
|
||||
private final List<TransportConnectionListener> listeners = new CopyOnWriteArrayList<>();
|
||||
private static final class DelegatingTransportMessageListener implements TransportMessageListener {
|
||||
|
||||
private final List<TransportMessageListener> listeners = new CopyOnWriteArrayList<>();
|
||||
|
||||
@Override
|
||||
public void onRequestReceived(long requestId, String action) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
for (TransportMessageListener listener : listeners) {
|
||||
listener.onRequestReceived(requestId, action);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
for (TransportMessageListener listener : listeners) {
|
||||
listener.onResponseSent(requestId, action, response, finalOptions);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResponseSent(long requestId, String action, Exception error) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
for (TransportMessageListener listener : listeners) {
|
||||
listener.onResponseSent(requestId, action, error);
|
||||
}
|
||||
}
|
||||
|
@ -1709,42 +1695,14 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
@Override
|
||||
public void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request,
|
||||
TransportRequestOptions finalOptions) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
for (TransportMessageListener listener : listeners) {
|
||||
listener.onRequestSent(node, requestId, action, request, finalOptions);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onNodeDisconnected(DiscoveryNode key) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
listener.onNodeDisconnected(key);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onConnectionOpened(Connection nodeChannels) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
listener.onConnectionOpened(nodeChannels);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onNodeConnected(DiscoveryNode node) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
listener.onNodeConnected(node);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onConnectionClosed(Connection nodeChannels) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
listener.onConnectionClosed(nodeChannels);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResponseReceived(long requestId, ResponseContext holder) {
|
||||
for (TransportConnectionListener listener : listeners) {
|
||||
for (TransportMessageListener listener : listeners) {
|
||||
listener.onResponseReceived(requestId, holder);
|
||||
}
|
||||
}
|
||||
|
|
|
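DelegatingTransportMessageListener fans each callback out over a CopyOnWriteArrayList, so listeners can be added or removed concurrently with dispatch without extra locking. A stripped-down, self-contained sketch of the pattern (names are illustrative):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

interface MessageListener {
    default void onRequestReceived(long requestId, String action) {}
}

final class DelegatingListener implements MessageListener {
    // CopyOnWriteArrayList iteration never throws ConcurrentModificationException,
    // so listeners may register or unregister while a callback is in flight.
    final List<MessageListener> listeners = new CopyOnWriteArrayList<>();

    @Override
    public void onRequestReceived(long requestId, String action) {
        for (MessageListener listener : listeners) {
            listener.onRequestReceived(requestId, action);
        }
    }
}
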
@@ -56,18 +56,9 @@ public interface Transport extends LifecycleComponent {
    RequestHandlerRegistry getRequestHandler(String action);

    /**
     * Adds a new event listener
     * @param listener the listener to add
     */
    void addConnectionListener(TransportConnectionListener listener);
    void addMessageListener(TransportMessageListener listener);

    /**
     * Removes an event listener
     * @param listener the listener to remove
     * @return <code>true</code> iff the listener was removed otherwise <code>false</code>
     */
    boolean removeConnectionListener(TransportConnectionListener listener);
    boolean removeMessageListener(TransportMessageListener listener);

    /**
     * The address the transport is bound on.

@@ -254,7 +245,7 @@ public interface Transport extends LifecycleComponent {
     * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not
     * found.
     */
    public TransportResponseHandler onResponseReceived(final long requestId, TransportConnectionListener listener) {
    public TransportResponseHandler onResponseReceived(final long requestId, TransportMessageListener listener) {
        ResponseContext context = handlers.remove(requestId);
        listener.onResponseReceived(requestId, context);
        if (context == null) {

@@ -28,42 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 */
public interface TransportConnectionListener {

    /**
     * Called once a request is received
     * @param requestId the internal request ID
     * @param action the request action
     *
     */
    default void onRequestReceived(long requestId, String action) {}

    /**
     * Called for every action response sent after the response has been passed to the underlying network implementation.
     * @param requestId the request ID (unique per client)
     * @param action the request action
     * @param response the response sent
     * @param finalOptions the response options
     */
    default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {}

    /**
     * Called for every failed action response after the response has been passed to the underlying network implementation.
     * @param requestId the request ID (unique per client)
     * @param action the request action
     * @param error the error sent back to the caller
     */
    default void onResponseSent(long requestId, String action, Exception error) {}

    /**
     * Called for every request sent to a server after the request has been passed to the underlying network implementation
     * @param node the node the request was sent to
     * @param requestId the internal request id
     * @param action the action name
     * @param request the actual request
     * @param finalOptions the request options
     */
    default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request,
                               TransportRequestOptions finalOptions) {}

    /**
     * Called once a connection was opened
     * @param connection the connection

@@ -76,13 +40,6 @@ public interface TransportConnectionListener {
     */
    default void onConnectionClosed(Transport.Connection connection) {}

    /**
     * Called for every response received
     * @param requestId the request id for this response
     * @param context the response context or null if the context was already processed, i.e. due to a timeout.
     */
    default void onResponseReceived(long requestId, Transport.ResponseContext context) {}

    /**
     * Called once a node connection is opened and registered.
     */

@@ -0,0 +1,67 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.transport;

import org.elasticsearch.cluster.node.DiscoveryNode;

public interface TransportMessageListener {

    /**
     * Called once a request is received
     * @param requestId the internal request ID
     * @param action the request action
     *
     */
    default void onRequestReceived(long requestId, String action) {}

    /**
     * Called for every action response sent after the response has been passed to the underlying network implementation.
     * @param requestId the request ID (unique per client)
     * @param action the request action
     * @param response the response sent
     * @param finalOptions the response options
     */
    default void onResponseSent(long requestId, String action, TransportResponse response, TransportResponseOptions finalOptions) {}

    /**
     * Called for every failed action response after the response has been passed to the underlying network implementation.
     * @param requestId the request ID (unique per client)
     * @param action the request action
     * @param error the error sent back to the caller
     */
    default void onResponseSent(long requestId, String action, Exception error) {}

    /**
     * Called for every request sent to a server after the request has been passed to the underlying network implementation
     * @param node the node the request was sent to
     * @param requestId the internal request id
     * @param action the action name
     * @param request the actual request
     * @param finalOptions the request options
     */
    default void onRequestSent(DiscoveryNode node, long requestId, String action, TransportRequest request,
                               TransportRequestOptions finalOptions) {}

    /**
     * Called for every response received
     * @param requestId the request id for this response
     * @param context the response context or null if the context was already processed, i.e. due to a timeout.
     */
    default void onResponseReceived(long requestId, Transport.ResponseContext context) {}
}
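A hedged usage sketch of the new interface: every callback has a no-op default, so an implementation only overrides what it cares about. Here a listener counts received requests and is registered through the Transport.addMessageListener hook introduced above (helper name and counter are illustrative):

import java.util.concurrent.atomic.AtomicLong;

// Registers a listener that tallies incoming requests; all other
// callbacks keep their no-op defaults.
static void registerRequestCounter(Transport transport, AtomicLong requestsReceived) {
    transport.addMessageListener(new TransportMessageListener() {
        @Override
        public void onRequestReceived(long requestId, String action) {
            requestsReceived.incrementAndGet();
        }
    });
}
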
@@ -77,7 +77,7 @@ import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.settings.Setting.listSetting;
import static org.elasticsearch.common.settings.Setting.timeSetting;

public class TransportService extends AbstractLifecycleComponent implements TransportConnectionListener {
public class TransportService extends AbstractLifecycleComponent implements TransportMessageListener, TransportConnectionListener {

    public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY =
        intSetting("transport.connections_per_node.recovery", 2, 1, Setting.Property.NodeScope);

@@ -248,7 +248,8 @@ public class TransportService extends AbstractLifecycleComponent implements Tran
    @Override
    protected void doStart() {
        transport.addConnectionListener(this);
        transport.addMessageListener(this);
        connectionManager.addListener(this);
        transport.start();
        if (transport.boundAddress() != null && logger.isInfoEnabled()) {
            logger.info("{}", transport.boundAddress());

@@ -506,12 +507,10 @@ public class TransportService extends AbstractLifecycleComponent implements Tran
    }

    public void addConnectionListener(TransportConnectionListener listener) {
        transport.addConnectionListener(listener);
        connectionManager.addListener(listener);
    }

    public void removeConnectionListener(TransportConnectionListener listener) {
        transport.removeConnectionListener(listener);
        connectionManager.removeListener(listener);
    }

@@ -38,8 +38,8 @@ import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.RequestHandlerRegistry;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportMessageListener;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponse;

@@ -62,7 +62,7 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp
    private volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
    private final Object requestHandlerMutex = new Object();
    private final ResponseHandlers responseHandlers = new ResponseHandlers();
    private TransportConnectionListener listener;
    private TransportMessageListener listener;

    private boolean connectMode = true;

@@ -223,13 +223,15 @@ abstract class FailAndRetryMockTransport<Response extends TransportResponse> imp
        return requestHandlers.get(action);
    }

    @Override
    public void addConnectionListener(TransportConnectionListener listener) {
    public void addMessageListener(TransportMessageListener listener) {
        this.listener = listener;
    }

    @Override
    public boolean removeConnectionListener(TransportConnectionListener listener) {
    public boolean removeMessageListener(TransportMessageListener listener) {
        throw new UnsupportedOperationException();
    }

}

@@ -37,9 +37,9 @@ import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.RequestHandlerRegistry;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportMessageListener;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

@@ -107,7 +107,6 @@ public class NodeConnectionsServiceTests extends ESTestCase {
        assertConnectedExactlyToNodes(event.state());
    }

    public void testReconnect() {
        List<DiscoveryNode> nodes = generateNodes();
        NodeConnectionsService service = new NodeConnectionsService(Settings.EMPTY, threadPool, transportService);

@@ -188,7 +187,7 @@ public class NodeConnectionsServiceTests extends ESTestCase {
    private final class MockTransport implements Transport {
        private ResponseHandlers responseHandlers = new ResponseHandlers();
        private volatile boolean randomConnectionExceptions = false;
        private TransportConnectionListener listener = new TransportConnectionListener() {
        private TransportMessageListener listener = new TransportMessageListener() {
        };

        @Override

@@ -201,12 +200,12 @@ public class NodeConnectionsServiceTests extends ESTestCase {
        }

        @Override
        public void addConnectionListener(TransportConnectionListener listener) {
        public void addMessageListener(TransportMessageListener listener) {
            this.listener = listener;
        }

        @Override
        public boolean removeConnectionListener(TransportConnectionListener listener) {
        public boolean removeMessageListener(TransportMessageListener listener) {
            throw new UnsupportedOperationException();
        }

@@ -231,7 +230,6 @@ public class NodeConnectionsServiceTests extends ESTestCase {
            if (randomConnectionExceptions && randomBoolean()) {
                throw new ConnectTransportException(node, "simulated");
            }
            listener.onNodeConnected(node);
        }
        Connection connection = new Connection() {
            @Override

@@ -260,7 +258,6 @@ public class NodeConnectionsServiceTests extends ESTestCase {
                return false;
            }
        };
        listener.onConnectionOpened(connection);
        return connection;
    }

@@ -59,7 +59,7 @@ public class FuzzinessTests extends ESTestCase {
        Float floatRep = randomFloat();
        Number value = intValue;
        if (randomBoolean()) {
            value = new Float(floatRep += intValue);
            value = Float.valueOf(floatRep += intValue);
        }
        XContentBuilder json = jsonBuilder().startObject()
            .field(Fuzziness.X_FIELD_NAME, randomBoolean() ? value.toString() : value)

@@ -337,7 +337,7 @@ public class EsExecutorsTests extends ESTestCase {
        final CountDownLatch executed = new CountDownLatch(1);

        threadContext.putHeader("foo", "bar");
        final Integer one = new Integer(1);
        final Integer one = Integer.valueOf(1);
        threadContext.putTransient("foo", one);
        EsThreadPoolExecutor executor =
            EsExecutors.newFixed(getName(), pool, queue, EsExecutors.daemonThreadFactory("dummy"), threadContext);

@@ -42,7 +42,7 @@ public class ThreadContextTests extends ESTestCase {
        threadContext.putHeader("foo", "bar");
        threadContext.putTransient("ctx.foo", 1);
        assertEquals("bar", threadContext.getHeader("foo"));
        assertEquals(new Integer(1), threadContext.getTransient("ctx.foo"));
        assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo"));
        assertEquals("1", threadContext.getHeader("default"));
        try (ThreadContext.StoredContext ctx = threadContext.stashContext()) {
            assertNull(threadContext.getHeader("foo"));

@@ -61,7 +61,7 @@ public class ThreadContextTests extends ESTestCase {
        threadContext.putHeader("foo", "bar");
        threadContext.putTransient("ctx.foo", 1);
        assertEquals("bar", threadContext.getHeader("foo"));
        assertEquals(new Integer(1), threadContext.getTransient("ctx.foo"));
        assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo"));
        assertEquals("1", threadContext.getHeader("default"));
        HashMap<String, String> toMerge = new HashMap<>();
        toMerge.put("foo", "baz");

@@ -108,8 +108,14 @@ public class ScriptedMetricIT extends ESIntegTestCase {
            aggScript(vars, state -> state.put((String) XContentMapValues.extractValue("params.param1", vars),
                XContentMapValues.extractValue("params.param2", vars))));

        scripts.put("vars.multiplier = 3", vars ->
            ((Map<String, Object>) vars.get("vars")).put("multiplier", 3));
        scripts.put("vars.multiplier = 3", vars -> {
            ((Map<String, Object>) vars.get("vars")).put("multiplier", 3);

            Map<String, Object> state = (Map<String, Object>) vars.get("state");
            state.put("list", new ArrayList());

            return state;
        });

        scripts.put("state.list.add(vars.multiplier)", vars ->
            aggScript(vars, state -> {

@@ -601,10 +601,10 @@ public class HighlightBuilderTests extends ESTestCase {
                value = randomAlphaOfLengthBetween(1, 10);
                break;
            case 1:
                value = new Integer(randomInt(1000));
                value = Integer.valueOf(randomInt(1000));
                break;
            case 2:
                value = new Boolean(randomBoolean());
                value = Boolean.valueOf(randomBoolean());
                break;
        }
        options.put(randomAlphaOfLengthBetween(1, 10), value);

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.transport;

+import java.util.function.Supplier;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;

@@ -158,8 +159,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
-    updateSeedNodes(connection, Arrays.asList(seedNode));
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
+    updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     assertTrue(service.nodeConnected(seedNode));
     assertTrue(service.nodeConnected(discoverableNode));
     assertTrue(connection.assertNoRunningConnections());

@@ -198,8 +199,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
-    updateSeedNodes(connection, Arrays.asList(seedNode));
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
+    updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     assertTrue(service.nodeConnected(seedNode));
     assertTrue(service.nodeConnected(discoverableNode));
     assertTrue(connection.assertNoRunningConnections());

@@ -254,8 +255,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
-    updateSeedNodes(connection, Arrays.asList(seedNode));
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
+    updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     assertTrue(service.nodeConnected(seedNode));
     assertTrue(service.nodeConnected(discoverableNode));
     assertTrue(connection.assertNoRunningConnections());

@@ -276,7 +277,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 knownNodes.add(discoverableTransport.getLocalDiscoNode());
 knownNodes.add(incompatibleTransport.getLocalDiscoNode());
 Collections.shuffle(knownNodes, random());
-List<DiscoveryNode> seedNodes = Arrays.asList(incompatibleSeedNode, seedNode);
+List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> incompatibleSeedNode, () -> seedNode);
 Collections.shuffle(seedNodes, random());

 try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {

@@ -310,8 +311,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
-    updateSeedNodes(connection, Arrays.asList(seedNode));
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
+    updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     assertTrue(service.nodeConnected(seedNode));
     assertTrue(service.nodeConnected(discoverableNode));
     assertFalse(service.nodeConnected(spareNode));

@@ -359,8 +360,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) {
-    updateSeedNodes(connection, Arrays.asList(seedNode));
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> n.equals(rejectedNode) == false)) {
+    updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     if (rejectedNode.equals(seedNode)) {
         assertFalse(service.nodeConnected(seedNode));
         assertTrue(service.nodeConnected(discoverableNode));

@@ -374,7 +375,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
     }
 }

-private void updateSeedNodes(RemoteClusterConnection connection, List<DiscoveryNode> seedNodes) throws Exception {
+private void updateSeedNodes(RemoteClusterConnection connection, List<Supplier<DiscoveryNode>> seedNodes) throws Exception {
     CountDownLatch latch = new CountDownLatch(1);
     AtomicReference<Exception> exceptionAtomicReference = new AtomicReference<>();
     ActionListener<Void> listener = ActionListener.wrap(x -> latch.countDown(), x -> {

@@ -398,8 +399,8 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
-    expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(seedNode)));
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
+    expectThrows(Exception.class, () -> updateSeedNodes(connection, Arrays.asList(() -> seedNode)));
     assertFalse(service.nodeConnected(seedNode));
     assertTrue(connection.assertNoRunningConnections());
 }

@@ -461,7 +462,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
     connection.addConnectedNode(seedNode);
     for (DiscoveryNode node : knownNodes) {
         final Transport.Connection transportConnection = connection.getConnection(node);

@@ -504,7 +505,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 CountDownLatch listenerCalled = new CountDownLatch(1);
 AtomicReference<Exception> exceptionReference = new AtomicReference<>();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
     ActionListener<Void> listener = ActionListener.wrap(x -> {
         listenerCalled.countDown();
         fail("expected exception");

@@ -512,7 +513,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
     exceptionReference.set(x);
     listenerCalled.countDown();
 });
-connection.updateSeedNodes(Arrays.asList(seedNode), listener);
+connection.updateSeedNodes(Arrays.asList(() -> seedNode), listener);
 acceptedLatch.await();
 connection.close(); // now close it, this should trigger an interrupt on the socket and we can move on
 assertTrue(connection.assertNoRunningConnections());

@@ -539,7 +540,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
     service.start();
     service.acceptIncomingRequests();
-    List<DiscoveryNode> nodes = Collections.singletonList(seedNode);
+    List<Supplier<DiscoveryNode>> nodes = Collections.singletonList(() -> seedNode);
     try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
             nodes, service, Integer.MAX_VALUE, n -> true)) {
         if (randomBoolean()) {

@@ -579,7 +580,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
     service.start();
     service.acceptIncomingRequests();
-    List<DiscoveryNode> nodes = Collections.singletonList(seedNode);
+    List<Supplier<DiscoveryNode>> nodes = Collections.singletonList(() -> seedNode);
     try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
             nodes, service, Integer.MAX_VALUE, n -> true)) {
         SearchRequest request = new SearchRequest("test-index");

@@ -635,7 +636,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Collections.singletonList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
+        Collections.singletonList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {

     SearchRequest request = new SearchRequest("test-index");
     ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index")

@@ -738,7 +739,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 knownNodes.add(discoverableTransport.getLocalDiscoNode());
 knownNodes.add(seedTransport1.getLocalDiscoNode());
 Collections.shuffle(knownNodes, random());
-List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
+List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode);
 Collections.shuffle(seedNodes, random());

 try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {

@@ -816,7 +817,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 knownNodes.add(discoverableTransport.getLocalDiscoNode());
 knownNodes.add(seedTransport1.getLocalDiscoNode());
 Collections.shuffle(knownNodes, random());
-List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
+List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> seedNode1, () -> seedNode);
 Collections.shuffle(seedNodes, random());

 try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {

@@ -904,7 +905,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 knownNodes.add(transport3.getLocalDiscoNode());
 knownNodes.add(transport2.getLocalDiscoNode());
 Collections.shuffle(knownNodes, random());
-List<DiscoveryNode> seedNodes = Arrays.asList(node3, node1, node2);
+List<Supplier<DiscoveryNode>> seedNodes = Arrays.asList(() -> node3, () -> node1, () -> node2);
 Collections.shuffle(seedNodes, random());

 try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {

@@ -1059,7 +1060,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
     assertFalse(service.nodeConnected(seedNode));
     assertFalse(service.nodeConnected(discoverableNode));
     assertTrue(connection.assertNoRunningConnections());

@@ -1108,9 +1109,9 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
+        Arrays.asList(() -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
     if (randomBoolean()) {
-        updateSeedNodes(connection, Arrays.asList(seedNode));
+        updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     }
     CountDownLatch responseLatch = new CountDownLatch(1);
     AtomicReference<Function<String, DiscoveryNode>> reference = new AtomicReference<>();

@@ -1142,14 +1143,14 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 List<MockTransportService> discoverableTransports = new CopyOnWriteArrayList<>();
 try {
     final int numDiscoverableNodes = randomIntBetween(5, 20);
-    List<DiscoveryNode> discoverableNodes = new ArrayList<>(numDiscoverableNodes);
-    for (int i = 0; i < numDiscoverableNodes; i++) {
+    List<Supplier<DiscoveryNode>> discoverableNodes = new ArrayList<>(numDiscoverableNodes);
+    for (int i = 0; i < numDiscoverableNodes; i++ ) {
         MockTransportService transportService = startTransport("discoverable_node" + i, knownNodes, Version.CURRENT);
-        discoverableNodes.add(transportService.getLocalDiscoNode());
+        discoverableNodes.add(transportService::getLocalDiscoNode);
         discoverableTransports.add(transportService);
     }

-    List<DiscoveryNode> seedNodes = randomSubsetOf(discoverableNodes);
+    List<Supplier<DiscoveryNode>> seedNodes = randomSubsetOf(discoverableNodes);
     Collections.shuffle(seedNodes, random());

     try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {

@@ -1198,7 +1199,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
     discoverableTransports.add(transportService);
     connection.addConnectedNode(transportService.getLocalDiscoNode());
 } else {
-    DiscoveryNode node = randomFrom(discoverableNodes);
+    DiscoveryNode node = randomFrom(discoverableNodes).get();
     connection.onNodeDisconnected(node);
 }
 }

@@ -1246,12 +1247,13 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Arrays.asList(seedNode), service, Integer.MAX_VALUE, n -> true)) {
-    updateSeedNodes(connection, Arrays.asList(seedNode));
+        Arrays.asList( () -> seedNode), service, Integer.MAX_VALUE, n -> true)) {
+    updateSeedNodes(connection, Arrays.asList(() -> seedNode));
     assertTrue(service.nodeConnected(seedNode));
     assertTrue(service.nodeConnected(discoverableNode));
     assertTrue(connection.assertNoRunningConnections());
-    List<DiscoveryNode> discoveryNodes = Arrays.asList(otherClusterTransport.getLocalDiscoNode(), seedNode);
+    List<Supplier<DiscoveryNode>> discoveryNodes =
+            Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode(), () -> seedNode);
     Collections.shuffle(discoveryNodes, random());
     updateSeedNodes(connection, discoveryNodes);
     assertTrue(service.nodeConnected(seedNode));

@@ -1262,7 +1264,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 assertTrue(service.nodeConnected(discoverableNode));
 assertTrue(connection.assertNoRunningConnections());
 IllegalStateException illegalStateException = expectThrows(IllegalStateException.class, () ->
-    updateSeedNodes(connection, Arrays.asList(otherClusterTransport.getLocalDiscoNode())));
+    updateSeedNodes(connection, Arrays.asList(() -> otherClusterTransport.getLocalDiscoNode())));
 assertThat(illegalStateException.getMessage(),
     startsWith("handshake failed, mismatched cluster name [Cluster [otherCluster]]" +
         " - {other_cluster_discoverable_node}"));

@@ -1325,7 +1327,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
 service.start();
 service.acceptIncomingRequests();
 try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
-        Collections.singletonList(connectedNode), service, Integer.MAX_VALUE, n -> true)) {
+        Collections.singletonList(() -> connectedNode), service, Integer.MAX_VALUE, n -> true)) {
     connection.addConnectedNode(connectedNode);
     for (int i = 0; i < 10; i++) {
         //always a direct connection as the remote node is already connected

@@ -1348,4 +1350,34 @@ public class RemoteClusterConnectionTests extends ESTestCase {
         }
     }
 }
+
+public void testLazyResolveTransportAddress() throws Exception {
+    List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
+    try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);
+         MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
+        DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
+        knownNodes.add(seedTransport.getLocalDiscoNode());
+        knownNodes.add(discoverableTransport.getLocalDiscoNode());
+        Collections.shuffle(knownNodes, random());
+
+        try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
+            service.start();
+            service.acceptIncomingRequests();
+            CountDownLatch multipleResolveLatch = new CountDownLatch(2);
+            Supplier<DiscoveryNode> seedSupplier = () -> {
+                multipleResolveLatch.countDown();
+                return seedNode;
+            };
+            try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
+                    Arrays.asList(seedSupplier), service, Integer.MAX_VALUE, n -> true)) {
+                updateSeedNodes(connection, Arrays.asList(seedSupplier));
+                // Closing connections leads to RemoteClusterConnection.ConnectHandler.collectRemoteNodes
+                // being called again so we try to resolve the same seed node's host twice
+                discoverableTransport.close();
+                seedTransport.close();
+                assertTrue(multipleResolveLatch.await(30L, TimeUnit.SECONDS));
+            }
+        }
+    }
+}
 }
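The `List<DiscoveryNode>` to `List<Supplier<DiscoveryNode>>` change above defers building each seed node until the connection attempt that needs it, which is what `testLazyResolveTransportAddress` exercises: every retry invokes the supplier again, so the seed's transport address can be re-resolved. A standalone sketch of that idea, with a hypothetical `Node` class standing in for `DiscoveryNode` and a counter standing in for the DNS lookup:

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

public class LazySeedDemo {
    // Hypothetical stand-in for DiscoveryNode: records which lookup produced it.
    static final class Node {
        final String host;
        final int resolution;
        Node(String host, int resolution) { this.host = host; this.resolution = resolution; }
        @Override public String toString() { return host + "#" + resolution; }
    }

    public static void main(String[] args) {
        AtomicInteger lookups = new AtomicInteger();
        // The supplier performs the (possibly changing) address lookup on every
        // call, instead of capturing a single address at configuration time.
        Supplier<Node> seed = () -> new Node("seed.example.com", lookups.incrementAndGet());
        List<Supplier<Node>> seeds = Arrays.asList(seed);

        // Each connection attempt resolves afresh, so an IP change between
        // attempts is picked up without re-reading the cluster settings.
        System.out.println(seeds.get(0).get()); // seed.example.com#1
        System.out.println(seeds.get(0).get()); // seed.example.com#2
    }
}
--------------------------------------------------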
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.transport;

+import java.util.function.Supplier;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;

@@ -103,10 +104,19 @@ public class RemoteClusterServiceTests extends ESTestCase {
     .put("search.remote.foo.seeds", "192.168.0.1").build();
 expectThrows(IllegalArgumentException.class, () ->
     RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings).forEach(setting -> setting.get(brokenSettings)));
+
+Settings brokenPortSettings = Settings.builder()
+    .put("search.remote.foo.seeds", "192.168.0.1:123456789123456789").build();
+Exception e = expectThrows(
+    IllegalArgumentException.class,
+    () -> RemoteClusterAware.REMOTE_CLUSTERS_SEEDS.getAllConcreteSettings(brokenSettings)
+        .forEach(setting -> setting.get(brokenPortSettings))
+);
+assertEquals("failed to parse port", e.getMessage());
 }

 public void testBuiltRemoteClustersSeeds() throws Exception {
-    Map<String, List<DiscoveryNode>> map = RemoteClusterService.buildRemoteClustersSeeds(
+    Map<String, List<Supplier<DiscoveryNode>>> map = RemoteClusterService.buildRemoteClustersSeeds(
         Settings.builder().put("search.remote.foo.seeds", "192.168.0.1:8080").put("search.remote.bar.seeds", "[::1]:9090").build());
     assertEquals(2, map.size());
     assertTrue(map.containsKey("foo"));

@@ -114,13 +124,13 @@
 assertEquals(1, map.get("foo").size());
 assertEquals(1, map.get("bar").size());

-DiscoveryNode foo = map.get("foo").get(0);
+DiscoveryNode foo = map.get("foo").get(0).get();

 assertEquals(foo.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("192.168.0.1"), 8080)));
 assertEquals(foo.getId(), "foo#192.168.0.1:8080");
 assertEquals(foo.getVersion(), Version.CURRENT.minimumCompatibilityVersion());

-DiscoveryNode bar = map.get("bar").get(0);
+DiscoveryNode bar = map.get("bar").get(0).get();
 assertEquals(bar.getAddress(), new TransportAddress(new InetSocketAddress(InetAddress.getByName("[::1]"), 9090)));
 assertEquals(bar.getId(), "bar#[::1]:9090");
 assertEquals(bar.getVersion(), Version.CURRENT.minimumCompatibilityVersion());

@@ -194,10 +204,10 @@
 assertFalse(service.isCrossClusterSearchEnabled());
 service.initializeRemoteClusters();
 assertFalse(service.isCrossClusterSearchEnabled());
-service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().address()));
+service.updateRemoteCluster("cluster_1", Collections.singletonList(seedNode.getAddress().toString()));
 assertTrue(service.isCrossClusterSearchEnabled());
 assertTrue(service.isRemoteClusterRegistered("cluster_1"));
-service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().address()));
+service.updateRemoteCluster("cluster_2", Collections.singletonList(otherSeedNode.getAddress().toString()));
 assertTrue(service.isCrossClusterSearchEnabled());
 assertTrue(service.isRemoteClusterRegistered("cluster_1"));
 assertTrue(service.isRemoteClusterRegistered("cluster_2"));

@@ -252,22 +262,17 @@
 service.initializeRemoteClusters();
 assertFalse(service.isCrossClusterSearchEnabled());

-final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
-final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
-final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
-final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
-
 final CountDownLatch firstLatch = new CountDownLatch(1);
 service.updateRemoteCluster(
     "cluster_1",
-    Arrays.asList(c1N1Address, c1N2Address),
+    Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()),
     connectionListener(firstLatch));
 firstLatch.await();

 final CountDownLatch secondLatch = new CountDownLatch(1);
 service.updateRemoteCluster(
     "cluster_2",
-    Arrays.asList(c2N1Address, c2N2Address),
+    Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()),
     connectionListener(secondLatch));
 secondLatch.await();

@@ -321,22 +326,17 @@
 service.initializeRemoteClusters();
 assertFalse(service.isCrossClusterSearchEnabled());

-final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
-final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
-final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
-final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
-
 final CountDownLatch firstLatch = new CountDownLatch(1);
 service.updateRemoteCluster(
     "cluster_1",
-    Arrays.asList(c1N1Address, c1N2Address),
+    Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()),
     connectionListener(firstLatch));
 firstLatch.await();

 final CountDownLatch secondLatch = new CountDownLatch(1);
 service.updateRemoteCluster(
     "cluster_2",
-    Arrays.asList(c2N1Address, c2N2Address),
+    Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()),
     connectionListener(secondLatch));
 secondLatch.await();

@@ -398,22 +398,17 @@
 service.initializeRemoteClusters();
 assertFalse(service.isCrossClusterSearchEnabled());

-final InetSocketAddress c1N1Address = c1N1Node.getAddress().address();
-final InetSocketAddress c1N2Address = c1N2Node.getAddress().address();
-final InetSocketAddress c2N1Address = c2N1Node.getAddress().address();
-final InetSocketAddress c2N2Address = c2N2Node.getAddress().address();
-
 final CountDownLatch firstLatch = new CountDownLatch(1);
 service.updateRemoteCluster(
     "cluster_1",
-    Arrays.asList(c1N1Address, c1N2Address),
+    Arrays.asList(c1N1Node.getAddress().toString(), c1N2Node.getAddress().toString()),
     connectionListener(firstLatch));
 firstLatch.await();

 final CountDownLatch secondLatch = new CountDownLatch(1);
 service.updateRemoteCluster(
     "cluster_2",
-    Arrays.asList(c2N1Address, c2N2Address),
+    Arrays.asList(c2N1Node.getAddress().toString(), c2N2Node.getAddress().toString()),
     connectionListener(secondLatch));
 secondLatch.await();
 CountDownLatch latch = new CountDownLatch(1);
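Passing the remote-cluster seeds to `updateRemoteCluster` as `host:port` strings rather than pre-resolved `InetSocketAddress` values fits the same lazy-resolution theme: a string can be parsed and resolved again on every connection round. A sketch (not the Elasticsearch parser) of splitting such a seed string, including the bracketed IPv6 form `[::1]:9090` that `testBuiltRemoteClustersSeeds` checks:

[source,java]
--------------------------------------------------
public class SeedAddressDemo {
    // Split "host:port" on the last colon so IPv6 literals like "[::1]:9090"
    // keep their bracketed host part, matching node ids such as "bar#[::1]:9090".
    static String[] splitHostPort(String seed) {
        int idx = seed.lastIndexOf(':');
        if (idx < 0) {
            throw new IllegalArgumentException("no port in " + seed);
        }
        return new String[] { seed.substring(0, idx), seed.substring(idx + 1) };
    }

    public static void main(String[] args) {
        for (String seed : new String[] { "192.168.0.1:8080", "[::1]:9090" }) {
            String[] parts = splitHostPort(seed);
            // Integer.parseInt rejects out-of-range garbage such as the
            // "192.168.0.1:123456789123456789" case the test expects to fail.
            System.out.println(parts[0] + " -> port " + Integer.parseInt(parts[1]));
        }
    }
}
--------------------------------------------------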
@@ -84,6 +84,7 @@ if (isEclipse) {
 // for server-src and server-tests
 projects << 'server-tests'
 projects << 'libs:core-tests'
+projects << 'libs:dissect-tests'
 projects << 'libs:nio-tests'
 projects << 'libs:x-content-tests'
 projects << 'libs:secure-sm-tests'

@@ -103,6 +104,10 @@ if (isEclipse) {
 project(":libs:core").buildFileName = 'eclipse-build.gradle'
 project(":libs:core-tests").projectDir = new File(rootProject.projectDir, 'libs/core/src/test')
 project(":libs:core-tests").buildFileName = 'eclipse-build.gradle'
+project(":libs:dissect").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/main')
+project(":libs:dissect").buildFileName = 'eclipse-build.gradle'
+project(":libs:dissect-tests").projectDir = new File(rootProject.projectDir, 'libs/dissect/src/test')
+project(":libs:dissect-tests").buildFileName = 'eclipse-build.gradle'
 project(":libs:nio").projectDir = new File(rootProject.projectDir, 'libs/nio/src/main')
 project(":libs:nio").buildFileName = 'eclipse-build.gradle'
 project(":libs:nio-tests").projectDir = new File(rootProject.projectDir, 'libs/nio/src/test')
@@ -33,7 +33,7 @@ import static org.junit.Assert.assertTrue;
  */
 public class EqualsHashCodeTestUtils {

-    private static Object[] someObjects = new Object[] { "some string", new Integer(1), new Double(1.0) };
+    private static Object[] someObjects = new Object[] { "some string", Integer.valueOf(1), Double.valueOf(1.0) };

     /**
      * A function that makes a copy of its input argument
@@ -41,9 +41,9 @@ import org.elasticsearch.transport.RemoteTransportException;
 import org.elasticsearch.transport.RequestHandlerRegistry;
 import org.elasticsearch.transport.SendRequestTransportException;
 import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportConnectionListener;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportInterceptor;
+import org.elasticsearch.transport.TransportMessageListener;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportResponse;

@@ -72,7 +72,7 @@ public class CapturingTransport implements Transport {
 private volatile Map<String, RequestHandlerRegistry> requestHandlers = Collections.emptyMap();
 final Object requestHandlerMutex = new Object();
 private final ResponseHandlers responseHandlers = new ResponseHandlers();
-private TransportConnectionListener listener;
+private TransportMessageListener listener;

 public static class CapturedRequest {
     public final DiscoveryNode node;

@@ -341,7 +341,7 @@ public class CapturingTransport implements Transport {
 }

 @Override
-public void addConnectionListener(TransportConnectionListener listener) {
+public void addMessageListener(TransportMessageListener listener) {
     if (this.listener != null) {
         throw new IllegalStateException("listener already set");
     }

@@ -349,11 +349,12 @@ public class CapturingTransport implements Transport {
 }

 @Override
-public boolean removeConnectionListener(TransportConnectionListener listener) {
+public boolean removeMessageListener(TransportMessageListener listener) {
     if (listener == this.listener) {
         this.listener = null;
         return true;
     }
     return false;
 }
+
 }
@@ -29,8 +29,8 @@ import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.transport.ConnectionProfile;
 import org.elasticsearch.transport.RequestHandlerRegistry;
 import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportConnectionListener;
 import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportMessageListener;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportStats;

@@ -86,13 +86,13 @@ public class StubbableTransport implements Transport {
 }

 @Override
-public void addConnectionListener(TransportConnectionListener listener) {
-    delegate.addConnectionListener(listener);
+public void addMessageListener(TransportMessageListener listener) {
+    delegate.addMessageListener(listener);
 }

 @Override
-public boolean removeConnectionListener(TransportConnectionListener listener) {
-    return delegate.removeConnectionListener(listener);
+public boolean removeMessageListener(TransportMessageListener listener) {
+    return delegate.removeMessageListener(listener);
 }

 @Override

@@ -179,7 +179,7 @@ public class StubbableTransport implements Transport {
     return delegate.profileBoundAddresses();
 }

-private class WrappedConnection implements Transport.Connection {
+public class WrappedConnection implements Transport.Connection {

     private final Transport.Connection connection;

@@ -234,6 +234,10 @@ public class StubbableTransport implements Transport {
 public void close() {
     connection.close();
 }
+
+public Transport.Connection getConnection() {
+    return connection;
+}
 }

 @FunctionalInterface
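Making `WrappedConnection` public and giving it a `getConnection()` accessor lets test code unwrap the stubbed connection and reach the concrete `TcpTransport.NodeChannels` underneath, as the reworked `closeConnectionChannel` in the next file does. A generic sketch of this delegate-unwrapping idiom, using hypothetical `Conn` types rather than the real transport classes:

[source,java]
--------------------------------------------------
public class UnwrapDemo {
    interface Conn { void close(); }

    static final class RealConn implements Conn {
        @Override public void close() { System.out.println("real connection closed"); }
    }

    // Test-only wrapper that forwards calls but also exposes its delegate,
    // mirroring StubbableTransport.WrappedConnection#getConnection().
    static final class WrappedConn implements Conn {
        private final Conn delegate;
        WrappedConn(Conn delegate) { this.delegate = delegate; }
        @Override public void close() { delegate.close(); }
        Conn getConnection() { return delegate; }
    }

    public static void main(String[] args) {
        Conn conn = new WrappedConn(new RealConn());
        // A test can cast and unwrap to operate on the concrete implementation.
        RealConn real = (RealConn) ((WrappedConn) conn).getConnection();
        real.close();
    }
}
--------------------------------------------------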
@@ -34,6 +34,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.network.CloseableChannel;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.network.NetworkUtils;
 import org.elasticsearch.common.settings.ClusterSettings;

@@ -52,6 +53,7 @@ import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.transport.MockTransportService;
+import org.elasticsearch.test.transport.StubbableTransport;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.After;

@@ -2642,15 +2644,22 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {

 public void testChannelCloseWhileConnecting() {
     try (MockTransportService service = build(Settings.builder().put("name", "close").build(), version0, null, true)) {
-        service.transport.addConnectionListener(new TransportConnectionListener() {
+        AtomicBoolean connectionClosedListenerCalled = new AtomicBoolean(false);
+        service.addConnectionListener(new TransportConnectionListener() {
             @Override
             public void onConnectionOpened(final Transport.Connection connection) {
+                closeConnectionChannel(connection);
                 try {
-                    closeConnectionChannel(service.getOriginalTransport(), connection);
-                } catch (final IOException e) {
+                    assertBusy(connection::isClosed);
+                } catch (Exception e) {
                     throw new AssertionError(e);
                 }
             }
+
+            @Override
+            public void onConnectionClosed(Transport.Connection connection) {
+                connectionClosedListenerCalled.set(true);
+            }
         });
         final ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
         builder.addConnections(1,

@@ -2662,10 +2671,15 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
         final ConnectTransportException e =
             expectThrows(ConnectTransportException.class, () -> service.openConnection(nodeA, builder.build()));
         assertThat(e, hasToString(containsString(("a channel closed while connecting"))));
+        assertTrue(connectionClosedListenerCalled.get());
     }
 }

-protected abstract void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException;
+private void closeConnectionChannel(Transport.Connection connection) {
+    StubbableTransport.WrappedConnection wrappedConnection = (StubbableTransport.WrappedConnection) connection;
+    TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) wrappedConnection.getConnection();
+    CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
+}

 @SuppressForbidden(reason = "need local ephemeral port")
 private InetSocketAddress getLocalEphemeral() throws UnknownHostException {
@@ -21,7 +21,6 @@ package org.elasticsearch.transport;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.network.CloseableChannel;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;

@@ -60,13 +59,4 @@ public class MockTcpTransportTests extends AbstractSimpleTransportTestCase {
     public int channelsPerNodeConnection() {
         return 1;
     }
-
-    @Override
-    protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
-        final MockTcpTransport t = (MockTcpTransport) transport;
-        final TcpTransport.NodeChannels channels =
-            (TcpTransport.NodeChannels) connection;
-        CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
-    }
-
 }
@@ -22,7 +22,6 @@ package org.elasticsearch.transport.nio;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.network.CloseableChannel;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;

@@ -99,12 +98,6 @@ public class SimpleMockNioTransportTests extends AbstractSimpleTransportTestCase
     return 3;
 }

-@Override
-protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
-    TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection;
-    CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
-}
-
 public void testConnectException() throws UnknownHostException {
     try {
         serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876),
@@ -736,3 +736,16 @@ setups['admin_role'] = '''
   "metadata" : {"version": 1}
 }
 '''
+setups['jacknich_user'] = '''
+  - do:
+      xpack.security.put_user:
+        username: "jacknich"
+        body: >
+          {
+            "password" : "test-password",
+            "roles" : [ "admin", "other_role1" ],
+            "full_name" : "Jack Nicholson",
+            "email" : "jacknich@example.com",
+            "metadata" : { "intelligence" : 7 }
+          }
+'''
@@ -9,20 +9,52 @@ You can use the following APIs to perform {security} activities.
 * <<security-api-privileges>>
 * <<security-api-role-mapping>>
 * <<security-api-ssl>>
-* <<security-api-tokens>>
-* <<security-api-users>>

-include::security/roles.asciidoc[]
+[float]
+[[security-role-apis]]
+=== Roles
+
+You can use the following APIs to add, remove, and retrieve roles in the native realm:
+
+* <<security-api-put-role,Create role>>, <<security-api-delete-role,Delete role>>
+* <<security-api-clear-role-cache,Clear roles cache>>
+* <<security-api-get-role,Get roles>>
+
+[float]
+[[security-token-apis]]
+=== Tokens
+
+You can use the following APIs to create and invalidate bearer tokens for access
+without requiring basic authentication:
+
+* <<security-api-get-token,Get token>>, <<security-api-invalidate-token,Invalidate token>>
+
+[float]
+[[security-user-apis]]
+=== Users
+
+You can use the following APIs to create, read, update, and delete users from the
+native realm:
+
+* <<security-api-put-user,Create users>>, <<security-api-delete-user,Delete users>>
+* <<security-api-enable-user,Enable users>>, <<security-api-disable-user,Disable users>>
+* <<security-api-change-password,Change passwords>>
+* <<security-api-get-user,Get users>>

 include::security/authenticate.asciidoc[]
 include::security/change-password.asciidoc[]
 include::security/clear-cache.asciidoc[]
 include::security/clear-roles-cache.asciidoc[]
 include::security/create-roles.asciidoc[]
+include::security/create-users.asciidoc[]
 include::security/delete-roles.asciidoc[]
+include::security/delete-tokens.asciidoc[]
+include::security/delete-users.asciidoc[]
+include::security/disable-users.asciidoc[]
+include::security/enable-users.asciidoc[]
 include::security/get-roles.asciidoc[]
+include::security/get-tokens.asciidoc[]
+include::security/get-users.asciidoc[]
 include::security/privileges.asciidoc[]
 include::security/role-mapping.asciidoc[]
 include::security/ssl.asciidoc[]
 include::security/tokens.asciidoc[]
-include::security/users.asciidoc[]
@@ -1,9 +1,8 @@
 [role="xpack"]
 [[security-api-change-password]]
-=== Change Password API
+=== Change passwords API

-The Change Password API enables you to submit a request to change the password
-of a user.
+Changes the passwords of users in the native realm.

 ==== Request

@@ -12,6 +11,15 @@ of a user.
 `POST _xpack/security/user/<username>/_password`


+==== Description
+
+You can use the <<security-api-put-user,create user API>> to update everything
+but a user's `username` and `password`. This API changes a user's password.
+
+For more information about the native realm, see
+{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.
+
+
 ==== Path Parameters

 `username`::

@@ -33,16 +41,17 @@ privilege can change passwords of other users.

 ==== Examples

-The following example updates the password for the `elastic` user:
+The following example updates the password for the `jacknich` user:

 [source,js]
 --------------------------------------------------
-POST _xpack/security/user/elastic/_password
+POST /_xpack/security/user/jacknich/_password
 {
-  "password": "x-pack-test-password"
+  "password" : "s3cr3t"
 }
 --------------------------------------------------
 // CONSOLE
+// TEST[setup:jacknich_user]

 A successful call returns an empty JSON structure.
@@ -0,0 +1,107 @@
[role="xpack"]
[[security-api-put-user]]
=== Create users API

Creates and updates users in the native realm. These users are commonly referred
to as _native users_.


==== Request

`POST /_xpack/security/user/<username>` +

`PUT /_xpack/security/user/<username>`


==== Description

When updating a user, you can update everything but its `username` and `password`.
To change a user's password, use the
<<security-api-change-password, change password API>>.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
  (string) An identifier for the user.
+
--
[[username-validation]]
NOTE: Usernames must be at least 1 and no more than 1024 characters. They can
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and
printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. Leading or trailing whitespace is not allowed.

--


==== Request Body

The following parameters can be specified in the body of a POST or PUT request:

`enabled`::
(boolean) Specifies whether the user is enabled. The default value is `true`.

`email`::
(string) The email of the user.

`full_name`::
(string) The full name of the user.

`metadata`::
(object) Arbitrary metadata that you want to associate with the user.

`password` (required)::
(string) The user's password. Passwords must be at least 6 characters long.

`roles` (required)::
(list) A set of roles the user has. The roles determine the user's access
permissions. To create a user without any roles, specify an empty list: `[]`.


==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example creates a user `jacknich`:

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password" : "j@rV1s",
  "roles" : [ "admin", "other_role1" ],
  "full_name" : "Jack Nicholson",
  "email" : "jacknich@example.com",
  "metadata" : {
    "intelligence" : 7
  }
}
--------------------------------------------------
// CONSOLE

A successful call returns a JSON structure that shows whether the user has been
created or updated.

[source,js]
--------------------------------------------------
{
  "user": {
    "created" : true <1>
  }
}
--------------------------------------------------
// TESTRESPONSE
<1> When an existing user is updated, `created` is set to false.

After you add a user, requests from that user can be authenticated. For example:

[source,shell]
--------------------------------------------------
curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health
--------------------------------------------------
// NOTCONSOLE
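The same authenticated request, sketched with the JDK's `HttpURLConnection` instead of curl. This assumes a cluster on `localhost:9200` and the `jacknich` user created above; it is an illustration, not part of the documented API:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class AuthenticatedHealthCheck {
    public static void main(String[] args) throws IOException {
        URL url = new URL("http://localhost:9200/_cluster/health");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // Basic authentication header for the user created in the example above.
        String credentials = Base64.getEncoder()
            .encodeToString("jacknich:j@rV1s".getBytes(StandardCharsets.UTF_8));
        conn.setRequestProperty("Authorization", "Basic " + credentials);
        try (InputStream in = conn.getInputStream()) {
            // Read the cluster health JSON response.
            StringBuilder response = new StringBuilder();
            byte[] buffer = new byte[4096];
            int read;
            while ((read = in.read(buffer)) != -1) {
                response.append(new String(buffer, 0, read, StandardCharsets.UTF_8));
            }
            System.out.println(response);
        }
    }
}
--------------------------------------------------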
@@ -0,0 +1,54 @@
[role="xpack"]
[[security-api-invalidate-token]]
=== Delete token API

Invalidates a bearer token for access without requiring basic authentication.

==== Request

`DELETE /_xpack/security/oauth2/token`

==== Description

The tokens returned by the <<security-api-get-token,get token API>> have a
finite period of time for which they are valid and after that time period, they
can no longer be used. That time period is defined by the
`xpack.security.authc.token.timeout` setting. For more information, see
<<token-service-settings>>.

If you want to invalidate a token immediately, use this delete token API.


==== Request Body

The following parameters can be specified in the body of a DELETE request and
pertain to deleting a token:

`token` (required)::
(string) An access token.

==== Examples

The following example invalidates the specified token immediately:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/oauth2/token
{
  "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ=="
}
--------------------------------------------------
// NOTCONSOLE

A successful call returns a JSON structure that indicates whether the token
has already been invalidated.

[source,js]
--------------------------------------------------
{
  "created" : true <1>
}
--------------------------------------------------
// NOTCONSOLE

<1> When a token has already been invalidated, `created` is set to false.
@@ -0,0 +1,48 @@
[role="xpack"]
[[security-api-delete-user]]
=== Delete users API

Deletes users from the native realm.

==== Request

`DELETE /_xpack/security/user/<username>`

==== Description

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
  (string) An identifier for the user.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example deletes the user `jacknich`:

[source,js]
--------------------------------------------------
DELETE /_xpack/security/user/jacknich
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]

If the user is successfully deleted, the request returns `{"found": true}`.
Otherwise, `found` is set to false.

[source,js]
--------------------------------------------------
{
  "found" : true
}
--------------------------------------------------
// TESTRESPONSE
@@ -0,0 +1,43 @@
[role="xpack"]
[[security-api-disable-user]]
=== Disable users API

Disables users in the native realm.


==== Request

`PUT /_xpack/security/user/<username>/_disable`


==== Description

By default, when you create users, they are enabled. You can use this API to
revoke a user's access to {es}. To re-enable a user, there is an
<<security-api-enable-user,enable users API>>.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
  (string) An identifier for the user.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example disables the user `jacknich`:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_disable
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]
@@ -0,0 +1,42 @@
[role="xpack"]
[[security-api-enable-user]]
=== Enable users API

Enables users in the native realm.


==== Request

`PUT /_xpack/security/user/<username>/_enable`


==== Description

By default, when you create users, they are enabled. You can use this enable
users API and the <<security-api-disable-user,disable users API>> to change that attribute.

For more information about the native realm, see
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.

==== Path Parameters

`username` (required)::
  (string) An identifier for the user.

//==== Request Body

==== Authorization

To use this API, you must have at least the `manage_security` cluster privilege.


==== Examples

The following example enables the user `jacknich`:

[source,js]
--------------------------------------------------
PUT /_xpack/security/user/jacknich/_enable
--------------------------------------------------
// CONSOLE
// TEST[setup:jacknich_user]
@@ -1,15 +1,12 @@
 [role="xpack"]
-[[security-api-tokens]]
-=== Token Management APIs
+[[security-api-get-token]]
+=== Get token API

-The `token` API enables you to create and invalidate bearer tokens for access
-without requiring basic authentication.
+Creates a bearer token for access without requiring basic authentication.

 ==== Request

-`POST /_xpack/security/oauth2/token` +
-
-`DELETE /_xpack/security/oauth2/token`
+`POST /_xpack/security/oauth2/token`

 ==== Description

@@ -19,20 +16,20 @@ you can explicitly enable the `xpack.security.authc.token.enabled` setting. When
 you are running in production mode, a bootstrap check prevents you from enabling
 the token service unless you also enable TLS on the HTTP interface.

-The Get Token API takes the same parameters as a typical OAuth 2.0 token API
+The get token API takes the same parameters as a typical OAuth 2.0 token API
 except for the use of a JSON request body.

-A successful Get Token API call returns a JSON structure that contains the access
+A successful get token API call returns a JSON structure that contains the access
 token, the amount of time (seconds) that the token expires in, the type, and the
 scope if available.

-The tokens returned by the Get Token API have a finite period of time for which
+The tokens returned by the get token API have a finite period of time for which
 they are valid and after that time period, they can no longer be used. That time
 period is defined by the `xpack.security.authc.token.timeout` setting. For more
 information, see <<token-service-settings>>.

-If you want to invalidate a token immediately, you can do so by using the Delete
-Token API.
+If you want to invalidate a token immediately, you can do so by using the
+<<security-api-invalidate-token,delete token API>>.


 ==== Request Body

@@ -41,28 +38,28 @@ The following parameters can be specified in the body of a POST request and
 pertain to creating a token:

 `grant_type`::
-(string) The type of grant. Currently only the `password` grant type is supported.
+(string) The type of grant. Valid grant types are: `password` and `refresh_token`.

-`password` (required)::
-(string) The user's password.
+`password`::
+(string) The user's password. If you specify the `password` grant type, this
+parameter is required.
+
+`refresh_token`::
+(string) If you specify the `refresh_token` grant type, this parameter is
+required. It contains the string that was returned when you created the token
+and enables you to extend its life.

 `scope`::
 (string) The scope of the token. Currently tokens are only issued for a scope of
 `FULL` regardless of the value sent with the request.

-`username` (required)::
-(string) The username that identifies the user.
-
-The following parameters can be specified in the body of a DELETE request and
-pertain to deleting a token:
-
-`token`::
-(string) An access token.
+`username`::
+(string) The username that identifies the user. If you specify the `password`
+grant type, this parameter is required.

 ==== Examples
-[[security-api-get-token]]
-To obtain a token, submit a POST request to the `/_xpack/security/oauth2/token`
-endpoint.
+
+The following example obtains a token for the `test_admin` user:

 [source,js]
 --------------------------------------------------

@@ -101,8 +98,8 @@ curl -H "Authorization: Bearer dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvb
 // NOTCONSOLE

 [[security-api-refresh-token]]
-To extend the life of an existing token, the token api may be called again with the refresh
-token within 24 hours of the token's creation.
+To extend the life of an existing token, you can call the API again with the
+refresh token within 24 hours of the token's creation. For example:

 [source,js]
 --------------------------------------------------

@@ -116,7 +113,8 @@ POST /_xpack/security/oauth2/token
 // TEST[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/]
 // TEST[continued]

-The API will return a new token and refresh token. Each refresh token may only be used one time.
+The API will return a new token and refresh token. Each refresh token may only
+be used one time.

 [source,js]
 --------------------------------------------------

@@ -128,32 +126,4 @@ The API will return a new token and refresh token. Each refresh token may only b
 }
 --------------------------------------------------
 // TESTRESPONSE[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/]
-// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/]
-
-[[security-api-invalidate-token]]
-If a token must be invalidated immediately, you can do so by submitting a DELETE
-request to `/_xpack/security/oauth2/token`. For example:
-
-[source,js]
---------------------------------------------------
-DELETE /_xpack/security/oauth2/token
-{
-  "token" : "dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ=="
-}
---------------------------------------------------
-// CONSOLE
-// TEST[s/dGhpcyBpcyBub3QgYSByZWFsIHRva2VuIGJ1dCBpdCBpcyBvbmx5IHRlc3QgZGF0YS4gZG8gbm90IHRyeSB0byByZWFkIHRva2VuIQ==/$body.access_token/]
-// TEST[continued]
-
-A successful call returns a JSON structure that indicates whether the token
-has already been invalidated.
-
-[source,js]
---------------------------------------------------
-{
-  "created" : true <1>
-}
---------------------------------------------------
-// TESTRESPONSE
-
-<1> When a token has already been invalidated, `created` is set to false.
+// TESTRESPONSE[s/vLBPvmAB6KvwvJZr27cS/$body.refresh_token/]
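The refresh flow described above, sketched as a raw HTTP request with the JDK's `HttpURLConnection`. The token value is the placeholder from the console examples, the endpoint address is assumed to be `localhost:9200`, and a real call would also need an `Authorization` header accepted by the cluster:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class RefreshTokenRequest {
    public static void main(String[] args) throws IOException {
        // Placeholder: a real refresh token comes from a previous get token response.
        String refreshToken = "vLBPvmAB6KvwvJZr27cS";
        String body = "{\"grant_type\":\"refresh_token\",\"refresh_token\":\"" + refreshToken + "\"}";

        URL url = new URL("http://localhost:9200/_xpack/security/oauth2/token");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/json");
        try (OutputStream out = conn.getOutputStream()) {
            out.write(body.getBytes(StandardCharsets.UTF_8));
        }
        // 200 with a new access/refresh token pair on success; each refresh
        // token may only be used once, per the description above.
        System.out.println("HTTP " + conn.getResponseCode());
    }
}
--------------------------------------------------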
@ -0,0 +1,74 @@
|
|||
[role="xpack"]
|
||||
[[security-api-get-user]]
|
||||
=== Get users API
|
||||
|
||||
Retrieves information about users in the native realm.
|
||||
|
||||
|
||||
==== Request
|
||||
|
||||
`GET /_xpack/security/user` +
|
||||
|
||||
`GET /_xpack/security/user/<username>`
|
||||
|
||||
==== Description
|
||||
|
||||
For more information about the native realm, see
|
||||
{stack-ov}/realms.html[Realms] and <<configuring-native-realm>>.
|
||||
|
||||
==== Path Parameters
|
||||
|
||||
`username`::
|
||||
(string) An identifier for the user. You can specify multiple usernames as a comma-separated list. If you omit this parameter, the API retrieves
|
||||
information about all users.
|
||||
|
||||
//==== Request Body
|
||||
|
||||
==== Authorization
|
||||
|
||||
To use this API, you must have at least the `manage_security` cluster privilege.
|
||||
|
||||
|
||||
==== Examples
|
||||
|
||||
To retrieve a native user, submit a GET request to the `/_xpack/security/user/<username>`
|
||||
endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_xpack/security/user/jacknich
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[setup:jacknich_user]
|
||||
|
||||
A successful call returns the requested users as a JSON object keyed by
|
||||
username. Note that user passwords are not included.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"jacknich": {
|
||||
"username": "jacknich",
|
||||
"roles": [
|
||||
"admin", "other_role1"
|
||||
],
|
||||
"full_name": "Jack Nicholson",
|
||||
"email": "jacknich@example.com",
|
||||
"metadata": { "intelligence" : 7 },
|
||||
"enabled": true
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TESTRESPONSE
|
||||
|
||||
If the user is not defined in the `native` realm, the request returns a 404 status code.
|
||||
|
||||
Omit the username to retrieve all users:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_xpack/security/user
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
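
You can also retrieve several users at once by specifying their usernames as a
comma-separated list. This sketch assumes a second user, `rdinero`, exists:

[source,js]
--------------------------------------------------
GET /_xpack/security/user/jacknich,rdinero
--------------------------------------------------
// CONSOLE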
|
|
@ -22,7 +22,7 @@ Role mappings have _rules_ that identify users and a list of _roles_ that are
|
|||
granted to those users.
|
||||
|
||||
NOTE: This API does not create roles. Rather, it maps users to existing roles.
|
||||
Roles can be created by using <<security-api-roles, Role Management APIs>> or
|
||||
Roles can be created by using <<security-role-apis,role management APIs>> or
|
||||
{xpack-ref}/defining-roles.html#roles-management-file[roles files].
|
||||
|
||||
The role mapping rule is a logical condition that is expressed using a JSON DSL.
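For example, a minimal rule that matches a single user by name uses the `field`
rule type (shown here as an illustration only):

[source,js]
--------------------------------------------------
{
  "field" : { "username" : "jacknich" }
}
--------------------------------------------------
// NOTCONSOLE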
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
[float]
|
||||
[[security-api-roles]]
|
||||
=== Roles
|
||||
|
||||
You can use the following APIs to add, remove, and retrieve roles in the native realm:
|
||||
|
||||
* <<security-api-put-role,Create role>>, <<security-api-delete-role,Delete role>>
|
||||
* <<security-api-clear-role-cache,Clear roles cache>>
|
||||
* <<security-api-get-role,Get roles>>
|
|
@ -1,226 +0,0 @@
|
|||
[role="xpack"]
|
||||
[[security-api-users]]
|
||||
=== User Management APIs
|
||||
|
||||
The `user` API enables you to create, read, update, and delete users from the
|
||||
`native` realm. These users are commonly referred to as *native users*.
|
||||
|
||||
|
||||
==== Request
|
||||
|
||||
`GET /_xpack/security/user` +
|
||||
|
||||
`GET /_xpack/security/user/<username>` +
|
||||
|
||||
`DELETE /_xpack/security/user/<username>` +
|
||||
|
||||
`POST /_xpack/security/user/<username>` +
|
||||
|
||||
`PUT /_xpack/security/user/<username>` +
|
||||
|
||||
`PUT /_xpack/security/user/<username>/_disable` +
|
||||
|
||||
`PUT /_xpack/security/user/<username>/_enable` +
|
||||
|
||||
`PUT /_xpack/security/user/<username>/_password`
|
||||
|
||||
|
||||
==== Description
|
||||
|
||||
You can use the PUT user API to create or update users. When updating a user,
|
||||
you can update everything but its `username` and `password`. To change a user's
|
||||
password, use the <<security-api-reset-user-password, reset password API>>.
|
||||
|
||||
[[username-validation]]
|
||||
NOTE: Usernames must be at least 1 character and no more than 1024 characters long. They can
|
||||
contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, punctuation, and
|
||||
printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block].
|
||||
Leading or trailing whitespace is not allowed.
|
||||
|
||||
==== Path Parameters
|
||||
|
||||
`username`::
|
||||
(string) An identifier for the user. If you omit this parameter from a Get
|
||||
User API request, it retrieves information about all users.
|
||||
|
||||
|
||||
==== Request Body
|
||||
|
||||
The following parameters can be specified in the body of a POST or PUT request
|
||||
and pertain to creating a user:
|
||||
|
||||
`enabled`::
|
||||
(boolean) Specifies whether the user is enabled. The default value is `true`.
|
||||
|
||||
`email`::
|
||||
(string) The email of the user.
|
||||
|
||||
`full_name`::
|
||||
(string) The full name of the user.
|
||||
|
||||
`metadata`::
|
||||
(object) Arbitrary metadata that you want to associate with the user.
|
||||
|
||||
`password` (required)::
|
||||
(string) The user's password. Passwords must be at least 6 characters long.
|
||||
|
||||
`roles` (required)::
|
||||
(list) A set of roles the user has. The roles determine the user's access
|
||||
permissions. To create a user without any roles, specify an empty list: `[]`.
|
||||
|
||||
==== Authorization
|
||||
|
||||
To use this API, you must have at least the `manage_security` cluster privilege.
|
||||
|
||||
|
||||
==== Examples
|
||||
|
||||
[[security-api-put-user]]
|
||||
To add a user, submit a PUT or POST request to the `/_xpack/security/user/<username>`
|
||||
endpoint.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST /_xpack/security/user/jacknich
|
||||
{
|
||||
"password" : "j@rV1s",
|
||||
"roles" : [ "admin", "other_role1" ],
|
||||
"full_name" : "Jack Nicholson",
|
||||
"email" : "jacknich@example.com",
|
||||
"metadata" : {
|
||||
"intelligence" : 7
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
A successful call returns a JSON structure that shows whether the user has been
|
||||
created or updated.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"user": {
|
||||
"created" : true <1>
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
<1> When an existing user is updated, `created` is set to false.
|
||||
|
||||
After you add a user through the Users API, requests from that user can be
|
||||
authenticated. For example:
|
||||
|
||||
[source,shell]
|
||||
--------------------------------------------------
|
||||
curl -u jacknich:j@rV1s http://localhost:9200/_cluster/health
|
||||
--------------------------------------------------
|
||||
// NOTCONSOLE
|
||||
|
||||
[[security-api-get-user]]
|
||||
To retrieve a native user, submit a GET request to the `/_xpack/security/user/<username>`
|
||||
endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_xpack/security/user/jacknich
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
A successful call returns an array of users with the JSON representation of the
|
||||
user. Note that user passwords are not included.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"jacknich": { <1>
|
||||
"username" : "jacknich",
|
||||
"roles" : [ "admin", "other_role1" ],
|
||||
"full_name" : "Jack Nicholson",
|
||||
"email" : "jacknich@example.com",
|
||||
"enabled": true,
|
||||
"metadata" : {
|
||||
"intelligence" : 7
|
||||
}
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
||||
<1> If the user is not defined in the `native` realm, the request 404s.
|
||||
|
||||
You can specify multiple usernames as a comma-separated list:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_xpack/security/user/jacknich,rdinero
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
Omit the username to retrieve all users:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_xpack/security/user
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[security-api-reset-user-password]]
|
||||
To reset the password for a user, submit a PUT request to the
|
||||
`/_xpack/security/user/<username>/_password` endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /_xpack/security/user/jacknich/_password
|
||||
{
|
||||
"password" : "s3cr3t"
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[security-api-disable-user]]
|
||||
To disable a user, submit a PUT request to the
|
||||
`/_xpack/security/user/<username>/_disable` endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /_xpack/security/user/jacknich/_disable
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[security-api-enable-user]]
|
||||
To enable a user, submit a PUT request to the
|
||||
`/_xpack/security/user/<username>/_enable` endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /_xpack/security/user/jacknich/_enable
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
[[security-api-delete-user]]
|
||||
To delete a user, submit a DELETE request to the `/_xpack/security/user/<username>`
|
||||
endpoint:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
DELETE /_xpack/security/user/jacknich
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
||||
If the user is successfully deleted, the request returns `{"found": true}`.
|
||||
Otherwise, `found` is set to false.
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
{
|
||||
"found" : true
|
||||
}
|
||||
--------------------------------------------------
|
||||
// TESTRESPONSE
|
|
@ -0,0 +1,170 @@
|
|||
[role="xpack"]
|
||||
[[configuring-kerberos-realm]]
|
||||
=== Configuring a Kerberos realm
|
||||
|
||||
Kerberos is used to protect services and uses a ticket-based authentication
|
||||
protocol to authenticate users.
|
||||
You can configure {es} to use the Kerberos V5 authentication protocol, an
|
||||
industry standard, to authenticate users.
|
||||
In this scenario, clients must present Kerberos tickets for authentication.
|
||||
|
||||
In Kerberos, users authenticate with an authentication service and later
|
||||
with a ticket granting service to generate a TGT (ticket-granting ticket).
|
||||
This ticket is then presented to the service for authentication.
|
||||
Refer to your Kerberos installation documentation for more information about
|
||||
obtaining a TGT. {es} clients must first obtain a TGT and then initiate the
|
||||
process of authenticating with {es}.
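
For example, with MIT Kerberos a client can obtain a TGT and inspect the ticket
cache using the `kinit` and `klist` commands. The principal name here is a
placeholder:

[source,shell]
--------------------------------------------------
kinit user@ES.DOMAIN.LOCAL   # obtain a TGT from the KDC
klist                        # list cached tickets, including the TGT
--------------------------------------------------
// NOTCONSOLE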
|
||||
|
||||
For a summary of Kerberos terminology, see {stack-ov}/kerberos-realm.html[Kerberos authentication].
|
||||
|
||||
==== Before you begin
|
||||
|
||||
. Deploy Kerberos.
|
||||
+
|
||||
--
|
||||
You must have the Kerberos infrastructure set up in your environment.
|
||||
|
||||
NOTE: Kerberos depends on a number of external services to function properly, such as
|
||||
time synchronization between all machines and working forward and reverse DNS
|
||||
mappings in your domain. Refer to your Kerberos documentation for more details.
|
||||
|
||||
These instructions do not cover setting up and configuring your Kerberos
|
||||
deployment. Where examples are provided, they pertain to an MIT Kerberos V5
|
||||
deployment. For more information, see
|
||||
http://web.mit.edu/kerberos/www/index.html[MIT Kerberos documentation].
|
||||
--
|
||||
|
||||
. Configure Java GSS.
|
||||
+
|
||||
--
|
||||
|
||||
{es} uses the Java GSS framework to support Kerberos authentication.
|
||||
To support Kerberos authentication, {es} needs the following files:
|
||||
|
||||
* `krb5.conf`, a Kerberos configuration file
|
||||
* A `keytab` file that contains credentials for the {es} service principal
|
||||
|
||||
The configuration requirements depend on your Kerberos setup. Refer to your
|
||||
Kerberos documentation to configure the `krb5.conf` file.
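
As an illustration only, a minimal `krb5.conf` might look like the following.
The realm and KDC host names are placeholders that must match your environment:

[source,ini]
--------------------------------------------------
[libdefaults]
    default_realm = ES.DOMAIN.LOCAL

[realms]
    ES.DOMAIN.LOCAL = {
        kdc = kdc.es.domain.local
    }
--------------------------------------------------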
|
||||
|
||||
For more information on Java GSS, see
|
||||
https://docs.oracle.com/javase/10/security/kerberos-requirements1.htm[Java GSS Kerberos requirements].
|
||||
--
|
||||
|
||||
==== Create a Kerberos realm
|
||||
|
||||
To configure a Kerberos realm in {es}:
|
||||
|
||||
. Configure the JVM to find the Kerberos configuration file.
|
||||
+
|
||||
--
|
||||
{es} uses Java GSS and JAAS Krb5LoginModule to support Kerberos authentication
|
||||
using the Simple and Protected GSSAPI Negotiation Mechanism (SPNEGO).
|
||||
The Kerberos configuration file (`krb5.conf`) provides information such as the
|
||||
default realm, the Key Distribution Center (KDC), and other configuration details
|
||||
required for Kerberos authentication. When the JVM needs some configuration
|
||||
properties, it tries to find those values by locating and loading this file. The
|
||||
JVM system property to configure the file path is `java.security.krb5.conf`. To
|
||||
configure JVM system properties, see {ref}/jvm-options.html[configuring jvm options].
|
||||
If this system property is not specified, Java tries to locate the file in its
|
||||
default, platform-specific locations.
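
For example, one way to set the property, assuming the configuration file lives
at `/etc/krb5.conf`, is to add this line to the `config/jvm.options` file:

[source,shell]
--------------------------------------------------
-Djava.security.krb5.conf=/etc/krb5.conf
--------------------------------------------------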
|
||||
|
||||
TIP: It is recommended that this system property be configured for {es}.
|
||||
The method for setting this property depends on your Kerberos infrastructure.
|
||||
Refer to your Kerberos documentation for more details.
|
||||
|
||||
For more information, see http://web.mit.edu/kerberos/krb5-latest/doc/admin/conf_files/krb5_conf.html[krb5.conf]
|
||||
|
||||
--
|
||||
|
||||
. Create a keytab for the {es} node.
|
||||
+
|
||||
--
|
||||
A keytab is a file that stores pairs of principals and encryption keys. {es}
|
||||
uses the keys from the keytab to decrypt the tickets presented by the user. You
|
||||
must create a keytab for {es} by using the tools provided by your Kerberos
|
||||
implementation. For example, some tools that create keytabs are `ktpass.exe` on
|
||||
Windows and `kadmin` for MIT Kerberos.
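
A sketch of the MIT Kerberos flow, assuming the service principal
`HTTP/es.domain.local@ES.DOMAIN.LOCAL` that is used elsewhere in this guide:

[source,shell]
--------------------------------------------------
# create the service principal with a random key, then export it to a keytab
kadmin -q "addprinc -randkey HTTP/es.domain.local@ES.DOMAIN.LOCAL"
kadmin -q "ktadd -k es.keytab HTTP/es.domain.local@ES.DOMAIN.LOCAL"
--------------------------------------------------
// NOTCONSOLE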
|
||||
--
|
||||
|
||||
. Put the keytab file in the {es} configuration directory.
|
||||
+
|
||||
--
|
||||
Make sure that the user running {es} can read this keytab file. The file
|
||||
contains credentials, so you must take appropriate measures to protect it.
|
||||
|
||||
IMPORTANT: {es} uses Kerberos on the HTTP network layer, therefore there must be
|
||||
a keytab file for the HTTP service principal on every {es} node. The service
|
||||
principal name must have the format `HTTP/es.domain.local@ES.DOMAIN.LOCAL`.
|
||||
The keytab files are unique for each node since they include the hostname.
|
||||
An {es} node can act as any principal a client requests as long as that
|
||||
principal and its credentials are found in the configured keytab.
|
||||
|
||||
--
|
||||
|
||||
. Create a Kerberos realm.
|
||||
+
|
||||
--
|
||||
|
||||
To enable Kerberos authentication in {es}, you must add a Kerberos realm in the
|
||||
realm chain.
|
||||
|
||||
NOTE: You can configure only one Kerberos realm on {es} nodes.
|
||||
|
||||
To configure a Kerberos realm, you must set a few mandatory realm settings, and
|
||||
you can set other optional ones, in the `elasticsearch.yml` configuration file.
|
||||
Add a realm of type `kerberos` under the
|
||||
`xpack.security.authc.realms` namespace.
|
||||
|
||||
The most common configuration for a Kerberos realm is as follows:
|
||||
|
||||
[source,yaml]
|
||||
------------------------------------------------------------
|
||||
xpack.security.authc.realms.kerb1:
|
||||
type: kerberos
|
||||
order: 3
|
||||
keytab.path: es.keytab
|
||||
remove_realm_name: false
|
||||
------------------------------------------------------------
|
||||
|
||||
The `username` is extracted from the ticket presented by the user and usually has
|
||||
the format `username@REALM`. This `username` is used for mapping
|
||||
roles to the user. If the realm setting `remove_realm_name` is
|
||||
set to `true`, the realm part (`@REALM`) is removed. The resulting `username`
|
||||
is used for role mapping.
|
||||
|
||||
For detailed information about the available realm settings,
|
||||
see {ref}/security-settings.html#ref-kerberos-settings[Kerberos realm settings].
|
||||
|
||||
--
|
||||
|
||||
. Restart {es}.
|
||||
|
||||
. Map Kerberos users to roles.
|
||||
+
|
||||
--
|
||||
|
||||
The `kerberos` realm enables you to map Kerberos users to roles. You can
|
||||
configure these role mappings by using the
|
||||
{ref}/security-api-role-mapping.html[role-mapping API]. You identify
|
||||
users by their `username` field.
|
||||
|
||||
The following example uses the role mapping API to map `user@REALM` to the
|
||||
role `monitoring_user`:
|
||||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
POST _xpack/security/role_mapping/kerbrolemapping
|
||||
{
|
||||
"roles" : [ "monitoring_user" ],
|
||||
"enabled": true,
|
||||
"rules" : {
|
||||
"field" : { "username" : "user@REALM" }
|
||||
}
|
||||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
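
To check that the mapping was stored, you can retrieve it by name. This sketch
uses the same role mapping API:

[source,js]
--------------------------------------------------
GET _xpack/security/role_mapping/kerbrolemapping
--------------------------------------------------
// CONSOLE
// TEST[continued]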
|
||||
|
||||
For more information, see {stack-ov}/mapping-roles.html[Mapping users and groups to roles].
|
||||
--
|
||||
|
|
@ -130,7 +130,7 @@ manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*.
|
|||
The _Role Management APIs_ enable you to add, update, remove and retrieve roles
|
||||
dynamically. When you use the APIs to manage roles in the `native` realm, the
|
||||
roles are stored in an internal {es} index. For more information and examples,
|
||||
see {ref}/security-api.html#security-api-roles[role management APIs].
|
||||
see {ref}/security-api.html#security-role-apis[role management APIs].
|
||||
|
||||
[float]
|
||||
[[roles-management-file]]
|
||||
|
|
|
@ -18,7 +18,7 @@ the API, and other roles that are mapped through files.
|
|||
|
||||
When you use role-mappings, you assign existing roles to users.
|
||||
The available roles should either be added using the
|
||||
{ref}/security-api.html#security-api-roles[role management APIs] or defined in the
|
||||
{ref}/security-api.html#security-role-apis[role management APIs] or defined in the
|
||||
<<roles-management-file, roles file>>. Either role-mapping method can use
|
||||
either role management method. For example, when you use the role mapping API,
|
||||
you are able to map users to both API-managed roles and file-managed roles
|
||||
|
|
|
@ -77,6 +77,7 @@ user API.
|
|||
** <<configuring-native-realm,Configure a native realm>>.
|
||||
** <<configuring-pki-realm,Configure a PKI realm>>.
|
||||
** <<configuring-saml-realm,Configure a SAML realm>>.
|
||||
** <<configuring-kerberos-realm,Configure a Kerberos realm>>.
|
||||
|
||||
. Set up roles and users to control access to {es}.
|
||||
For example, to grant _John Doe_ full access to all indices that match
|
||||
|
@ -142,5 +143,7 @@ include::authentication/configuring-ldap-realm.asciidoc[]
|
|||
include::authentication/configuring-native-realm.asciidoc[]
|
||||
include::authentication/configuring-pki-realm.asciidoc[]
|
||||
include::authentication/configuring-saml-realm.asciidoc[]
|
||||
:edit_url: https://github.com/elastic/elasticsearch/edit/{branch}/x-pack/docs/en/security/authentication/configuring-kerberos-realm.asciidoc
|
||||
include::authentication/configuring-kerberos-realm.asciidoc[]
|
||||
include::{es-repo-dir}/settings/security-settings.asciidoc[]
|
||||
include::{es-repo-dir}/settings/audit-settings.asciidoc[]
|
||||
|
|
|
@ -392,7 +392,7 @@ public class ScrollDataExtractorTests extends ESTestCase {
|
|||
assertThat(extractor.hasNext(), is(true));
|
||||
output = extractor.next();
|
||||
assertThat(output.isPresent(), is(true));
|
||||
assertEquals(new Long(1400L), extractor.getLastTimestamp());
|
||||
assertEquals(Long.valueOf(1400L), extractor.getLastTimestamp());
|
||||
// A second failure is not tolerated
|
||||
assertThat(extractor.hasNext(), is(true));
|
||||
expectThrows(SearchPhaseExecutionException.class, extractor::next);
|
||||
|
|
|
@ -125,7 +125,7 @@ public class JobResultsProviderIT extends MlSingleNodeTestCase {
|
|||
Long matchedCount = queryResult.stream().filter(
|
||||
c -> c.getId().equals("foo calendar") || c.getId().equals("foo bar calendar") || c.getId().equals("cat foo calendar"))
|
||||
.count();
|
||||
assertEquals(new Long(3), matchedCount);
|
||||
assertEquals(Long.valueOf(3), matchedCount);
|
||||
|
||||
queryResult = getCalendars("bar");
|
||||
assertThat(queryResult, hasSize(1));
|
||||
|
|
|
@ -30,7 +30,6 @@ import org.elasticsearch.transport.TransportRequest;
|
|||
import org.elasticsearch.xpack.core.graph.action.GraphExploreRequest;
|
||||
import org.elasticsearch.xpack.core.security.authz.IndicesAndAliasesResolverField;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
|
@ -429,7 +428,7 @@ class IndicesAndAliasesResolver {
|
|||
}
|
||||
|
||||
@Override
|
||||
protected void updateRemoteCluster(String clusterAlias, List<InetSocketAddress> addresses) {
|
||||
protected void updateRemoteCluster(String clusterAlias, List<String> addresses) {
|
||||
if (addresses.isEmpty()) {
|
||||
clusters.remove(clusterAlias);
|
||||
} else {
|
||||
|
|
|
@ -10,7 +10,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
|
||||
import org.elasticsearch.common.network.CloseableChannel;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.MockSecureSettings;
|
||||
|
@ -35,6 +34,9 @@ import org.elasticsearch.xpack.core.common.socket.SocketAccess;
|
|||
import org.elasticsearch.xpack.core.ssl.SSLConfiguration;
|
||||
import org.elasticsearch.xpack.core.ssl.SSLService;
|
||||
|
||||
import javax.net.SocketFactory;
|
||||
import javax.net.ssl.HandshakeCompletedListener;
|
||||
import javax.net.ssl.SSLSocket;
|
||||
import java.io.IOException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.SocketTimeoutException;
|
||||
|
@ -44,10 +46,6 @@ import java.util.Collections;
|
|||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import javax.net.SocketFactory;
|
||||
import javax.net.ssl.HandshakeCompletedListener;
|
||||
import javax.net.ssl.SSLSocket;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.emptySet;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
|
@ -118,12 +116,6 @@ public class SimpleSecurityNioTransportTests extends AbstractSimpleTransportTest
|
|||
return transportService;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
|
||||
TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection;
|
||||
CloseableChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
|
||||
}
|
||||
|
||||
public void testConnectException() throws UnknownHostException {
|
||||
try {
|
||||
serviceA.connectToNode(new DiscoveryNode("C", new TransportAddress(InetAddress.getByName("localhost"), 9876),
|
||||
|
|
|
@ -58,7 +58,7 @@ final class TypeConverter {
|
|||
|
||||
private static final long DAY_IN_MILLIS = 60 * 60 * 24;
|
||||
private static final Map<Class<?>, JDBCType> javaToJDBC;
|
||||
|
||||
|
||||
static {
|
||||
Map<Class<?>, JDBCType> aMap = Arrays.stream(DataType.values())
|
||||
.filter(dataType -> dataType.javaClass() != null
|
||||
|
@ -119,7 +119,7 @@ final class TypeConverter {
|
|||
c.setTimeInMillis(initial);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static long convertFromCalendarToUTC(long value, Calendar cal) {
|
||||
if (cal == null) {
|
||||
return value;
|
||||
|
@ -142,7 +142,7 @@ final class TypeConverter {
|
|||
if (type == null) {
|
||||
return (T) convert(val, columnType);
|
||||
}
|
||||
|
||||
|
||||
if (type.isInstance(val)) {
|
||||
try {
|
||||
return type.cast(val);
|
||||
|
@ -150,7 +150,7 @@ final class TypeConverter {
|
|||
throw new SQLDataException("Unable to convert " + val.getClass().getName() + " to " + columnType, cce);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (type == String.class) {
|
||||
return (T) asString(convert(val, columnType));
|
||||
}
|
||||
|
@ -276,8 +276,8 @@ final class TypeConverter {
|
|||
}
|
||||
return dataType.isSigned();
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
static JDBCType fromJavaToJDBC(Class<?> clazz) throws SQLException {
|
||||
for (Entry<Class<?>, JDBCType> e : javaToJDBC.entrySet()) {
|
||||
// java.util.Calendar from {@code javaToJDBC} is an abstract class and this method can be used with concrete classes as well
|
||||
|
@ -285,7 +285,7 @@ final class TypeConverter {
|
|||
return e.getValue();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
throw new SQLFeatureNotSupportedException("Objects of type " + clazz.getName() + " are not supported");
|
||||
}
|
||||
|
||||
|
@ -432,7 +432,7 @@ final class TypeConverter {
|
|||
case REAL:
|
||||
case FLOAT:
|
||||
case DOUBLE:
|
||||
return new Float(((Number) val).doubleValue());
|
||||
return Float.valueOf((((float) ((Number) val).doubleValue())));
|
||||
default:
|
||||
}
|
||||
|
||||
|
@ -451,7 +451,7 @@ final class TypeConverter {
|
|||
case REAL:
|
||||
case FLOAT:
|
||||
case DOUBLE:
|
||||
return new Double(((Number) val).doubleValue());
|
||||
return Double.valueOf(((Number) val).doubleValue());
|
||||
default:
|
||||
}
|
||||
|
||||
|
@ -539,4 +539,4 @@ final class TypeConverter {
|
|||
}
|
||||
return Math.round(x);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,27 +38,27 @@ import static java.sql.JDBCType.VARBINARY;
|
|||
import static java.sql.JDBCType.VARCHAR;
|
||||
|
||||
public class JdbcPreparedStatementTests extends ESTestCase {
|
||||
|
||||
|
||||
public void testSettingBooleanValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
jps.setBoolean(1, true);
|
||||
assertEquals(true, value(jps));
|
||||
assertEquals(BOOLEAN, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, false);
|
||||
assertEquals(false, value(jps));
|
||||
assertEquals(BOOLEAN, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, true, Types.BOOLEAN);
|
||||
assertEquals(true, value(jps));
|
||||
assertEquals(BOOLEAN, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Boolean);
|
||||
|
||||
|
||||
jps.setObject(1, true, Types.INTEGER);
|
||||
assertEquals(1, value(jps));
|
||||
assertEquals(INTEGER, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, true, Types.VARCHAR);
|
||||
assertEquals("true", value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
@ -66,264 +66,264 @@ public class JdbcPreparedStatementTests extends ESTestCase {
|
|||
|
||||
public void testThrownExceptionsWhenSettingBooleanValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, true, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [BOOLEAN] to [Timestamp] not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingStringValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
jps.setString(1, "foo bar");
|
||||
assertEquals("foo bar", value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, "foo bar");
|
||||
assertEquals("foo bar", value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, "foo bar", Types.VARCHAR);
|
||||
assertEquals("foo bar", value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof String);
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingStringValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, "foo bar", Types.INTEGER));
|
||||
assertEquals("Conversion from type [VARCHAR] to [Integer] not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingByteTypeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
jps.setByte(1, (byte) 6);
|
||||
assertEquals((byte) 6, value(jps));
|
||||
assertEquals(TINYINT, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, (byte) 6);
|
||||
assertEquals((byte) 6, value(jps));
|
||||
assertEquals(TINYINT, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Byte);
|
||||
|
||||
|
||||
jps.setObject(1, (byte) 0, Types.BOOLEAN);
|
||||
assertEquals(false, value(jps));
|
||||
assertEquals(BOOLEAN, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, (byte) 123, Types.BOOLEAN);
|
||||
assertEquals(true, value(jps));
|
||||
assertEquals(BOOLEAN, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, (byte) 123, Types.INTEGER);
|
||||
assertEquals(123, value(jps));
|
||||
assertEquals(INTEGER, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, (byte) -128, Types.DOUBLE);
|
||||
assertEquals(-128.0, value(jps));
|
||||
assertEquals(DOUBLE, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingByteTypeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (byte) 6, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [TINYINT] to [Timestamp] not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingShortTypeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
short someShort = randomShort();
|
||||
jps.setShort(1, someShort);
|
||||
assertEquals(someShort, value(jps));
|
||||
assertEquals(SMALLINT, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someShort);
|
||||
assertEquals(someShort, value(jps));
|
||||
assertEquals(SMALLINT, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Short);
|
||||
|
||||
|
||||
jps.setObject(1, (short) 1, Types.BOOLEAN);
|
||||
assertEquals(true, value(jps));
|
||||
assertEquals(BOOLEAN, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, (short) -32700, Types.DOUBLE);
|
||||
assertEquals(-32700.0, value(jps));
|
||||
assertEquals(DOUBLE, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someShort, Types.INTEGER);
|
||||
assertEquals((int) someShort, value(jps));
|
||||
assertEquals(INTEGER, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingShortTypeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, (short) 6, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [SMALLINT] to [Timestamp] not supported", sqle.getMessage());
|
||||
|
||||
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, 256, Types.TINYINT));
|
||||
assertEquals("Numeric " + 256 + " out of range", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingIntegerValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
int someInt = randomInt();
|
||||
jps.setInt(1, someInt);
|
||||
assertEquals(someInt, value(jps));
|
||||
assertEquals(INTEGER, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someInt);
|
||||
assertEquals(someInt, value(jps));
|
||||
assertEquals(INTEGER, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Integer);
|
||||
|
||||
|
||||
jps.setObject(1, someInt, Types.VARCHAR);
|
||||
assertEquals(String.valueOf(someInt), value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someInt, Types.FLOAT);
|
||||
assertEquals(Double.valueOf(someInt), value(jps));
|
||||
assertTrue(value(jps) instanceof Double);
|
||||
assertEquals(FLOAT, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingIntegerValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
int someInt = randomInt();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someInt, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [INTEGER] to [Timestamp] not supported", sqle.getMessage());
|
||||
|
||||
|
||||
Integer randomIntNotShort = randomIntBetween(32768, Integer.MAX_VALUE);
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.SMALLINT));
|
||||
assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage());
|
||||
|
||||
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomIntNotShort, Types.TINYINT));
|
||||
assertEquals("Numeric " + randomIntNotShort + " out of range", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingLongValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
long someLong = randomLong();
|
||||
jps.setLong(1, someLong);
|
||||
assertEquals(someLong, value(jps));
|
||||
assertEquals(BIGINT, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someLong);
|
||||
assertEquals(someLong, value(jps));
|
||||
assertEquals(BIGINT, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Long);
|
||||
|
||||
|
||||
jps.setObject(1, someLong, Types.VARCHAR);
|
||||
assertEquals(String.valueOf(someLong), value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someLong, Types.DOUBLE);
|
||||
assertEquals((double) someLong, value(jps));
|
||||
assertEquals(DOUBLE, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someLong, Types.FLOAT);
|
||||
assertEquals((double) someLong, value(jps));
|
||||
assertEquals(FLOAT, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingLongValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
long someLong = randomLong();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someLong, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [BIGINT] to [Timestamp] not supported", sqle.getMessage());
|
||||
|
||||
|
||||
Long randomLongNotShort = randomLongBetween(Integer.MAX_VALUE + 1, Long.MAX_VALUE);
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.INTEGER));
|
||||
assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage());
|
||||
|
||||
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, randomLongNotShort, Types.SMALLINT));
|
||||
assertEquals("Numeric " + randomLongNotShort + " out of range", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingFloatValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
float someFloat = randomFloat();
|
||||
jps.setFloat(1, someFloat);
|
||||
assertEquals(someFloat, value(jps));
|
||||
assertEquals(REAL, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someFloat);
|
||||
assertEquals(someFloat, value(jps));
|
||||
assertEquals(REAL, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Float);
|
||||
|
||||
|
||||
jps.setObject(1, someFloat, Types.VARCHAR);
|
||||
assertEquals(String.valueOf(someFloat), value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someFloat, Types.DOUBLE);
|
||||
assertEquals((double) someFloat, value(jps));
|
||||
assertEquals(DOUBLE, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someFloat, Types.FLOAT);
|
||||
assertEquals((double) someFloat, value(jps));
|
||||
assertEquals(FLOAT, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingFloatValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
float someFloat = randomFloat();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someFloat, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [REAL] to [Timestamp] not supported", sqle.getMessage());
|
||||
|
||||
|
||||
Float floatNotInt = 5_155_000_000f;
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.INTEGER));
|
||||
assertEquals(String.format(Locale.ROOT, "Numeric %s out of range",
|
||||
assertEquals(String.format(Locale.ROOT, "Numeric %s out of range",
|
||||
Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage());
|
||||
|
||||
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, floatNotInt, Types.SMALLINT));
|
||||
assertEquals(String.format(Locale.ROOT, "Numeric %s out of range",
|
||||
assertEquals(String.format(Locale.ROOT, "Numeric %s out of range",
|
||||
Long.toString(Math.round(floatNotInt.doubleValue()))), sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingDoubleValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
double someDouble = randomDouble();
|
||||
jps.setDouble(1, someDouble);
|
||||
assertEquals(someDouble, value(jps));
|
||||
assertEquals(DOUBLE, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someDouble);
|
||||
assertEquals(someDouble, value(jps));
|
||||
assertEquals(DOUBLE, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof Double);
|
||||
|
||||
|
||||
jps.setObject(1, someDouble, Types.VARCHAR);
|
||||
assertEquals(String.valueOf(someDouble), value(jps));
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, someDouble, Types.REAL);
|
||||
assertEquals(new Float(someDouble), value(jps));
|
||||
assertEquals((float) someDouble, value(jps));
|
||||
assertEquals(REAL, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingDoubleValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
double someDouble = randomDouble();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLException.class, () -> jps.setObject(1, someDouble, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type [DOUBLE] to [Timestamp] not supported", sqle.getMessage());
|
||||
|
||||
|
||||
Double doubleNotInt = 5_155_000_000d;
|
||||
sqle = expectThrows(SQLException.class, () -> jps.setObject(1, doubleNotInt, Types.INTEGER));
|
||||
assertEquals(String.format(Locale.ROOT, "Numeric %s out of range",
|
||||
assertEquals(String.format(Locale.ROOT, "Numeric %s out of range",
|
||||
Long.toString(((Number) doubleNotInt).longValue())), sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testUnsupportedClasses() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
SQLFeatureNotSupportedException sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new Struct() {
|
||||
|
@ -341,23 +341,23 @@ public class JdbcPreparedStatementTests extends ESTestCase {
|
|||
}
|
||||
}));
|
||||
assertEquals("Objects of type java.sql.Struct are not supported", sfnse.getMessage());
|
||||
|
||||
|
||||
sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, new URL("http://test")));
|
||||
assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage());
|
||||
|
||||
|
||||
sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setURL(1, new URL("http://test")));
|
||||
assertEquals("Objects of type java.net.URL are not supported", sfnse.getMessage());
|
||||
|
||||
|
||||
sfnse = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, this, Types.TIMESTAMP));
|
||||
assertEquals("Conversion from type " + this.getClass().getName() + " to TIMESTAMP not supported", sfnse.getMessage());
|
||||
|
||||
|
||||
SQLException se = expectThrows(SQLException.class, () -> jps.setObject(1, this, 1_000_000));
|
||||
assertEquals("Type:1000000 is not a valid Types.java value.", se.getMessage());
|
||||
|
||||
|
||||
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> jps.setObject(1, randomShort(), Types.CHAR));
|
||||
assertEquals("Unsupported JDBC type [CHAR]", iae.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingTimestampValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
@ -365,186 +365,186 @@ public class JdbcPreparedStatementTests extends ESTestCase {
|
|||
jps.setTimestamp(1, someTimestamp);
|
||||
assertEquals(someTimestamp.getTime(), ((Date)value(jps)).getTime());
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
|
||||
|
||||
Calendar nonDefaultCal = randomCalendar();
|
||||
// February 29th, 2016. 01:17:55 GMT = 1456708675000 millis since epoch
|
||||
jps.setTimestamp(1, new Timestamp(1456708675000L), nonDefaultCal);
|
||||
assertEquals(1456708675000L, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
|
||||
|
||||
long beforeEpochTime = -randomMillisSinceEpoch();
|
||||
jps.setTimestamp(1, new Timestamp(beforeEpochTime), nonDefaultCal);
|
||||
assertEquals(beforeEpochTime, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
|
||||
assertTrue(value(jps) instanceof java.util.Date);
|
||||
|
||||
|
||||
jps.setObject(1, someTimestamp, Types.VARCHAR);
|
||||
assertEquals(someTimestamp.toString(), value(jps).toString());
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingTimestampValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
Timestamp someTimestamp = new Timestamp(randomMillisSinceEpoch());
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someTimestamp, Types.INTEGER));
|
||||
assertEquals("Conversion from type java.sql.Timestamp to INTEGER not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
public void testSettingTimeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
Time time = new Time(4675000);
|
||||
Calendar nonDefaultCal = randomCalendar();
|
||||
jps.setTime(1, time, nonDefaultCal);
|
||||
assertEquals(4675000, convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof java.util.Date);
|
||||
|
||||
|
||||
jps.setObject(1, time, Types.VARCHAR);
|
||||
assertEquals(time.toString(), value(jps).toString());
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingTimeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
Time time = new Time(4675000);
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, time, Types.INTEGER));
|
||||
assertEquals("Conversion from type java.sql.Time to INTEGER not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingSqlDateValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch());
|
||||
jps.setDate(1, someSqlDate);
|
||||
assertEquals(someSqlDate.getTime(), ((Date)value(jps)).getTime());
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
|
||||
|
||||
someSqlDate = new java.sql.Date(randomMillisSinceEpoch());
|
||||
Calendar nonDefaultCal = randomCalendar();
|
||||
jps.setDate(1, someSqlDate, nonDefaultCal);
|
||||
assertEquals(someSqlDate.getTime(), convertFromUTCtoCalendar(((Date)value(jps)), nonDefaultCal));
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof java.util.Date);
|
||||
|
||||
|
||||
jps.setObject(1, someSqlDate, Types.VARCHAR);
|
||||
assertEquals(someSqlDate.toString(), value(jps).toString());
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingSqlDateValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
java.sql.Date someSqlDate = new java.sql.Date(randomMillisSinceEpoch());
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class,
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class,
|
||||
() -> jps.setObject(1, new java.sql.Date(randomMillisSinceEpoch()), Types.DOUBLE));
|
||||
assertEquals("Conversion from type " + someSqlDate.getClass().getName() + " to DOUBLE not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingCalendarValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
Calendar someCalendar = randomCalendar();
|
||||
someCalendar.setTimeInMillis(randomMillisSinceEpoch());
|
||||
|
||||
|
||||
jps.setObject(1, someCalendar);
|
||||
assertEquals(someCalendar.getTime(), (Date) value(jps));
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof java.util.Date);
|
||||
|
||||
|
||||
jps.setObject(1, someCalendar, Types.VARCHAR);
|
||||
assertEquals(someCalendar.toString(), value(jps).toString());
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
|
||||
|
||||
Calendar nonDefaultCal = randomCalendar();
|
||||
jps.setObject(1, nonDefaultCal);
|
||||
assertEquals(nonDefaultCal.getTime(), (Date) value(jps));
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingCalendarValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
Calendar someCalendar = randomCalendar();
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someCalendar, Types.DOUBLE));
|
||||
assertEquals("Conversion from type " + someCalendar.getClass().getName() + " to DOUBLE not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingDateValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
Date someDate = new Date(randomMillisSinceEpoch());
|
||||
|
||||
|
||||
jps.setObject(1, someDate);
|
||||
assertEquals(someDate, (Date) value(jps));
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof java.util.Date);
|
||||
|
||||
|
||||
jps.setObject(1, someDate, Types.VARCHAR);
|
||||
assertEquals(someDate.toString(), value(jps).toString());
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingDateValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
Date someDate = new Date(randomMillisSinceEpoch());
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, someDate, Types.BIGINT));
|
||||
assertEquals("Conversion from type " + someDate.getClass().getName() + " to BIGINT not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingLocalDateTimeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone());
|
||||
|
||||
|
||||
jps.setObject(1, ldt);
|
||||
assertEquals(Date.class, value(jps).getClass());
|
||||
assertEquals(TIMESTAMP, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof java.util.Date);
|
||||
|
||||
|
||||
jps.setObject(1, ldt, Types.VARCHAR);
|
||||
assertEquals(ldt.toString(), value(jps).toString());
|
||||
assertEquals(VARCHAR, jdbcType(jps));
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingLocalDateTimeValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
LocalDateTime ldt = LocalDateTime.now(Clock.systemDefaultZone());
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, ldt, Types.BIGINT));
|
||||
assertEquals("Conversion from type " + ldt.getClass().getName() + " to BIGINT not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testSettingByteArrayValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
|
||||
|
||||
byte[] buffer = "some data".getBytes(StandardCharsets.UTF_8);
|
||||
jps.setBytes(1, buffer);
|
||||
assertEquals(byte[].class, value(jps).getClass());
|
||||
assertEquals(VARBINARY, jdbcType(jps));
|
||||
|
||||
|
||||
jps.setObject(1, buffer);
|
||||
assertEquals(byte[].class, value(jps).getClass());
|
||||
assertEquals(VARBINARY, jdbcType(jps));
|
||||
assertTrue(value(jps) instanceof byte[]);
|
||||
|
||||
|
||||
jps.setObject(1, buffer, Types.VARBINARY);
|
||||
assertEquals((byte[]) value(jps), buffer);
|
||||
assertEquals(VARBINARY, jdbcType(jps));
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR));
|
||||
assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage());
|
||||
|
||||
|
||||
sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE));
|
||||
assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage());
|
||||
}
|
||||
|
||||
|
||||
public void testThrownExceptionsWhenSettingByteArrayValues() throws SQLException {
|
||||
JdbcPreparedStatement jps = createJdbcPreparedStatement();
|
||||
byte[] buffer = "foo".getBytes(StandardCharsets.UTF_8);
|
||||
|
||||
|
||||
SQLException sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.VARCHAR));
|
||||
assertEquals("Conversion from type byte[] to VARCHAR not supported", sqle.getMessage());
|
||||
|
||||
|
||||
sqle = expectThrows(SQLFeatureNotSupportedException.class, () -> jps.setObject(1, buffer, Types.DOUBLE));
|
||||
assertEquals("Conversion from type byte[] to DOUBLE not supported", sqle.getMessage());
|
||||
}
|
||||
|
@ -564,14 +564,14 @@ public class JdbcPreparedStatementTests extends ESTestCase {
|
|||
private Object value(JdbcPreparedStatement jps) throws SQLException {
|
||||
return jps.query.getParam(1).value;
|
||||
}
|
||||
|
||||
|
||||
private Calendar randomCalendar() {
|
||||
return Calendar.getInstance(randomTimeZone(), Locale.ROOT);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Converts from UTC to the provided Calendar.
|
||||
* Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert
|
||||
* Helps checking if the converted date/time values using Calendars in set*(...,Calendar) methods did convert
|
||||
* the values correctly to UTC.
|
||||
*/
|
||||
private long convertFromUTCtoCalendar(Date date, Calendar nonDefaultCal) throws SQLException {
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.delete_user": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-delete-user",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-delete-user.html",
|
||||
"methods": [ "DELETE" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/user/{username}",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.disable_user": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-disable-user",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-disable-user.html",
|
||||
"methods": [ "PUT", "POST" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/user/{username}/_disable",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.enable_user": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-enable-user",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-enable-user.html",
|
||||
"methods": [ "PUT", "POST" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/user/{username}/_enable",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.get_token": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-get-token",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-token.html",
|
||||
"methods": [ "POST" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/oauth2/token",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.get_user": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-get-user",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-get-user.html",
|
||||
"methods": [ "GET" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/user/{username}",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.invalidate_token": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-tokens.html#security-api-invalidate-token",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-token.html",
|
||||
"methods": [ "DELETE" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/oauth2/token",
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"xpack.security.put_user": {
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-users.html#security-api-put-user",
|
||||
"documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-put-user.html",
|
||||
"methods": [ "PUT", "POST" ],
|
||||
"url": {
|
||||
"path": "/_xpack/security/user/{username}",
|
||||
|
|
|
@ -13,20 +13,15 @@ import java.util.Objects;
|
|||
|
||||
public abstract class CronnableSchedule implements Schedule {
|
||||
|
||||
private static final Comparator<Cron> CRON_COMPARATOR = new Comparator<Cron>() {
|
||||
@Override
|
||||
public int compare(Cron c1, Cron c2) {
|
||||
return c1.expression().compareTo(c2.expression());
|
||||
}
|
||||
};
|
||||
private static final Comparator<Cron> CRON_COMPARATOR = Comparator.comparing(Cron::expression);
|
||||
|
||||
protected final Cron[] crons;
|
||||
|
||||
public CronnableSchedule(String... expressions) {
|
||||
CronnableSchedule(String... expressions) {
|
||||
this(crons(expressions));
|
||||
}
|
||||
|
||||
public CronnableSchedule(Cron... crons) {
|
||||
private CronnableSchedule(Cron... crons) {
|
||||
assert crons.length > 0;
|
||||
this.crons = crons;
|
||||
Arrays.sort(crons, CRON_COMPARATOR);
|
||||
|
@ -37,7 +32,15 @@ public abstract class CronnableSchedule implements Schedule {
|
|||
assert time >= startTime;
|
||||
long nextTime = Long.MAX_VALUE;
|
||||
for (Cron cron : crons) {
|
||||
nextTime = Math.min(nextTime, cron.getNextValidTimeAfter(time));
|
||||
long nextValidTimeAfter = cron.getNextValidTimeAfter(time);
|
||||
|
||||
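// getNextValidTimeAfter returns -1 once a cron expression has no future fire times;
// don't let a -1 from an earlier cron win Math.min and mask a valid later time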
boolean previousCronExpired = nextTime == -1;
|
||||
boolean currentCronValid = nextValidTimeAfter > -1;
|
||||
if (previousCronExpired && currentCronValid) {
|
||||
nextTime = nextValidTimeAfter;
|
||||
} else {
|
||||
nextTime = Math.min(nextTime, nextValidTimeAfter);
|
||||
}
|
||||
}
|
||||
return nextTime;
|
||||
}
|
||||
|
|
|
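This hunk is the actual bug fix. Cron.getNextValidTimeAfter returns -1 once an expression has no future trigger, and the old Math.min-only loop let that sentinel stick: one expired cron could hide a sibling that still had a valid next time. A self-contained sketch of the fixed selection logic; the helper signature and the raw epoch-millisecond inputs are illustrative, not the real API:

public class NextTimeDemo {
    // mirrors the loop in the hunk above; -1 means "this cron has no future trigger"
    static long nextTime(long... nextValidTimes) {
        long nextTime = Long.MAX_VALUE;
        for (long nextValidTimeAfter : nextValidTimes) {
            boolean previousCronExpired = nextTime == -1;
            boolean currentCronValid = nextValidTimeAfter > -1;
            if (previousCronExpired && currentCronValid) {
                nextTime = nextValidTimeAfter; // recover from the sentinel
            } else {
                nextTime = Math.min(nextTime, nextValidTimeAfter);
            }
        }
        return nextTime;
    }

    public static void main(String[] args) {
        // expired cron first, then one firing at 2020-01-01T00:00:00Z
        System.out.println(nextTime(-1L, 1577836800000L)); // prints 1577836800000
        // the old Math.min-only loop would have returned -1 here
    }
}

This is exactly the scenario the new testMultipleCronsNextScheduledAfter further down pins in place, using a 2017 expression that has already expired alongside live 2019/2020 ones.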
@@ -33,7 +33,7 @@ public class CompareConditionTests extends ESTestCase {
         assertThat(CompareCondition.Op.EQ.eval(null, null), is(true));
         assertThat(CompareCondition.Op.EQ.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.EQ.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.EQ.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.EQ.eval(2, Float.valueOf((float)3.0)), is(false));
         assertThat(CompareCondition.Op.EQ.eval(3, null), is(false));
         assertThat(CompareCondition.Op.EQ.eval(2, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.EQ.eval(3, "4"), is(false)); // comparing as strings
@@ -59,7 +59,7 @@ public class CompareConditionTests extends ESTestCase {
         assertThat(CompareCondition.Op.NOT_EQ.eval(null, null), is(false));
         assertThat(CompareCondition.Op.NOT_EQ.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.NOT_EQ.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.NOT_EQ.eval(2, Float.valueOf((float)3.0)), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, null), is(true));
         assertThat(CompareCondition.Op.NOT_EQ.eval(2, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.NOT_EQ.eval(3, "4"), is(true)); // comparing as strings
@@ -83,7 +83,7 @@ public class CompareConditionTests extends ESTestCase {
     public void testOpEvalGTE() throws Exception {
         assertThat(CompareCondition.Op.GTE.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.GTE.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.GTE.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.GTE.eval(2, Float.valueOf((float)3.0)), is(false));
         assertThat(CompareCondition.Op.GTE.eval(3, null), is(false));
         assertThat(CompareCondition.Op.GTE.eval(3, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.GTE.eval(3, "4"), is(false)); // comparing as strings
@@ -103,7 +103,7 @@ public class CompareConditionTests extends ESTestCase {
     public void testOpEvalGT() throws Exception {
         assertThat(CompareCondition.Op.GT.eval(4, 3.0), is(true));
         assertThat(CompareCondition.Op.GT.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.GT.eval(2, new Float(3.0)), is(false));
+        assertThat(CompareCondition.Op.GT.eval(2, Float.valueOf((float)3.0)), is(false));
         assertThat(CompareCondition.Op.GT.eval(3, null), is(false));
         assertThat(CompareCondition.Op.GT.eval(3, "2"), is(true)); // comparing as strings
         assertThat(CompareCondition.Op.GT.eval(3, "4"), is(false)); // comparing as strings
@@ -124,7 +124,7 @@ public class CompareConditionTests extends ESTestCase {
     public void testOpEvalLTE() throws Exception {
         assertThat(CompareCondition.Op.LTE.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.LTE.eval(3, 3.0), is(true));
-        assertThat(CompareCondition.Op.LTE.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.LTE.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.LTE.eval(3, null), is(false));
         assertThat(CompareCondition.Op.LTE.eval(3, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.LTE.eval(3, "4"), is(true)); // comparing as strings
@@ -144,7 +144,7 @@ public class CompareConditionTests extends ESTestCase {
     public void testOpEvalLT() throws Exception {
         assertThat(CompareCondition.Op.LT.eval(4, 3.0), is(false));
         assertThat(CompareCondition.Op.LT.eval(3, 3.0), is(false));
-        assertThat(CompareCondition.Op.LT.eval(2, new Float(3.0)), is(true));
+        assertThat(CompareCondition.Op.LT.eval(2, Float.valueOf((float) 3.0)), is(true));
         assertThat(CompareCondition.Op.LT.eval(3, null), is(false));
         assertThat(CompareCondition.Op.LT.eval(3, "2"), is(false)); // comparing as strings
         assertThat(CompareCondition.Op.LT.eval(3, "4"), is(true)); // comparing as strings
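All six CompareConditionTests hunks make the same mechanical change: new Float(...), whose boxed-primitive constructors are deprecated as of Java 9, becomes the Float.valueOf factory. The explicit (float) cast appears because valueOf only accepts a float, whereas the removed constructor also had a double overload. A minimal sketch:

public class BoxingDemo {
    public static void main(String[] args) {
        // new Float(3.0) still compiles, but is deprecated since Java 9 and always allocates
        Float boxed = Float.valueOf((float) 3.0); // preferred static factory; note the narrowing cast
        System.out.println(boxed.equals(3.0f)); // true
    }
}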
@@ -11,11 +11,15 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.json.JsonXContent;

+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.arrayWithSize;
 import static org.hamcrest.Matchers.hasItemInArray;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;

 public class CronScheduleTests extends ScheduleTestCase {
     public void testInvalid() throws Exception {
@@ -54,18 +58,25 @@ public class CronScheduleTests extends ScheduleTestCase {
         assertThat(crons, hasItemInArray("0 0/3 * * * ?"));
     }

+    public void testMultipleCronsNextScheduledAfter() {
+        CronSchedule schedule = new CronSchedule("0 5 9 1 1 ? 2019", "0 5 9 1 1 ? 2020", "0 5 9 1 1 ? 2017");
+        ZonedDateTime start2019 = ZonedDateTime.of(2019, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+        ZonedDateTime start2020 = ZonedDateTime.of(2020, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC);
+        long firstSchedule = schedule.nextScheduledTimeAfter(0, start2019.toInstant().toEpochMilli());
+        long secondSchedule = schedule.nextScheduledTimeAfter(0, start2020.toInstant().toEpochMilli());
+
+        assertThat(firstSchedule, is(not(-1L)));
+        assertThat(secondSchedule, is(not(-1L)));
+        assertThat(firstSchedule, is(not(secondSchedule)));
+    }
+
     public void testParseInvalidBadExpression() throws Exception {
         XContentBuilder builder = jsonBuilder().value("0 0/5 * * ?");
         BytesReference bytes = BytesReference.bytes(builder);
         XContentParser parser = createParser(JsonXContent.jsonXContent, bytes);
         parser.nextToken();
-        try {
-            new CronSchedule.Parser().parse(parser);
-            fail("expected cron parsing to fail when using invalid cron expression");
-        } catch (ElasticsearchParseException pe) {
-            // expected
-            assertThat(pe.getCause(), instanceOf(IllegalArgumentException.class));
-        }
+        ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> new CronSchedule.Parser().parse(parser));
+        assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
     }

     public void testParseInvalidEmpty() throws Exception {
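The last hunk adds the regression test for the multi-cron fix and modernizes testParseInvalidBadExpression: the try/fail/catch idiom collapses into expectThrows, which both asserts the exception type and hands the caught instance back for follow-up assertions. expectThrows is inherited from the Lucene/Elasticsearch test base class; a self-contained sketch of the pattern with a hand-rolled stand-in helper:

public class ExpectThrowsDemo {
    // minimal stand-in for the expectThrows utility the test inherits from its base class
    static <T extends Throwable> T expectThrows(Class<T> expectedType, Runnable runnable) {
        try {
            runnable.run();
        } catch (Throwable t) {
            if (expectedType.isInstance(t)) {
                return expectedType.cast(t);
            }
            throw new AssertionError("unexpected exception type: " + t.getClass(), t);
        }
        throw new AssertionError("expected " + expectedType.getSimpleName() + " to be thrown");
    }

    public static void main(String[] args) {
        NumberFormatException e =
            expectThrows(NumberFormatException.class, () -> Integer.parseInt("0 0/5 * * ?"));
        System.out.println("caught as expected: " + e.getMessage());
    }
}

Compared with the old try/catch, there is no way to forget the fail() call, and the returned exception keeps the getCause() assertion on a single line.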